Skip to content

[pull] master from postgres:master #140

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jul 3, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 15 additions & 3 deletions src/backend/access/transam/xlogrecovery.c
Original file line number Diff line number Diff line change
Expand Up @@ -4994,13 +4994,25 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source)
rttg = RECOVERY_TARGET_TIMELINE_LATEST;
else
{
char *endp;
uint64 timeline;

rttg = RECOVERY_TARGET_TIMELINE_NUMERIC;

errno = 0;
strtoul(*newval, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
timeline = strtou64(*newval, &endp, 0);

if (*endp != '\0' || errno == EINVAL || errno == ERANGE)
{
GUC_check_errdetail("\"%s\" is not a valid number.",
"recovery_target_timeline");
return false;
}

if (timeline < 1 || timeline > PG_UINT32_MAX)
{
GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
GUC_check_errdetail("\"%s\" must be between %u and %u.",
"recovery_target_timeline", 1, UINT_MAX);
return false;
}
}
Expand Down
47 changes: 25 additions & 22 deletions src/backend/optimizer/path/joinpath.c
Original file line number Diff line number Diff line change
Expand Up @@ -154,13 +154,17 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* See if the inner relation is provably unique for this outer rel.
*
* We have some special cases: for JOIN_SEMI and JOIN_ANTI, it doesn't
* matter since the executor can make the equivalent optimization anyway;
* we need not expend planner cycles on proofs. For JOIN_UNIQUE_INNER, we
* must be considering a semijoin whose inner side is not provably unique
* (else reduce_unique_semijoins would've simplified it), so there's no
* point in calling innerrel_is_unique. However, if the LHS covers all of
* the semijoin's min_lefthand, then it's appropriate to set inner_unique
* We have some special cases: for JOIN_SEMI, it doesn't matter since the
* executor can make the equivalent optimization anyway. It also doesn't
* help enable use of Memoize, since a semijoin with a provably unique
* inner side should have been reduced to an inner join in that case.
* Therefore, we need not expend planner cycles on proofs. (For
* JOIN_ANTI, although it doesn't help the executor for the same reason,
* it can benefit Memoize paths.) For JOIN_UNIQUE_INNER, we must be
* considering a semijoin whose inner side is not provably unique (else
* reduce_unique_semijoins would've simplified it), so there's no point in
* calling innerrel_is_unique. However, if the LHS covers all of the
* semijoin's min_lefthand, then it's appropriate to set inner_unique
* because the path produced by create_unique_path will be unique relative
* to the LHS. (If we have an LHS that's only part of the min_lefthand,
* that is *not* true.) For JOIN_UNIQUE_OUTER, pass JOIN_INNER to avoid
Expand All @@ -169,12 +173,6 @@ add_paths_to_joinrel(PlannerInfo *root,
switch (jointype)
{
case JOIN_SEMI:
case JOIN_ANTI:

/*
* XXX it may be worth proving this to allow a Memoize to be
* considered for Nested Loop Semi/Anti Joins.
*/
extra.inner_unique = false; /* well, unproven */
break;
case JOIN_UNIQUE_INNER:
Expand Down Expand Up @@ -715,16 +713,21 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
return NULL;

/*
* Currently we don't do this for SEMI and ANTI joins unless they're
* marked as inner_unique. This is because nested loop SEMI/ANTI joins
* don't scan the inner node to completion, which will mean memoize cannot
* mark the cache entry as complete.
*
* XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
* = true. Should we? See add_paths_to_joinrel()
* Currently we don't do this for SEMI and ANTI joins, because nested loop
* SEMI/ANTI joins don't scan the inner node to completion, which means
* memoize cannot mark the cache entry as complete. Nor can we mark the
* cache entry as complete after fetching the first inner tuple, because
* if that tuple and the current outer tuple don't satisfy the join
* clauses, a second inner tuple that satisfies the parameters would find
* the cache entry already marked as complete. The only exception is when
* the inner relation is provably unique, as in that case, there won't be
* a second matching tuple and we can safely mark the cache entry as
* complete after fetching the first inner tuple. Note that in such
* cases, the SEMI join should have been reduced to an inner join by
* reduce_unique_semijoins.
*/
if (!extra->inner_unique && (jointype == JOIN_SEMI ||
jointype == JOIN_ANTI))
if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
!extra->inner_unique)
return NULL;

/*
Expand Down
46 changes: 46 additions & 0 deletions src/backend/utils/misc/injection_point.c
Original file line number Diff line number Diff line change
Expand Up @@ -584,3 +584,49 @@ IsInjectionPointAttached(const char *name)
return false; /* silence compiler */
#endif
}

/*
* Retrieve a list of all the injection points currently attached.
*
* This list is palloc'd in the current memory context.
*/
List *
InjectionPointList(void)
{
#ifdef USE_INJECTION_POINTS
List *inj_points = NIL;
uint32 max_inuse;

LWLockAcquire(InjectionPointLock, LW_SHARED);

max_inuse = pg_atomic_read_u32(&ActiveInjectionPoints->max_inuse);

for (uint32 idx = 0; idx < max_inuse; idx++)
{
InjectionPointEntry *entry;
InjectionPointData *inj_point;
uint64 generation;

entry = &ActiveInjectionPoints->entries[idx];
generation = pg_atomic_read_u64(&entry->generation);

/* skip free slots */
if (generation % 2 == 0)
continue;

inj_point = (InjectionPointData *) palloc0(sizeof(InjectionPointData));
inj_point->name = pstrdup(entry->name);
inj_point->library = pstrdup(entry->library);
inj_point->function = pstrdup(entry->function);
inj_points = lappend(inj_points, inj_point);
}

LWLockRelease(InjectionPointLock);

return inj_points;

#else
elog(ERROR, "Injection points are not supported by this build");
return NIL; /* keep compiler quiet */
#endif
}
16 changes: 16 additions & 0 deletions src/include/utils/injection_point.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,19 @@
#ifndef INJECTION_POINT_H
#define INJECTION_POINT_H

#include "nodes/pg_list.h"

/*
 * Injection point data, used when retrieving a list of all the attached
 * injection points.
 *
 * All fields are palloc'd copies owned by the caller of the routine that
 * built the list; they do not point into shared memory.
 */
typedef struct InjectionPointData
{
	const char *name;			/* name the point was attached under */
	const char *library;		/* library implementing the callback */
	const char *function;		/* callback function within the library */
} InjectionPointData;

/*
* Injection points require --enable-injection-points.
*/
Expand Down Expand Up @@ -47,6 +60,9 @@ extern void InjectionPointCached(const char *name, void *arg);
extern bool IsInjectionPointAttached(const char *name);
extern bool InjectionPointDetach(const char *name);

/* Get the current set of injection points attached */
extern List *InjectionPointList(void);

#ifdef EXEC_BACKEND
extern PGDLLIMPORT struct InjectionPointsCtl *ActiveInjectionPoints;
#endif
Expand Down
50 changes: 50 additions & 0 deletions src/test/recovery/t/003_recovery_targets.pl
Original file line number Diff line number Diff line change
Expand Up @@ -187,4 +187,54 @@ sub test_recovery_standby
qr/FATAL: .* recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error');

# Invalid timeline target
$node_standby = PostgreSQL::Test::Cluster->new('standby_9');
$node_standby->init_from_backup($node_primary, 'my_backup',
has_restoring => 1);
$node_standby->append_conf('postgresql.conf',
"recovery_target_timeline = 'bogus'");

$res = run_log(
[
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid timeline target (bogus value)');

my $log_start = $node_standby->wait_for_log("is not a valid number");

# Timeline target out of min range
$node_standby->append_conf('postgresql.conf',
"recovery_target_timeline = '0'");

$res = run_log(
[
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid timeline target (lower bound check)');

$log_start =
$node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);

# Timeline target out of max range
$node_standby->append_conf('postgresql.conf',
"recovery_target_timeline = '4294967296'");

$res = run_log(
[
'pg_ctl',
'--pgdata' => $node_standby->data_dir,
'--log' => $node_standby->logfile,
'start',
]);
ok(!$res, 'invalid timeline target (upper bound check)');

$log_start =
$node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);

done_testing();
60 changes: 60 additions & 0 deletions src/test/regress/expected/memoize.out
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln;
end loop;
end;
Expand Down Expand Up @@ -500,3 +501,62 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
-- Ensure memoize works for ANTI joins
CREATE TABLE tab_anti (a int, b boolean);
INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
ANALYZE tab_anti;
-- Ensure we get a Memoize plan for ANTI join
SELECT explain_memoize('
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;', false);
explain_memoize
--------------------------------------------------------------------------------------------
Aggregate (actual rows=1.00 loops=N)
-> Nested Loop Anti Join (actual rows=33.00 loops=N)
-> Seq Scan on tab_anti t1 (actual rows=100.00 loops=N)
-> Memoize (actual rows=0.67 loops=N)
Cache Key: (t1.a + 1), t1.a
Cache Mode: binary
Hits: 97 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB
-> Subquery Scan on t2 (actual rows=0.67 loops=N)
Filter: ((t1.a + 1) = t2.a)
Rows Removed by Filter: 2
-> Unique (actual rows=2.67 loops=N)
-> Sort (actual rows=67.33 loops=N)
Sort Key: t2_1.a
Sort Method: quicksort Memory: NkB
-> Seq Scan on tab_anti t2_1 (actual rows=100.00 loops=N)
(15 rows)

-- And check we get the expected results.
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;
count
-------
33
(1 row)

-- Ensure we do not add memoize node for SEMI join
EXPLAIN (COSTS OFF)
SELECT * FROM tab_anti t1 WHERE t1.a IN
(SELECT a FROM tab_anti t2 WHERE t2.b IN
(SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
QUERY PLAN
-------------------------------------------------
Nested Loop Semi Join
-> Seq Scan on tab_anti t1
-> Nested Loop Semi Join
Join Filter: (t1.a = t2.a)
-> Seq Scan on tab_anti t2
-> Subquery Scan on "ANY_subquery"
Filter: (t2.b = "ANY_subquery".b)
-> Result
One-Time Filter: (t2.a > 1)
-> Seq Scan on tab_anti t3
(10 rows)

DROP TABLE tab_anti;
27 changes: 27 additions & 0 deletions src/test/regress/sql/memoize.sql
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln;
end loop;
end;
Expand Down Expand Up @@ -244,3 +245,29 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;

-- Ensure memoize works for ANTI joins
CREATE TABLE tab_anti (a int, b boolean);
INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
ANALYZE tab_anti;

-- Ensure we get a Memoize plan for ANTI join
SELECT explain_memoize('
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;', false);

-- And check we get the expected results.
SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
ON t1.a+1 = t2.a
WHERE t2.a IS NULL;

-- Ensure we do not add memoize node for SEMI join
EXPLAIN (COSTS OFF)
SELECT * FROM tab_anti t1 WHERE t1.a IN
(SELECT a FROM tab_anti t2 WHERE t2.b IN
(SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));

DROP TABLE tab_anti;
1 change: 1 addition & 0 deletions src/tools/pgindent/typedefs.list
Original file line number Diff line number Diff line change
Expand Up @@ -1291,6 +1291,7 @@ InjectionPointCacheEntry
InjectionPointCallback
InjectionPointCondition
InjectionPointConditionType
InjectionPointData
InjectionPointEntry
InjectionPointSharedState
InjectionPointsCtl
Expand Down