diff --git a/configure b/configure index 3d3d3db97a456..16ef5b58d1a87 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for PostgreSQL 18beta1. +# Generated by GNU Autoconf 2.69 for PostgreSQL 19devel. # # Report bugs to . # @@ -582,8 +582,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='PostgreSQL' PACKAGE_TARNAME='postgresql' -PACKAGE_VERSION='18beta1' -PACKAGE_STRING='PostgreSQL 18beta1' +PACKAGE_VERSION='19devel' +PACKAGE_STRING='PostgreSQL 19devel' PACKAGE_BUGREPORT='pgsql-bugs@lists.postgresql.org' PACKAGE_URL='https://www.postgresql.org/' @@ -1468,7 +1468,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures PostgreSQL 18beta1 to adapt to many kinds of systems. +\`configure' configures PostgreSQL 19devel to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1533,7 +1533,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of PostgreSQL 18beta1:";; + short | recursive ) echo "Configuration of PostgreSQL 19devel:";; esac cat <<\_ACEOF @@ -1724,7 +1724,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -PostgreSQL configure 18beta1 +PostgreSQL configure 19devel generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2477,7 +2477,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by PostgreSQL $as_me 18beta1, which was +It was created by PostgreSQL $as_me 19devel, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -20063,7 +20063,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by PostgreSQL $as_me 18beta1, which was +This file was extended by PostgreSQL $as_me 19devel, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -20134,7 +20134,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -PostgreSQL config.status 18beta1 +PostgreSQL config.status 19devel configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 4b8335dc6138e..b3efc49c97a9d 100644 --- a/configure.ac +++ b/configure.ac @@ -17,7 +17,7 @@ dnl Read the Autoconf manual for details. dnl m4_pattern_forbid(^PGAC_)dnl to catch undefined macros -AC_INIT([PostgreSQL], [18beta1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/]) +AC_INIT([PostgreSQL], [19devel], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/]) m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required. 
Untested combinations of 'autoconf' and PostgreSQL versions are not diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl index e540cd6606adf..5be0bee32183f 100644 --- a/contrib/amcheck/t/006_verify_gin.pl +++ b/contrib/amcheck/t/006_verify_gin.pl @@ -54,20 +54,17 @@ sub invalid_entry_order_leaf_page_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # produce wrong order by replacing aaaaa with ccccc - string_replace_block( - $relpath, - 'aaaaa', - 'ccccc', - $blkno - ); + string_replace_block($relpath, 'aaaaa', 'ccccc', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; like($stderr, qr/$expected/); } @@ -96,20 +93,17 @@ sub invalid_entry_order_inner_page_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # we have rrrrrrrrr... and tttttttttt... as keys in the root, so produce wrong order by replacing rrrrrrrrrr.... - string_replace_block( - $relpath, - 'rrrrrrrrrr', - 'zzzzzzzzzz', - $blkno - ); + string_replace_block($relpath, 'rrrrrrrrrr', 'zzzzzzzzzz', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; like($stderr, qr/$expected/); } @@ -129,7 +123,7 @@ sub invalid_entry_columns_order_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # mess column numbers # root items order before: (1,aaa), (2,bbb) @@ -139,26 +133,18 @@ sub invalid_entry_columns_order_test my $find = qr/($attrno_1)(.)(aaa)/s; my $replace = $attrno_2 . '$2$3'; - string_replace_block( - $relpath, - $find, - $replace, - $blkno - ); + string_replace_block($relpath, $find, $replace, $blkno); $find = qr/($attrno_2)(.)(bbb)/s; $replace = $attrno_1 . '$2$3'; - string_replace_block( - $relpath, - $find, - $replace, - $blkno - ); + string_replace_block($relpath, $find, $replace, $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295"; like($stderr, qr/$expected/); } @@ -183,20 +169,17 @@ sub inconsistent_with_parent_key__parent_key_corrupted_test $node->stop; - my $blkno = 1; # root + my $blkno = 1; # root # we have nnnnnnnnnn... 
as parent key in the root, so replace it with something smaller then child's keys - string_replace_block( - $relpath, - 'nnnnnnnnnn', - 'aaaaaaaaaa', - $blkno - ); + string_replace_block($relpath, 'nnnnnnnnnn', 'aaaaaaaaaa', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has inconsistent records on page 3 offset 3"; like($stderr, qr/$expected/); } @@ -221,20 +204,17 @@ sub inconsistent_with_parent_key__child_key_corrupted_test $node->stop; - my $blkno = 3; # leaf + my $blkno = 3; # leaf # we have nnnnnnnnnn... as parent key in the root, so replace child key with something bigger - string_replace_block( - $relpath, - 'nnnnnnnnnn', - 'pppppppppp', - $blkno - ); + string_replace_block($relpath, 'nnnnnnnnnn', 'pppppppppp', $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\" has inconsistent records on page 3 offset 3"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\" has inconsistent records on page 3 offset 3"; like($stderr, qr/$expected/); } @@ -254,24 +234,21 @@ sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test $node->stop; - my $blkno = 2; # posting tree root + my $blkno = 2; # posting tree root # we have a posting tree for 'aaaaa' key with the root at 2nd block # and two leaf pages 3 and 4. replace 4th page's high key with (1,1) # so that there are tid's in leaf page that are larger then the new high key. my $find = pack('S*', 0, 4, 0) . '....'; my $replace = pack('S*', 0, 4, 0, 1, 1); - string_replace_block( - $relpath, - $find, - $replace, - $blkno - ); + string_replace_block($relpath, $find, $replace, $blkno); $node->start; - my ($result, $stdout, $stderr) = $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); - my $expected = "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4"; + my ($result, $stdout, $stderr) = + $node->psql('postgres', qq(SELECT gin_index_check('$indexname'))); + my $expected = + "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4"; like($stderr, qr/$expected/); } diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index fef9584f908ec..bcde3cfd0374a 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -180,7 +180,7 @@ - + diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml deleted file mode 100644 index 66a6817a2be0f..0000000000000 --- a/doc/src/sgml/release-18.sgml +++ /dev/null @@ -1,4200 +0,0 @@ - - - - - Release 18 - - - Release date: - 2025-??-??, CURRENT AS OF 2025-06-20 - - - - Overview - - - PostgreSQL 18 contains many new features - and enhancements, including: - - - - - - - (to be completed) - - - - - - The above items and other new features of - PostgreSQL 18 are explained in more detail - in the sections below. - - - - - - - Migration to Version 18 - - - A dump/restore using or use of - or logical replication is required for - those wishing to migrate data from any previous release. See for general information on migrating to new - major releases. 
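The amcheck test changes earlier in this patch drive gin_index_check() through TAP tests and match its error output. For reference, a minimal direct invocation of the function those tests exercise might look like the sketch below (table, column, and index names are hypothetical); on a healthy index it completes without output, while on a corrupted one it raises errors such as the "wrong tuple order on entry tree page" message the tests match against.

    CREATE EXTENSION amcheck;
    CREATE TABLE docs (id int, words text[]);
    CREATE INDEX docs_words_gin ON docs USING gin (words);
    SELECT gin_index_check('docs_words_gin');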
- - - - Version 18 contains a number of changes that may affect compatibility - with previous releases. Observe the following incompatibilities: - - - - - - - - - Change default to enable data checksums - (Greg Sabino Mullane) - § - - - - Checksums can be disabled with the - new initdb option - . - requires matching cluster checksum settings, so this new option can - be useful to upgrade non-checksum old clusters. - - - - - - - - Change time zone abbreviation handling (Tom Lane) - § - - - - The system will now favor the current session's time - zone abbreviations before checking the server variable - . Previously - timezone_abbreviations was checked first. - - - - - - - - Deprecate MD5 password - authentication (Nathan Bossart) - § - - - - Support for MD5 passwords will be removed in a future major - version release. and now emit deprecation warnings when - setting MD5 passwords. These warnings can be disabled by setting - the parameter to - off. - - - - - - - - Change and - to process the inheritance children of a parent (Michael Harris) - § - - - - The previous behavior can be performed by using the new - ONLY option. - - - - - - - - Prevent COPY FROM - from treating \. as an end-of-file marker when - reading CSV files (Daniel Vérité, Tom Lane) - § - § - - - - will still treat - \. as an end-of-file marker when reading - CSV files from STDIN. - Older psql clients connecting to - PostgreSQL 18 servers might experience \copy - problems. This release also enforces that \. - must appear alone on a line. - - - - - - - - Disallow unlogged partitioned tables (Michael Paquier) - § - - - - Previously ALTER TABLE SET - [UN]LOGGED did nothing, and the creation of an - unlogged partitioned table did not cause its children to be unlogged. - - - - - - - - Execute AFTER triggers as the role that was active when - trigger events were queued (Laurenz Albe) - § - - - - Previously such triggers were run as the role that was active at - trigger execution time (e.g., at ). - This is significant for cases where the role is changed between queue - time and transaction commit. - - - - - - - - Remove non-functional support for rule privileges in / (Fujii Masao) - § - - - - These have been non-functional since - PostgreSQL 8.2. - - - - - - - - Remove column pg_backend_memory_contexts.parent - (Melih Mutlu) - § - - - - This is no longer needed since - pg_backend_memory_contexts.path - was added. - - - - - - - - Change - pg_backend_memory_contexts.level - and pg_log_backend_memory_contexts() - to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, - Fujii Masao) - § - § - § - - - - These were previously zero-based. - - - - - - - - - Changes - - - Below you will find a detailed account of the changes between - PostgreSQL 18 and the previous major - release. - - - - Server - - - Optimizer - - - - - - - - Automatically remove some unnecessary table self-joins (Andrey - Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina) - § - - - - This optimization can be disabled using server variable . - - - - - - - - Convert some IN (VALUES - ...) to x = ANY ... 
for better - optimizer statistics (Alena Rybakina, Andrei Lepikhov) - § - - - - - - - - Allow transforming OR-clauses - to arrays for faster index processing (Alexander Korotkov, Andrey - Lepikhov) - § - - - - - - - - Speed up the processing of INTERSECT, - EXCEPT, window aggregates, and view column aliases (Tom Lane, - David Rowley) - § - § - § - § - - - - - - - - Allow the keys of SELECT - DISTINCT to be internally reordered to avoid sorting - (Richard Guo) - § - - - - This optimization can be disabled using . - - - - - - - - Ignore GROUP BY - columns that are functionally dependent on other columns (Zhang - Mingli, Jian He, David Rowley) - § - - - - If a GROUP BY clause includes all columns of - a unique index, as well as other columns of the same table, those - other columns are redundant and can be dropped from the grouping. - This was already true for non-deferred primary keys. - - - - - - - - Allow some HAVING clauses - on GROUPING - SETS to be pushed to WHERE clauses - (Richard Guo) - § - § - § - § - - - - This allows earlier row filtering. This release also fixes some - GROUPING SETS queries that used to return - incorrect results. - - - - - - - - Improve row estimates for generate_series() - using numeric - and timestamp - values (David Rowley, Song Jinzhou) - § - § - - - - - - - - Allow the optimizer to use Right Semi Join plans - (Richard Guo) - § - - - - Semi-joins are used when needing to find if there is at least - one match. - - - - - - - - Allow merge joins to use incremental sorts - (Richard Guo) - § - - - - - - - - Improve the efficiency of planning queries accessing many partitions - (Ashutosh Bapat, Yuya Watari, David Rowley) - § - § - - - - - - - - Allow partitionwise - joins in more cases, and reduce its memory usage (Richard Guo, - Tom Lane, Ashutosh Bapat) - § - § - - - - - - - - Improve cost estimates of partition queries (Nikita Malakhov, - Andrei Lepikhov) - § - - - - - - - - Improve SQL-language - function plan caching (Alexander Pyhalov, Tom Lane) - § - § - - - - - - - - Improve handling of disabled optimizer features (Robert Haas) - § - - - - - - - - - Indexes - - - - - - - - Allow skip scans of btree indexes - (Peter Geoghegan) - § - § - - - - This allows multi-column btree indexes to be used by queries that - only equality-reference the second or later indexed columns. - - - - - - - - Allow non-btree unique indexes to be used as partition keys and in - materialized views (Mark Dilger) - § - § - - - - The index type must still support equality. - - - - - - - - Allow GIN indexes to - be created in parallel (Tomas Vondra, Matthias van de Meent) - § - - - - - - - - Allow values to be sorted to speed range-type GiST and btree - index builds (Bernd Helmle) - § - - - - - - - - - General Performance - - - - - - - - Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, - Nazir Bilal Yavuz, Melanie Plageman) - § - § - § - § - § - § - § - § - § - § - § - - - - This feature allows backends to queue multiple read requests, - which allows for more efficient sequential scans, bitmap - heap scans, vacuums, etc. This is enabled by server - variable , with server - variables and added to control it. - This also enables - and - values greater than zero for systems without - fadvise() support. The new system view pg_aios - shows the file handles being used for asynchronous I/O. 
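The asynchronous I/O item above refers to the PostgreSQL 18 settings io_method and io_workers and the new pg_aios view. A minimal sketch of switching to worker-based asynchronous I/O and inspecting in-flight requests; the values shown are illustrative assumptions, not recommendations:

    ALTER SYSTEM SET io_method = 'worker';  -- requires a server restart to take effect
    ALTER SYSTEM SET io_workers = 4;
    -- after the restart, in-progress asynchronous I/O is visible in the new view:
    SELECT * FROM pg_aios;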
- - - - - - - - Improve the locking performance of queries that access many relations - (Tomas Vondra) - § - - - - - - - - Improve the performance and reduce memory usage of hash joins and - GROUP BY - (David Rowley, Jeff Davis) - § - § - § - § - § - - - - This also improves hash set operations used by EXCEPT, and hash - lookups of subplan values. - - - - - - - - Allow normal vacuums to freeze some pages, even though they are - all-visible (Melanie Plageman) - § - § - - - - This reduces the overhead of later full-relation - freezing. The aggressiveness of this can be - controlled by server variable and per-table setting . Previously - vacuum never processed all-visible pages until freezing was required. - - - - - - - - Add server variable to control - file truncation during (Nathan Bossart, - Gurjeet Singh) - § - - - - A storage-level parameter with the same name and behavior already - existed. - - - - - - - - Increase server variables 's and 's default values to 16 - (Melanie Plageman) - § - § - - - - This more accurately reflects modern hardware. - - - - - - - - - Monitoring - - - - - - - - Increase the logging granularity of server variable (Melanie Plageman) - § - - - - This server variable was previously only boolean, which is still - supported. - - - - - - - - Add log_connections option to report the duration - of connection stages (Melanie Plageman) - § - - - - - - - - Add escape - %L to output the client IP - address (Greg Sabino Mullane) - § - - - - - - - - Add server variable to log - lock acquisition failures (Yuki Seino, Fujii Masao) - § - § - - - - Specifically it reports SELECT - ... NOWAIT lock failures. - - - - - - - - Modify pg_stat_all_tables - and its variants to report the time spent in , , and their - automatic variants (Sami Imseih) - § - - - - The new columns are total_vacuum_time, - total_autovacuum_time, - total_analyze_time, and - total_autoanalyze_time. - - - - - - - - Add delay time reporting to and (Bertrand Drouvot, Nathan Bossart) - § - § - - - - This information appears in the server log, the system views pg_stat_progress_vacuum - and pg_stat_progress_analyze, - and the output of and when in VERBOSE - mode; tracking must be enabled with the server variable . - - - - - - - - Add WAL, CPU, and average - read statistics output to ANALYZE VERBOSE - (Anthonin Bonnefoy) - § - § - - - - - - - - Add full WAL buffer count to - VACUUM/ANALYZE (VERBOSE) - and autovacuum log output (Bertrand Drouvot) - § - - - - - - - - Add per-backend I/O statistics reporting (Bertrand Drouvot) - § - § - - - - The statistics are accessed via pg_stat_get_backend_io(). - Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats(). - - - - - - - - Add pg_stat_io - columns to report I/O activity in bytes (Nazir Bilal Yavuz) - § - - - - The new columns are read_bytes, - write_bytes, and - extend_bytes. The - op_bytes column, which always equaled - BLCKSZ, - has been removed. - - - - - - - - Add WAL I/O activity rows to - pg_stat_io (Nazir Bilal Yavuz, Bertrand - Drouvot, Michael Paquier) - § - § - § - - - - This includes WAL receiver activity and a wait - event for such writes. - - - - - - - - - Change server variable - to control tracking WAL timing - in pg_stat_io instead of pg_stat_wal - (Bertrand Drouvot) - § - - - - - - - - Remove read/sync columns from pg_stat_wal - (Bertrand Drouvot) - § - § - - - - This removes columns wal_write, - wal_sync, - wal_write_time, and - wal_sync_time. 
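The pg_stat_io changes above add byte-based counters alongside the existing operation counts, and per-backend figures become available through pg_stat_get_backend_io(). A sketch of reading both; the function's argument is assumed here to be a backend PID:

    SELECT backend_type, object, context,
           reads, read_bytes, writes, write_bytes, extends, extend_bytes
      FROM pg_stat_io
     WHERE backend_type = 'client backend';

    SELECT * FROM pg_stat_get_backend_io(pg_backend_pid());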
- - - - - - - - Add function pg_stat_get_backend_wal() - to return per-backend WAL statistics (Bertrand - Drouvot) - § - - - - Per-backend WAL - statistics can be cleared via pg_stat_reset_backend_stats(). - - - - - - - - Add function pg_ls_summariesdir() - to specifically list the contents of PGDATA/pg_wal/summaries - (Yushi Ogiwara) - § - - - - - - - - Add column pg_stat_checkpointer.num_done - to report the number of completed checkpoints (Anton A. Melnikov) - § - - - - Columns num_timed and - num_requested count both completed and - skipped checkpoints. - - - - - - - - Add column - pg_stat_checkpointer.slru_written - to report SLRU buffers written (Nitin Jadhav) - § - - - - Also, modify the checkpoint server log message to report separate - shared buffer and SLRU buffer values. - - - - - - - - Add columns to pg_stat_database - to report parallel worker activity (Benoit Lobréau) - § - - - - The new columns are - parallel_workers_to_launch and - parallel_workers_launched. - - - - - - - - Have query id computation - of arrays consider only the first and last array elements (Dmitry - Dolgov, Sami Imseih) - § - § - - - - Jumbling is used by . - - - - - - - - Adjust query id computations to group together queries using the - same relation name (Michael Paquier, Sami Imseih) - § - - - - This is true even if the tables in different schemas have different - column names. - - - - - - - - Add column pg_backend_memory_contexts.type - to report the type of memory context (David Rowley) - § - - - - - - - - Add column - pg_backend_memory_contexts.path - to show memory context parents (Melih Mutlu) - § - - - - - - - - - Privileges - - - - - - - - Add function pg_get_acl() - to retrieve database access control details (Joel Jacobson) - § - § - - - - - - - - Add function has_largeobject_privilege() - to check large object privileges (Yugo Nagata) - § - - - - - - - - Allow to define - large object default privileges (Takatsuka Haruka, Yugo Nagata, - Laurenz Albe) - § - - - - - - - - Add predefined role pg_signal_autovacuum_worker - (Kirill Reshke) - § - - - - This allows sending signals to autovacuum workers. - - - - - - - - - Server Configuration - - - - - - - - Add support for the OAuth authentication - method (Jacob Champion, Daniel Gustafsson, Thomas Munro) - § - - - - This adds an oauth authentication method to pg_hba.conf, - libpq OAuth options, a server variable to load - token validation libraries, and a configure flag - to add the required compile-time libraries. - - - - - - - - Add server variable to allow - specification of multiple colon-separated TLSv1.3 cipher suites - (Erica Zhang, Daniel Gustafsson) - § - - - - - - - - Change server variable 's default - to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion) - § - - - - - - - - Rename server variable ssl_ecdh_curve to and allow multiple colon-separated - ECDH curves to be specified (Erica Zhang, - Daniel Gustafsson) - § - - - - The previous name still works. - - - - - - - - Make cancel request - keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio) - § - § - - - - This is only possible when the server and client support wire - protocol version 3.2, introduced in this release. - - - - - - - - Add server variable - to specify the maximum number of background workers (Nathan Bossart) - § - - - - With this variable set, - can be adjusted at runtime up to this maximum without a server - restart. 
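The item above splits the autovacuum worker limit in two: a restart-only slot count (autovacuum_worker_slots) and a runtime-adjustable worker count (autovacuum_max_workers). A sketch with illustrative values:

    ALTER SYSTEM SET autovacuum_worker_slots = 16;  -- takes effect at the next restart
    ALTER SYSTEM SET autovacuum_max_workers = 8;    -- adjustable at runtime, up to the slot count
    SELECT pg_reload_conf();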
- - - - - - - - Allow specification of the fixed number of dead tuples that will - trigger an autovacuum (Nathan - Bossart, Frédéric Yhuel) - § - - - - The server variable is . Percentages are - still used for triggering. - - - - - - - - Change server variable - to limit only files opened by a backend (Andres Freund) - § - - - - Previously files opened by the postmaster were also counted toward - this limit. - - - - - - - - Add server variable to - report the required number of semaphores (Nathan Bossart) - § - - - - This is useful for operating system configuration. - - - - - - - - Add server variable to - specify the location of extension control files (Peter Eisentraut, - Matheus Alcantara) - § - § - - - - - - - - - Streaming Replication and Recovery - - - - - - - - Allow inactive replication slots to be automatically invalided using - server variable - (Nisha Moond, Bharath Rupireddy) - § - - - - - - - - Add server variable to control the - maximum active replication origins (Euler Taveira) - § - - - - This was previously controlled by , but this new setting allows - a higher origin count in cases where fewer slots are required. - - - - - - - - - <link linkend="logical-replication">Logical Replication</link> - - - - - - - - Allow the values of generated - columns to be logically replicated (Shubham Khanna, Vignesh C, - Zhijie Hou, Shlok Kyal, Peter Smith) - § - § - § - § - - - - If the publication specifies a column list, all specified - columns, generated and non-generated, are published. - Without a specified column list, publication option - publish_generated_columns controls whether - generated columns are published. Previously generated columns - were not replicated and the subscriber had to compute - the values if possible; this is particularly useful for - non-PostgreSQL subscribers which lack - such a capability. - - - - - - - - Change the default streaming - option from off to parallel - (Vignesh C) - § - - - - - - - - Allow to change the - replication slot's two-phase commit behavior (Hayato Kuroda, Ajin - Cherian, Amit Kapila, Zhijie Hou) - § - § - - - - - - - - Log conflicts while - applying logical replication changes (Zhijie Hou, Nisha Moond) - § - § - § - § - § - - - - Also report in new columns of pg_stat_subscription_stats. - - - - - - - - - - - Utility Commands - - - - - - - - Allow generated - columns to be virtual, and make them the default (Peter - Eisentraut, Jian He, Richard Guo, Dean Rasheed) - § - § - § - - - - Virtual generated columns generate their values when the columns - are read, not written. The write behavior can still be specified - via the STORED option. - - - - - - - - Add OLD/NEW support to RETURNING in - DML queries (Dean Rasheed) - § - - - - Previously RETURNING only returned new values for - and , and old - values for ; - would return the appropriate value for the internal query executed. - This new syntax allows the RETURNING list of - INSERT/UPDATE/DELETE/MERGE - to explicitly return old and new values by using the special aliases - old and new. These aliases - can be renamed to avoid identifier conflicts. - - - - - - - - Allow foreign tables to be created like existing local tables - (Zhang Mingli) - § - - - - The syntax is CREATE - FOREIGN TABLE ... LIKE. - - - - - - - - Allow LIKE - with nondeterministic - collations (Peter Eisentraut) - § - - - - - - - - Allow text position search functions with nondeterministic collations - (Peter Eisentraut) - § - - - - These used to generate an error. 
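Two SQL-visible changes in this section lend themselves to short examples: the RETURNING old/new aliases in DML, and LIKE under nondeterministic collations. A sketch, assuming an ICU-enabled build; the table and collation names are hypothetical:

    UPDATE products
       SET price = price * 1.10
     WHERE id = 42
    RETURNING old.price AS price_before, new.price AS price_after;

    CREATE COLLATION ci (provider = icu, locale = 'und-u-ks-level2',
                         deterministic = false);
    SELECT 'Foo' LIKE 'foo' COLLATE ci;  -- accepted as of this release; previously an error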
- - - - - - - - Add builtin collation provider PG_UNICODE_FAST - (Jeff Davis) - § - - - - This locale supports case mapping, but sorts in code point order, - not natural language order. - - - - - - - - Allow and - to process partitioned tables without processing their children - (Michael Harris) - § - - - - This is enabled with the new ONLY option. This is - useful since autovacuum does not process partitioned tables, just - its children. - - - - - - - - Add functions to modify per-relation and per-column optimizer - statistics (Corey Huinker) - § - § - § - - - - The functions are pg_restore_relation_stats(), - pg_restore_attribute_stats(), - pg_clear_relation_stats(), and - pg_clear_attribute_stats(). - - - - - - - - - Add server variable to control - the file copying method (Nazir Bilal Yavuz) - § - - - - This controls whether CREATE DATABASE - ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET - TABLESPACE uses file copy or clone. - - - - - - - <link linkend="ddl-constraints">Constraints</link> - - - - - - - - Allow the specification of non-overlapping PRIMARY - KEY and UNIQUE - constraints (Paul A. Jungwirth) - § - - - - This is specified by WITHOUT OVERLAPS on the - last specified column. - - - - - - - - Allow CHECK - and foreign - key constraints to be specified as NOT - ENFORCED (Amul Sul) - § - § - - - - This also adds column pg_constraint.conenforced. - - - - - - - - Require primary/foreign key - relationships to use either deterministic collations or the the - same nondeterministic collations (Peter Eisentraut) - § - - - - The restore of a , also used by , will fail if these requirements are not met; - schema changes must be made for these upgrade methods to succeed. - - - - - - - - Store column NOT - NULL specifications in pg_constraint - (Álvaro Herrera, Bernd Helmle) - § - - - - This allows names to be specified for NOT NULL - constraint. This also adds NOT NULL constraints - to foreign tables and NOT NULL inheritance - control to local tables. - - - - - - - - Allow to set the NOT - VALID attribute of NOT NULL constraints - (Rushabh Lathia, Jian He) - § - - - - - - - - Allow modification of the inheritability of NOT - NULL constraints (Suraj Kharage, Álvaro Herrera) - § - § - - - - The syntax is ALTER TABLE - ... ALTER CONSTRAINT ... [NO] INHERIT. - - - - - - - - Allow NOT VALID foreign key constraints on - partitioned tables (Amul Sul) - § - - - - - - - - Allow dropping - of constraints ONLY on partitioned tables - (Álvaro Herrera) - § - - - - This was previously erroneously prohibited. - - - - - - - - <xref linkend="sql-copy"/> - - - - - - - - Add REJECT_LIMIT to control the number of invalid - rows COPY FROM can ignore (Atsushi Torikoshi) - § - - - - This is available when ON_ERROR = 'ignore'. - - - - - - - - Allow COPY TO to copy rows from populated - materialized views (Jian He) - § - - - - - - - - Add COPY LOG_VERBOSITY level - silent to suppress log output of ignored rows - (Atsushi Torikoshi) - § - - - - This new level suppresses output for discarded input rows when - on_error = 'ignore'. - - - - - - - - Disallow COPY FREEZE on foreign tables (Nathan - Bossart) - § - - - - Previously, the COPY worked but the - FREEZE was ignored, so disallow this command. 
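The COPY items above combine as in the following sketch: invalid rows are skipped up to a limit, and the per-row log output for skipped rows is suppressed (the file path and table are hypothetical):

    COPY t FROM '/tmp/load.csv'
    WITH (FORMAT csv, ON_ERROR ignore, REJECT_LIMIT 10, LOG_VERBOSITY silent);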
- - - - - - - - <xref linkend="sql-explain"/> - - - - - - - - Automatically include BUFFERS output in - EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley) - § - - - - - - - - Add full WAL buffer count to EXPLAIN - (WAL) output (Bertrand Drouvot) - § - - - - - - - - In EXPLAIN ANALYZE, report the number of index - lookups used per index scan node (Peter Geoghegan) - § - - - - - - - - Modify EXPLAIN to output fractional row counts - (Ibrar Ahmed, Ilia Evdokimov, Robert Haas) - § - § - - - - - - - - Add memory and disk usage details to Material, - Window Aggregate, and common table expression - nodes to EXPLAIN output (David Rowley, Tatsuo - Ishii) - § - § - § - § - - - - - - - - - Add details about window function arguments to - EXPLAIN output (Tom Lane) - § - - - - - - - - Add Parallel Bitmap Heap Scan worker cache - statistics to EXPLAIN ANALYZE (David Geier, - Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley) - § - - - - - - - - Indicate disabled nodes in EXPLAIN ANALYZE output - (Robert Haas, David Rowley, Laurenz Albe) - § - § - § - - - - - - - - - - - Data Types - - - - - - - - Improve Unicode - full case mapping and conversion (Jeff Davis) - § - § - - - - This adds the ability to do conditional and title case mapping, - and case map single characters to multiple characters. - - - - - - - - Allow jsonb - null values to be cast to scalar types as - NULL (Tom Lane) - § - - - - Previously such casts generated an error. - - - - - - - - Add optional parameter to json{b}_strip_nulls - to allow removal of null array elements (Florents Tselai) - § - - - - - - - - Add function array_sort() - which sorts an array's first dimension (Junwang Zhao, Jian He) - § - - - - - - - - Add function array_reverse() - which reverses an array's first dimension (Aleksander Alekseev) - § - - - - - - - - Add function reverse() - to reverse bytea bytes (Aleksander Alekseev) - § - - - - - - - - Allow casting between integer types and bytea (Aleksander - Alekseev) - § - - - - The integer values are stored as bytea two's complement - values. - - - - - - - - Update Unicode data to Unicode 16.0.0 (Peter - Eisentraut) - § - - - - - - - - Add full text search stemming for Estonian - (Tom Lane) - § - - - - - - - - Improve the XML - error codes to more closely match the SQL standard - (Tom Lane) - § - - - - These errors are reported via SQLSTATE. - - - - - - - - - Functions - - - - - - - - Add function casefold() - to allow for more sophisticated case-insensitive matching (Jeff Davis) - § - - - - This allows more accurate comparisons, i.e., a character can have - multiple upper or lower case equivalents, or upper or lower case - conversion changes the number of characters. - - - - - - - - Allow MIN()/MAX() - aggregates on arrays and composite types (Aleksander Alekseev, - Marat Buharov) - § - § - - - - - - - - Add a WEEK option to EXTRACT() - (Tom Lane) - § - - - - - - - - Improve the output EXTRACT(QUARTER ...) for - negative values (Tom Lane) - § - - - - - - - - Add roman numeral support to to_number() - (Hunaid Sohail) - § - - - - This is accessed via the RN pattern. - - - - - - - - Add UUID - version 7 generation function uuidv7() - (Andrey Borodin) - § - - - - This UUID value is - temporally sortable. Function alias uuidv4() - has been added to explicitly generate version 4 UUIDs. 
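Several of the new functions above are easiest to show directly; expected results are noted as comments:

    SELECT uuidv7();                      -- temporally sortable UUID
    SELECT uuidv4();                      -- explicitly version-4 UUID
    SELECT array_sort(ARRAY[3,1,2]);      -- {1,2,3}
    SELECT array_reverse(ARRAY[1,2,3]);   -- {3,2,1}
    SELECT to_number('XIV', 'RN');        -- 14, via the new roman-numeral pattern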
- - - - - - - - Add functions crc32() - and crc32c() - to compute CRC values (Aleksander Alekseev) - § - - - - - - - - Add math functions gamma() - and lgamma() - (Dean Rasheed) - § - - - - - - - - Allow => syntax for named cursor arguments in - PL/pgSQL (Pavel Stehule) - § - - - - We previously only accepted :=. - - - - - - - - Allow regexp_match[es]()/regexp_like()/regexp_replace()/regexp_count()/regexp_instr()/regexp_substr()/regexp_split_to_table()/regexp_split_to_array() - to use named arguments (Jian He) - § - - - - - - - - - <xref linkend="libpq"/> - - - - - - - - Add function PQfullProtocolVersion() - to report the full, including minor, protocol version number (Jacob - Champion, Jelte Fennema-Nio) - § - - - - - - - - Add libpq connection parameters - and environment variables to - specify the minimum and maximum acceptable protocol version for - connections (Jelte Fennema-Nio) - § - § - - - - - - - - Add libpq function PQservice() - to return the connection service name (Michael Banck) - § - - - - - - - - Report changes to the client - (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra) - § - § - - - - - - - - Add PQtrace() output - for all message types, including authentication (Jelte Fennema-Nio) - § - § - § - § - § - - - - - - - - Add libpq connection parameter sslkeylogfile - which dumps out SSL key material (Abhishek Chanda, - Daniel Gustafsson) - § - - - - This is useful for debugging. - - - - - - - - Modify some libpq function signatures to use - int64_t (Thomas Munro) - § - - - - These previously used pg_int64, which is now - deprecated. - - - - - - - - - <xref linkend="app-psql"/> - - - - - - - - Allow psql to parse, bind, and close - named prepared statements (Anthonin Bonnefoy, Michael Paquier) - § - - - - This is accomplished with new commands \parse, - \bind_named, - and \close_prepared. - - - - - - - - Add psql backslash commands to allowing - issuance of pipeline queries (Anthonin Bonnefoy) - § - § - § - - - - The new commands are \startpipeline, - \syncpipeline, \sendpipeline, - \endpipeline, \flushrequest, - \flush, and \getresults. - - - - - - - - Allow adding pipeline status to the psql - prompt and add related state variables (Anthonin Bonnefoy) - § - - - - The new prompt character is %P and - the new psql variables are PIPELINE_SYNC_COUNT, - PIPELINE_COMMAND_COUNT, - and PIPELINE_RESULT_COUNT. - - - - - - - - Allow adding the connection service name to the - psql prompt or access it via - psql variable (Michael Banck) - § - - - - - - - - Add psql option to use expanded mode on - all list commands (Dean Rasheed) - § - - - - Adding backslash suffix x enables this. - - - - - - - - Change psql's to use tabular format - and include more information (Álvaro Herrera, Maiquel Grassi, - Hunaid Sohail) - § - - - - - - - - Add function's leakproof indicator - to psql's \df+, - \do+, \dAo+, and - \dC+ outputs (Yugo Nagata) - § - - - - - - - - Add access method details for partitioned relations in \dP+ - (Justin Pryzby) - § - - - - - - - - Add default_version - to the psql \dx - extension output (Magnus Hagander) - § - - - - - - - - Add psql variable to set the default \watch - wait time (Daniel Gustafsson) - § - - - - - - - - - Server Applications - - - - - - - - Change to default to enabling checksums - (Greg Sabino Mullane) - § - § - - - - The new initdb option - disables checksums. - - - - - - - - Add initdb option - to avoid syncing heap/index - files (Nathan Bossart) - § - - - - initdb option - is still available to avoid syncing any files. 
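The psql pipeline commands described above fit together roughly as in this sketch of an interactive session; the statements and parameter values are illustrative:

    \startpipeline
    SELECT $1 \bind 42 \sendpipeline
    SELECT now() \bind \sendpipeline
    \syncpipeline
    \endpipeline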
- - - - - - - - Add option - to compute only missing - optimizer statistics (Corey Huinker, Nathan Bossart) - § - § - - - - This option can only be used by - and . - - - - - - - - Add option - / to enable hard linking - (Israel Barth Rubio, Robert Haas) - § - - - - Only some files can be hard linked. This should not be used if the - backups will be used independently. - - - - - - - - Allow to verify tar-format - backups (Amul Sul) - § - - - - - - - - If 's - specifies a database name, use it in - output (Masahiko Sawada) - § - - - - - - - - Add option - to change the default - char signedness (Masahiko Sawada) - § - - - - - - - - <link - linkend="app-pgdump"><application>pg_dump</application></link>/<link - linkend="app-pg-dumpall"><application>pg_dumpall</application></link>/<link - linkend="app-pgrestore"><application>pg_restore</application></link> - - - - - - - - Allow to dump in the same output - formats as pg_dump supports (Mahendra - Singh Thalor, Andrew Dunstan) - § - - - - Also modify to handle such dumps. - Previously pg_dumpall only supported - text format. - - - - - - - - Add options - , , - and (Jeff Davis) - § - - - - - - - - Add pg_dump and option to - dump sequence data that would normally be excluded (Nathan Bossart) - § - § - - - - - - - - Add , , - and options - , , - , and - (Corey Huinker, Jeff Davis) - § - - - - - - - - Add option to disable row level - security policy processing in , - , - (Nikolay Samokhvalov) - § - - - - This is useful for migrating to systems with different policies. - - - - - - - - - <xref linkend="pgupgrade"/> - - - - - - - - Allow pg_upgrade to preserve optimizer - statistics (Corey Huinker, Jeff Davis, Nathan Bossart) - § - § - § - § - - - - Extended statistics are not preserved. Also add - pg_upgrade option - to disable statistics preservation. - - - - - - - - Allow pg_upgrade to process database - checks in parallel (Nathan Bossart) - § - § - § - § - § - § - § - § - § - § - § - - - - This is controlled by the existing option. - - - - - - - - Add pg_upgrade option - to swap directories rather than copy, clone, - or link files (Nathan Bossart) - § - - - - This mode is potentially the fastest. - - - - - - - - Add pg_upgrade option - to set the default - char signedness of new cluster (Masahiko Sawada) - § - § - - - - This is to handle cases where a - pre-PostgreSQL 18 cluster's default - CPU signedness does not match the new cluster. - - - - - - - - - Logical Replication Applications - - - - - - - - Add option - to create logical replicas for all databases - (Shubham Khanna) - § - - - - - - - - Add pg_createsubscriber option - to remove publications (Shubham Khanna) - § - - - - - - - - Add pg_createsubscriber option - to enable prepared transactions - (Shubham Khanna) - § - - - - - - - - Add option - to specify failover slots (Hayato Kuroda) - § - - - - - - - - Allow pg_recvlogical - to work without - (Hayato Kuroda) - § - - - - - - - - - - - Source Code - - - - - - - - Separate the loading and running of injection points - (Michael Paquier, Heikki Linnakangas) - § - § - - - - Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), - and such injection points can be run via INJECTION_POINT_CACHED(). 
- - - - - - - - Support runtime arguments in injection points (Michael Paquier) - § - - - - - - - - Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() - (Heikki Linnakangas) - § - - - - - - - - Improve the performance of processing long JSON strings using - SIMD (Single Instruction Multiple Data) (David - Rowley) - § - - - - - - - - Speed up CRC32C calculations using x86 AVX-512 - instructions (Raghuveer Devulapalli, Paul Amonson) - § - - - - - - - - Add ARM Neon and SVE CPU - intrinsics for popcount (integer bit counting) (Chiranmoy - Bhattacharya, Devanga Susmitha, Rama Malladi) - § - § - - - - - - - - Improve the speed of numeric multiplication and division (Joel - Jacobson, Dean Rasheed) - § - § - § - § - - - - - - - - Add configure option - to enable NUMA awareness (Jakub Wartak, Bertrand - Drouvot) - § - § - § - - - - The function pg_numa_available() - reports on NUMA awareness, and system views pg_shmem_allocations_numa - and pg_buffercache_numa - which report on shared memory distribution across - NUMA nodes. - - - - - - - - Add TOAST table to pg_index - to allow for very large expression indexes (Nathan Bossart) - § - - - - - - - - Remove column pg_attribute.attcacheoff - (David Rowley) - § - - - - - - - - Add column pg_class.relallfrozen - (Melanie Plageman) - § - - - - - - - - Add amgettreeheight, - amconsistentequality, and - amconsistentordering to the index access method - API (Mark Dilger) - § - § - - - - - - - - Add GiST support function stratnum() - (Paul A. Jungwirth) - § - - - - - - - - Record the default CPU signedness of - char in - (Masahiko Sawada) - § - - - - - - - - Add support for Python "Limited API" in (Peter Eisentraut) - § - § - - - - This helps prevent problems caused by - Python 3.x version mismatches. - - - - - - - - Change the minimum supported Python - version to 3.6.8 (Jacob Champion) - § - - - - - - - - Remove support for OpenSSL versions older - than 1.1.1 (Daniel Gustafsson) - § - § - - - - - - - - If LLVM is enabled, require version 14 - or later (Thomas Munro) - § - - - - - - - - Add macro PG_MODULE_MAGIC_EXT - to allow extensions to report their name and version (Andrei Lepikhov) - § - - - - This information can be access via the new function pg_get_loaded_modules(). - - - - - - - - Document that SPI_connect()/SPI_connect_ext() - always returns success (SPI_OK_CONNECT) (Stepan - Neretin) - § - - - - Errors are always reported via ereport(). - - - - - - - - Add documentation - section about API and ABI - compatibility (David Wheeler, Peter Eisentraut) - § - - - - - - - - Remove the experimental designation of - Meson builds on Windows (Aleksander Alekseev) - § - - - - - - - - Remove configure options and - (Thomas Munro) - § - § - - - - Thirty-two-bit atomic operations are now required. - - - - - - - - Remove support for the - HPPA/PA-RISC architecture - (Tom Lane) - § - - - - - - - - - Additional Modules - - - - - - - - Add extension to inspect logical - snapshots (Bertrand Drouvot) - § - - - - - - - - Add extension which adds debug details - to EXPLAIN - output (Robert Haas) - § - - - - - - - - Add output columns to postgres_fdw_get_connections() - (Hayato Kuroda, Sagar Dilip Shedge) - § - § - § - § - - - - New output column used_in_xact indicates - if the foreign data wrapper is being used by a current transaction, - closed indicates if it is closed, - user_name indicates the user name, and - remote_backend_pid indicates the remote - backend process identifier. 
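The postgres_fdw item above adds output columns to postgres_fdw_get_connections(); a sketch of inspecting them, using the column names given in the note (server_name and valid existed before this release):

    SELECT server_name, user_name, valid, used_in_xact, closed,
           remote_backend_pid
      FROM postgres_fdw_get_connections();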
- - - - - - - - Allow SCRAM - authentication from the client to be passed to servers (Matheus Alcantara, Peter Eisentraut) - § - - - - This avoids storing postgres_fdw - authentication information in the database, and is - enabled with the postgres_fdw use_scram_passthrough - connection option. libpq uses new connection parameters - and . - - - - - - - - Allow SCRAM authentication from the client to be - passed to servers (Matheus Alcantara) - § - - - - - - - - Add on_error and log_verbosity - options to (Atsushi Torikoshi) - § - - - - These control how file_fdw handles and - reports invalid file rows. - - - - - - - - Add reject_limit to control the number of - invalid rows file_fdw can ignore (Atsushi - Torikoshi) - § - - - - This is active when ON_ERROR = 'ignore'. - - - - - - - - Add configurable variable min_password_length to - (Emanuele Musella, Maurizio Boriani) - § - - - - This controls the minimum password length. - - - - - - - - Have report the number of failed, retried, - or skipped transactions in per-script reports (Yugo Nagata) - § - - - - - - - - Add server variable weak - to control invalid check digit acceptance (Viktor Holmberg) - § - - - - This was previously only controlled by function isn_weak(). - - - - - - - - Allow values to be sorted to speed - index builds (Bernd Helmle, Andrey Borodin) - § - - - - - - - - Add check function gin_index_check() - to verify GIN indexes (Grigory Kryachko, Heikki - Linnakangas, Andrey Borodin) - § - - - - - - - - Add functions pg_buffercache_evict_relation() - and pg_buffercache_evict_all() - to evict unpinned shared buffers (Nazir Bilal Yavuz) - § - - - - The existing function pg_buffercache_evict() - now returns the buffer flush status. - - - - - - - - Allow extensions to install custom - options (Robert Haas, Sami Imseih) - § - § - § - - - - - - - - Allow extensions to use the server's cumulative statistics - API (Michael Paquier) - § - § - - - - - - - <xref linkend="pgstatstatements"/> - - - - - - - - Allow the queries of - and to be tracked by - pg_stat_statements (Anthonin Bonnefoy) - § - - - - They are also now assigned query ids. - - - - - - - - Allow the parameterization of values in - pg_stat_statements (Greg Sabino Mullane, - Michael Paquier) - § - - - - This reduces the bloat caused by SET statements - with differing constants. - - - - - - - - Add pg_stat_statements - columns to report parallel activity (Guillaume Lelarge) - § - - - - The new columns are - parallel_workers_to_launch and - parallel_workers_launched. - - - - - - - - Add - pg_stat_statements.wal_buffers_full - to report full WAL buffers (Bertrand Drouvot) - § - - - - - - - - - <xref linkend="pgcrypto"/> - - - - - - - - Add pgcrypto algorithms sha256crypt - and sha512crypt - (Bernd Helmle) - § - - - - - - - - Add CFB mode - to pgcrypto encryption and decryption - (Umar Hayat) - § - - - - - - - - Add function fips_mode() - to report the server's FIPS mode (Daniel - Gustafsson) - § - - - - - - - - Add pgcrypto server variable builtin_crypto_enabled - to allow disabling builtin non-FIPS mode - cryptographic functions (Daniel Gustafsson, Joe Conway) - § - - - - This is useful for guaranteeing FIPS mode behavior. - - - - - - - - - - - - Acknowledgments - - - The following individuals (in alphabetical order) have contributed - to this release as patch authors, committers, reviewers, testers, - or reporters of issues. 
- - - - (to be completed) - - - - diff --git a/doc/src/sgml/release-19.sgml b/doc/src/sgml/release-19.sgml new file mode 100644 index 0000000000000..8d242b5b28141 --- /dev/null +++ b/doc/src/sgml/release-19.sgml @@ -0,0 +1,16 @@ + + + + + Release 19 + + + Release date: + 2026-??-?? + + + + This is just a placeholder for now. + + + diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml index cee577ff8d353..a659d382db95c 100644 --- a/doc/src/sgml/release.sgml +++ b/doc/src/sgml/release.sgml @@ -70,7 +70,7 @@ For new features, add links to the documentation sections. All the active branches have to be edited concurrently when doing that. --> -&release-18; +&release-19; Prior Releases diff --git a/meson.build b/meson.build index 6ffe7b4727556..36e168a1a2ace 100644 --- a/meson.build +++ b/meson.build @@ -8,7 +8,7 @@ project('postgresql', ['c'], - version: '18beta1', + version: '19devel', license: 'PostgreSQL', # We want < 0.56 for python 3.5 compatibility on old platforms. EPEL for diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl index 15dd10ce40a31..b7ef7ed8d06b7 100644 --- a/src/bin/initdb/t/001_initdb.pl +++ b/src/bin/initdb/t/001_initdb.pl @@ -76,7 +76,8 @@ 'checksums are enabled in control file'); command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only'); -command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files'); +command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], + '--no-sync-data-files'); command_fails([ 'initdb', $datadir ], 'existing data directory'); if ($supports_syncfs) diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl index 5f46357e72ac7..1b7a6f6f43fdd 100644 --- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl +++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl @@ -147,7 +147,8 @@ 'slot with failover created'); my $result = $node->safe_psql('postgres', - "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"); + "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'" +); is($result, 't', "failover is enabled for the new slot"); done_testing(); diff --git a/src/bin/pg_combinebackup/t/010_hardlink.pl b/src/bin/pg_combinebackup/t/010_hardlink.pl index a0ee419090cf6..4f92d6676bdef 100644 --- a/src/bin/pg_combinebackup/t/010_hardlink.pl +++ b/src/bin/pg_combinebackup/t/010_hardlink.pl @@ -56,7 +56,7 @@ '--pgdata' => $backup1path, '--no-sync', '--checkpoint' => 'fast', - '--wal-method' => 'none' + '--wal-method' => 'none' ], "full backup"); @@ -74,7 +74,7 @@ '--pgdata' => $backup2path, '--no-sync', '--checkpoint' => 'fast', - '--wal-method' => 'none', + '--wal-method' => 'none', '--incremental' => $backup1path . '/backup_manifest' ], "incremental backup"); @@ -112,45 +112,45 @@ # of the given data file. sub check_data_file { - my ($data_file, $last_segment_nlinks) = @_; - - my @data_file_segments = ($data_file); - - # Start checking for additional segments - my $segment_number = 1; - - while (1) - { - my $next_segment = $data_file . '.' . $segment_number; - - # If the file exists and is a regular file, add it to the list - if (-f $next_segment) - { - push @data_file_segments, $next_segment; - $segment_number++; - } - # Stop the loop if the file doesn't exist - else - { - last; - } - } - - # All segments of the given data file should contain 2 hard links, except - # for the last one, which should match the given number of links. 
- my $last_segment = pop @data_file_segments; - - for my $segment (@data_file_segments) - { - # Get the file's stat information of each segment - my $nlink_count = get_hard_link_count($segment); - ok($nlink_count == 2, "File '$segment' has 2 hard links"); - } - - # Get the file's stat information of the last segment - my $nlink_count = get_hard_link_count($last_segment); - ok($nlink_count == $last_segment_nlinks, - "File '$last_segment' has $last_segment_nlinks hard link(s)"); + my ($data_file, $last_segment_nlinks) = @_; + + my @data_file_segments = ($data_file); + + # Start checking for additional segments + my $segment_number = 1; + + while (1) + { + my $next_segment = $data_file . '.' . $segment_number; + + # If the file exists and is a regular file, add it to the list + if (-f $next_segment) + { + push @data_file_segments, $next_segment; + $segment_number++; + } + # Stop the loop if the file doesn't exist + else + { + last; + } + } + + # All segments of the given data file should contain 2 hard links, except + # for the last one, which should match the given number of links. + my $last_segment = pop @data_file_segments; + + for my $segment (@data_file_segments) + { + # Get the file's stat information of each segment + my $nlink_count = get_hard_link_count($segment); + ok($nlink_count == 2, "File '$segment' has 2 hard links"); + } + + # Get the file's stat information of the last segment + my $nlink_count = get_hard_link_count($last_segment); + ok($nlink_count == $last_segment_nlinks, + "File '$last_segment' has $last_segment_nlinks hard link(s)"); } @@ -159,11 +159,11 @@ sub check_data_file # that file. sub get_hard_link_count { - my ($file) = @_; + my ($file) = @_; - # Get file stats - my @stats = stat($file); - my $nlink = $stats[3]; # Number of hard links + # Get file stats + my @stats = stat($file); + my $nlink = $stats[3]; # Number of hard links - return $nlink; + return $nlink; } diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl index 0be9f6dd538fd..c3c5fae11eaaf 100644 --- a/src/bin/pg_dump/t/001_basic.pl +++ b/src/bin/pg_dump/t/001_basic.pl @@ -240,17 +240,20 @@ command_fails_like( [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ], qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/, - 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'); + 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only' +); command_fails_like( [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ], qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --exclude-database is used in pg_restore with dump of pg_dump'); + 'When option --exclude-database is used in pg_restore with dump of pg_dump' +); command_fails_like( [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ], qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --globals-only is not used in pg_restore with dump of pg_dump'); + 'When option --globals-only is not used in pg_restore with dump of pg_dump' +); # also fails for -r and -t, but it seems pointless to add more tests for those. 
command_fails_like( diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index e1cfa99874ec4..2485d8f360e5a 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -368,7 +368,7 @@ '--data-only', '--superuser' => 'test_superuser', '--disable-triggers', - '--verbose', # no-op, just make sure it works + '--verbose', # no-op, just make sure it works 'postgres', ], }, @@ -810,8 +810,7 @@ dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/no_schema.sql", '--no-schema', - '--with-statistics', - 'postgres', + '--with-statistics', 'postgres', ], },); diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl index 0ea02a3a4a940..c274b777586ad 100644 --- a/src/bin/pg_dump/t/006_pg_dumpall.pl +++ b/src/bin/pg_dump/t/006_pg_dumpall.pl @@ -294,17 +294,17 @@ '--format' => 'directory', '--globals-only', '--file' => "$tempdir/dump_globals_only", - ], - restore_cmd => [ - 'pg_restore', '-C', '--globals-only', - '--format' => 'directory', - '--file' => "$tempdir/dump_globals_only.sql", - "$tempdir/dump_globals_only", - ], - like => qr/ + ], + restore_cmd => [ + 'pg_restore', '-C', '--globals-only', + '--format' => 'directory', + '--file' => "$tempdir/dump_globals_only.sql", + "$tempdir/dump_globals_only", + ], + like => qr/ ^\s*\QCREATE ROLE dumpall;\E\s*\n /xm - }, ); + },); # First execute the setup_sql foreach my $run (sort keys %pgdumpall_runs) @@ -339,7 +339,8 @@ # pg_restore --file output file. my $output_file = slurp_file("$tempdir/${run}.sql"); - if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike})) + if ( !($pgdumpall_runs{$run}->{like}) + && !($pgdumpall_runs{$run}->{unlike})) { die "missing \"like\" or \"unlike\" in test \"$run\""; } @@ -361,30 +362,38 @@ # Some negative test case with dump of pg_dumpall and restore using pg_restore # test case 1: when -C is not used in pg_restore with dump of pg_dumpall $node->command_fails_like( - [ 'pg_restore', - "$tempdir/format_custom", - '--format' => 'custom', - '--file' => "$tempdir/error_test.sql", ], - qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, - 'When -C is not used in pg_restore with dump of pg_dumpall'); + [ + 'pg_restore', + "$tempdir/format_custom", + '--format' => 'custom', + '--file' => "$tempdir/error_test.sql", + ], + qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, + 'When -C is not used in pg_restore with dump of pg_dumpall'); # test case 2: When --list option is used with dump of pg_dumpall $node->command_fails_like( - [ 'pg_restore', + [ + 'pg_restore', "$tempdir/format_custom", '-C', - '--format' => 'custom', '--list', - '--file' => "$tempdir/error_test.sql", ], + '--format' => 'custom', + '--list', + '--file' => "$tempdir/error_test.sql", + ], qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/, 'When --list is used in pg_restore with dump of pg_dumpall'); # test case 3: When non-exist database is given with -d option $node->command_fails_like( - [ 'pg_restore', + [ + 'pg_restore', "$tempdir/format_custom", '-C', '--format' => 'custom', - '-d' => 'dbpq', ], + '-d' => 'dbpq', + ], qr/\Qpg_restore: error: could not connect to database "dbpq"\E/, - 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'); + 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall' +); 
$node->stop('fast'); diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm index 3efab8317978a..b0234ebfaf218 100644 --- a/src/bin/pg_rewind/t/RewindTest.pm +++ b/src/bin/pg_rewind/t/RewindTest.pm @@ -285,7 +285,7 @@ sub run_pg_rewind # Check that pg_rewind with dbname and --write-recovery-conf # wrote the dbname in the generated primary_conninfo value. like(slurp_file("$primary_pgdata/postgresql.auto.conf"), - qr/dbname=postgres/m, 'recovery conf file sets dbname'); + qr/dbname=postgres/m, 'recovery conf file sets dbname'); # Check that standby.signal is here as recovery configuration # was requested. diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl index c545abf65816e..e46f02c6cc612 100644 --- a/src/bin/pg_upgrade/t/004_subscription.pl +++ b/src/bin/pg_upgrade/t/004_subscription.pl @@ -53,7 +53,8 @@ $old_sub->stop; -$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0"); +$new_sub->append_conf('postgresql.conf', + "max_active_replication_origins = 0"); # pg_upgrade will fail because the new cluster has insufficient # max_active_replication_origins. @@ -80,7 +81,8 @@ ); # Reset max_active_replication_origins -$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10"); +$new_sub->append_conf('postgresql.conf', + "max_active_replication_origins = 10"); # Cleanup $publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1"); diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl index 550a63fdf7d47..58fe8a8c7dcea 100644 --- a/src/bin/pg_upgrade/t/006_transfer_modes.pl +++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl @@ -13,7 +13,8 @@ sub test_mode { my ($mode) = @_; - my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall}); + my $old = + PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall}); my $new = PostgreSQL::Test::Cluster->new('new'); # --swap can't be used to upgrade from versions older than 10, so just skip @@ -40,9 +41,11 @@ sub test_mode # Create a small variety of simple test objects on the old cluster. We'll # check that these reach the new version after upgrading. 
$old->start; - $old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)"); + $old->safe_psql('postgres', + "CREATE TABLE test1 AS SELECT generate_series(1, 100)"); $old->safe_psql('postgres', "CREATE DATABASE testdb1"); - $old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)"); + $old->safe_psql('testdb1', + "CREATE TABLE test2 AS SELECT generate_series(200, 300)"); $old->safe_psql('testdb1', "VACUUM FULL test2"); $old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432"); @@ -51,10 +54,15 @@ sub test_mode if (defined($ENV{oldinstall})) { my $tblspc = PostgreSQL::Test::Utils::tempdir_short(); - $old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'"); - $old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc"); - $old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"); - $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)"); + $old->safe_psql('postgres', + "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'"); + $old->safe_psql('postgres', + "CREATE DATABASE testdb2 TABLESPACE test_tblspc"); + $old->safe_psql('postgres', + "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)" + ); + $old->safe_psql('testdb2', + "CREATE TABLE test4 AS SELECT generate_series(400, 502)"); } $old->stop; @@ -90,9 +98,11 @@ sub test_mode # tablespace. if (defined($ENV{oldinstall})) { - $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3"); + $result = + $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3"); is($result, '102', "test3 data after pg_upgrade $mode"); - $result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4"); + $result = + $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4"); is($result, '103', "test4 data after pg_upgrade $mode"); } $new->stop; diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl index 75ac24a7a5539..ff56a13b46bbb 100644 --- a/src/bin/scripts/t/100_vacuumdb.pl +++ b/src/bin/scripts/t/100_vacuumdb.pl @@ -238,62 +238,105 @@ 'cannot use option --all and a dbname as argument at the same time'); $node->safe_psql('postgres', - 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'); + 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;' +); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing stats'); $node->safe_psql('postgres', - 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'); + 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));' +); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], 
qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing index expression stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing index expression stats'); $node->safe_psql('postgres', - 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'); + 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;' +); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing extended stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing extended stats'); $node->safe_psql('postgres', "CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n" - . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n" - . "ANALYZE regression_vacuumdb_child;\n"); + . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n" + . "ANALYZE regression_vacuumdb_child;\n"); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing inherited stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ], + [ + 'vacuumdb', '--analyze-in-stages', + '--missing-stats-only', '-t', + 'regression_vacuumdb_test', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing inherited stats'); $node->safe_psql('postgres', "CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n" - . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n" - . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n" - . "ANALYZE regression_vacuumdb_part1;\n"); + . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n" + . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n" + . 
"ANALYZE regression_vacuumdb_part1;\n"); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_parted', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with missing partition stats'); $node->issues_sql_unlike( - [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ], + [ + 'vacuumdb', '--analyze-only', + '--missing-stats-only', '-t', + 'regression_vacuumdb_parted', 'postgres' + ], qr/statement:\ ANALYZE/sx, '--missing-stats-only with no missing partition stats'); diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index d63db42ed7b37..479629825f5b7 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -57,6 +57,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202506251 +#define CATALOG_VERSION_NO 202506291 #endif diff --git a/src/include/catalog/pg_authid.dat b/src/include/catalog/pg_authid.dat index eb4dab5c6aa77..c881c13adf1bc 100644 --- a/src/include/catalog/pg_authid.dat +++ b/src/include/catalog/pg_authid.dat @@ -99,7 +99,7 @@ rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', rolpassword => '_null_', rolvaliduntil => '_null_' }, -{ oid => '8916', oid_symbol => 'ROLE_PG_SIGNAL_AUTOVACUUM_WORKER', +{ oid => '6392', oid_symbol => 'ROLE_PG_SIGNAL_AUTOVACUUM_WORKER', rolname => 'pg_signal_autovacuum_worker', rolsuper => 'f', rolinherit => 't', rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f', rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1', diff --git a/src/include/catalog/pg_collation.dat b/src/include/catalog/pg_collation.dat index fb76c421931ea..8cfd09f03140e 100644 --- a/src/include/catalog/pg_collation.dat +++ b/src/include/catalog/pg_collation.dat @@ -33,7 +33,8 @@ descr => 'sorts by Unicode code point; Unicode and POSIX character semantics', collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6', colllocale => 'C.UTF-8', collversion => '1' }, -{ oid => '9535', descr => 'sorts by Unicode code point; Unicode character semantics', +{ oid => '6411', + descr => 'sorts by Unicode code point; Unicode character semantics', collname => 'pg_unicode_fast', collprovider => 'b', collencoding => '6', colllocale => 'PG_UNICODE_FAST', collversion => '1' }, diff --git a/src/include/catalog/pg_index.h b/src/include/catalog/pg_index.h index 4392b9d221d5a..731d3938169e6 100644 --- a/src/include/catalog/pg_index.h +++ b/src/include/catalog/pg_index.h @@ -69,7 +69,7 @@ CATALOG(pg_index,2610,IndexRelationId) BKI_SCHEMA_MACRO */ typedef FormData_pg_index *Form_pg_index; -DECLARE_TOAST_WITH_MACRO(pg_index, 8149, 8150, PgIndexToastTable, PgIndexToastIndex); +DECLARE_TOAST_WITH_MACRO(pg_index, 6351, 6352, PgIndexToastTable, PgIndexToastIndex); DECLARE_INDEX(pg_index_indrelid_index, 2678, IndexIndrelidIndexId, pg_index, btree(indrelid oid_ops)); DECLARE_UNIQUE_INDEX_PKEY(pg_index_indexrelid_index, 2679, IndexRelidIndexId, pg_index, btree(indexrelid oid_ops)); diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index d3d28a263fa99..fb4f7f50350ad 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -1004,7 +1004,7 @@ { oid => '3129', descr => 'sort support', proname => 'btint2sortsupport', prorettype => 'void', proargtypes => 'internal', 
prosrc => 'btint2sortsupport' }, -{ oid => '9290', descr => 'skip support', +{ oid => '6402', descr => 'skip support', proname => 'btint2skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint2skipsupport' }, { oid => '351', descr => 'less-equal-greater', @@ -1013,7 +1013,7 @@ { oid => '3130', descr => 'sort support', proname => 'btint4sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint4sortsupport' }, -{ oid => '9291', descr => 'skip support', +{ oid => '6403', descr => 'skip support', proname => 'btint4skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint4skipsupport' }, { oid => '842', descr => 'less-equal-greater', @@ -1022,7 +1022,7 @@ { oid => '3131', descr => 'sort support', proname => 'btint8sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint8sortsupport' }, -{ oid => '9292', descr => 'skip support', +{ oid => '6404', descr => 'skip support', proname => 'btint8skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btint8skipsupport' }, { oid => '354', descr => 'less-equal-greater', @@ -1043,7 +1043,7 @@ { oid => '3134', descr => 'sort support', proname => 'btoidsortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btoidsortsupport' }, -{ oid => '9293', descr => 'skip support', +{ oid => '6405', descr => 'skip support', proname => 'btoidskipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btoidskipsupport' }, { oid => '404', descr => 'less-equal-greater', @@ -1052,7 +1052,7 @@ { oid => '358', descr => 'less-equal-greater', proname => 'btcharcmp', proleakproof => 't', prorettype => 'int4', proargtypes => 'char char', prosrc => 'btcharcmp' }, -{ oid => '9294', descr => 'skip support', +{ oid => '6406', descr => 'skip support', proname => 'btcharskipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btcharskipsupport' }, { oid => '359', descr => 'less-equal-greater', @@ -1180,24 +1180,24 @@ proname => 'name', proleakproof => 't', prorettype => 'name', proargtypes => 'bpchar', prosrc => 'bpchar_name' }, -{ oid => '8577', descr => 'convert int2 to bytea', +{ oid => '6367', descr => 'convert int2 to bytea', proname => 'bytea', proleakproof => 't', prorettype => 'bytea', proargtypes => 'int2', prosrc => 'int2_bytea' }, -{ oid => '8578', descr => 'convert int4 to bytea', +{ oid => '6368', descr => 'convert int4 to bytea', proname => 'bytea', proleakproof => 't', prorettype => 'bytea', proargtypes => 'int4', prosrc => 'int4_bytea' }, -{ oid => '8579', descr => 'convert int8 to bytea', +{ oid => '6369', descr => 'convert int8 to bytea', proname => 'bytea', proleakproof => 't', prorettype => 'bytea', proargtypes => 'int8', prosrc => 'int8_bytea' }, -{ oid => '8580', descr => 'convert bytea to int2', - proname => 'int2', prorettype => 'int2', - proargtypes => 'bytea', prosrc => 'bytea_int2' }, -{ oid => '8581', descr => 'convert bytea to int4', - proname => 'int4', prorettype => 'int4', - proargtypes => 'bytea', prosrc => 'bytea_int4' }, -{ oid => '8582', descr => 'convert bytea to int8', - proname => 'int8', prorettype => 'int8', - proargtypes => 'bytea', prosrc => 'bytea_int8' }, +{ oid => '6370', descr => 'convert bytea to int2', + proname => 'int2', prorettype => 'int2', proargtypes => 'bytea', + prosrc => 'bytea_int2' }, +{ oid => '6371', descr => 'convert bytea to int4', + proname => 'int4', prorettype => 'int4', proargtypes => 'bytea', + prosrc => 'bytea_int4' }, +{ oid => '6372', descr => 'convert bytea 
to int8', + proname => 'int8', prorettype => 'int8', proargtypes => 'bytea', + prosrc => 'bytea_int8' }, { oid => '449', descr => 'hash', proname => 'hashint2', prorettype => 'int4', proargtypes => 'int2', @@ -1259,10 +1259,10 @@ { oid => '772', descr => 'hash', proname => 'hashvarlenaextended', prorettype => 'int8', proargtypes => 'internal int8', prosrc => 'hashvarlenaextended' }, -{ oid => '9708', descr => 'hash', +{ oid => '6413', descr => 'hash', proname => 'hashbytea', prorettype => 'int4', proargtypes => 'bytea', prosrc => 'hashbytea' }, -{ oid => '9709', descr => 'hash', +{ oid => '6414', descr => 'hash', proname => 'hashbyteaextended', prorettype => 'int8', proargtypes => 'bytea int8', prosrc => 'hashbyteaextended' }, { oid => '457', descr => 'hash', @@ -1301,34 +1301,34 @@ { oid => '781', descr => 'hash', proname => 'hashmacaddr8extended', prorettype => 'int8', proargtypes => 'macaddr8 int8', prosrc => 'hashmacaddr8extended' }, -{ oid => '9710', descr => 'hash', +{ oid => '6415', descr => 'hash', proname => 'hashdate', prorettype => 'int4', proargtypes => 'date', prosrc => 'hashdate' }, -{ oid => '9711', descr => 'hash', +{ oid => '6416', descr => 'hash', proname => 'hashdateextended', prorettype => 'int8', proargtypes => 'date int8', prosrc => 'hashdateextended' }, -{ oid => '9712', descr => 'hash', +{ oid => '6417', descr => 'hash', proname => 'hashbool', prorettype => 'int4', proargtypes => 'bool', prosrc => 'hashbool' }, -{ oid => '9713', descr => 'hash', +{ oid => '6418', descr => 'hash', proname => 'hashboolextended', prorettype => 'int8', proargtypes => 'bool int8', prosrc => 'hashboolextended' }, -{ oid => '9714', descr => 'hash', +{ oid => '6419', descr => 'hash', proname => 'hashxid', prorettype => 'int4', proargtypes => 'xid', prosrc => 'hashxid' }, -{ oid => '9715', descr => 'hash', +{ oid => '6420', descr => 'hash', proname => 'hashxidextended', prorettype => 'int8', proargtypes => 'xid int8', prosrc => 'hashxidextended' }, -{ oid => '9716', descr => 'hash', +{ oid => '6421', descr => 'hash', proname => 'hashxid8', prorettype => 'int4', proargtypes => 'xid8', prosrc => 'hashxid8' }, -{ oid => '9717', descr => 'hash', +{ oid => '6422', descr => 'hash', proname => 'hashxid8extended', prorettype => 'int8', proargtypes => 'xid8 int8', prosrc => 'hashxid8extended' }, -{ oid => '9718', descr => 'hash', +{ oid => '6423', descr => 'hash', proname => 'hashcid', prorettype => 'int4', proargtypes => 'cid', prosrc => 'hashcid' }, -{ oid => '9719', descr => 'hash', +{ oid => '6424', descr => 'hash', proname => 'hashcidextended', prorettype => 'int8', proargtypes => 'cid int8', prosrc => 'hashcidextended' }, @@ -1348,10 +1348,10 @@ proname => 'text_smaller', proleakproof => 't', prorettype => 'text', proargtypes => 'text text', prosrc => 'text_smaller' }, -{ oid => '8920', descr => 'larger of two', +{ oid => '6393', descr => 'larger of two', proname => 'bytea_larger', proleakproof => 't', prorettype => 'bytea', proargtypes => 'bytea bytea', prosrc => 'bytea_larger' }, -{ oid => '8921', descr => 'smaller of two', +{ oid => '6394', descr => 'smaller of two', proname => 'bytea_smaller', proleakproof => 't', prorettype => 'bytea', proargtypes => 'bytea bytea', prosrc => 'bytea_smaller' }, @@ -1533,7 +1533,7 @@ { oid => '6163', descr => 'number of set bits', proname => 'bit_count', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'bytea_bit_count' }, -{ oid => '8694', descr => 'reverse bytea', +{ oid => '6382', descr => 'reverse bytea', proname => 'reverse', prorettype => 
'bytea', proargtypes => 'bytea', prosrc => 'bytea_reverse' }, @@ -1638,7 +1638,7 @@ proname => 'array_append', prosupport => 'array_append_support', proisstrict => 'f', prorettype => 'anycompatiblearray', proargtypes => 'anycompatiblearray anycompatible', prosrc => 'array_append' }, -{ oid => '8680', descr => 'planner support for array_append', +{ oid => '6378', descr => 'planner support for array_append', proname => 'array_append_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_append_support' }, { oid => '379', descr => 'prepend element onto front of array', @@ -1646,7 +1646,7 @@ proisstrict => 'f', prorettype => 'anycompatiblearray', proargtypes => 'anycompatible anycompatiblearray', prosrc => 'array_prepend' }, -{ oid => '8681', descr => 'planner support for array_prepend', +{ oid => '6379', descr => 'planner support for array_prepend', proname => 'array_prepend_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_prepend_support' }, { oid => '383', @@ -1784,17 +1784,17 @@ { oid => '6216', descr => 'take samples from array', proname => 'array_sample', provolatile => 'v', prorettype => 'anyarray', proargtypes => 'anyarray int4', prosrc => 'array_sample' }, -{ oid => '8686', descr => 'reverse array', +{ oid => '6381', descr => 'reverse array', proname => 'array_reverse', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'array_reverse' }, -{ oid => '8810', descr => 'sort array', +{ oid => '6388', descr => 'sort array', proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'array_sort' }, -{ oid => '8811', descr => 'sort array', +{ oid => '6389', descr => 'sort array', proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray bool', proargnames => '{array,descending}', prosrc => 'array_sort_order' }, -{ oid => '8812', descr => 'sort array', +{ oid => '6390', descr => 'sort array', proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray bool bool', proargnames => '{array,descending,nulls_first}', @@ -2315,7 +2315,7 @@ { oid => '3136', descr => 'sort support', proname => 'date_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'date_sortsupport' }, -{ oid => '9295', descr => 'skip support', +{ oid => '6407', descr => 'skip support', proname => 'date_skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'date_skipsupport' }, { oid => '4133', descr => 'window RANGE support', @@ -3433,7 +3433,7 @@ proname => 'pg_sequence_last_value', provolatile => 'v', proparallel => 'u', prorettype => 'int8', proargtypes => 'regclass', prosrc => 'pg_sequence_last_value' }, -{ oid => '9876', descr => 'return sequence tuple, for use by pg_dump', +{ oid => '6427', descr => 'return sequence tuple, for use by pg_dump', proname => 'pg_get_sequence_data', provolatile => 'v', proparallel => 'u', prorettype => 'record', proargtypes => 'regclass', proallargtypes => '{regclass,int8,bool}', proargmodes => '{i,o,o}', @@ -3594,10 +3594,11 @@ proname => 'erfc', prorettype => 'float8', proargtypes => 'float8', prosrc => 'derfc' }, -{ oid => '8702', descr => 'gamma function', +{ oid => '6383', descr => 'gamma function', proname => 'gamma', prorettype => 'float8', proargtypes => 'float8', prosrc => 'dgamma' }, -{ oid => '8703', descr => 'natural logarithm of absolute value of gamma function', +{ oid => '6384', + descr => 'natural logarithm of absolute value of gamma function', proname => 'lgamma', prorettype => 'float8', proargtypes => 'float8', 
prosrc => 'dlgamma' }, @@ -3688,7 +3689,7 @@ { oid => '872', descr => 'capitalize each word', proname => 'initcap', prorettype => 'text', proargtypes => 'text', prosrc => 'initcap' }, -{ oid => '9569', descr => 'fold case', +{ oid => '6412', descr => 'fold case', proname => 'casefold', prorettype => 'text', proargtypes => 'text', prosrc => 'casefold' }, { oid => '873', descr => 'left-pad string to length', @@ -4515,7 +4516,7 @@ { oid => '1693', descr => 'less-equal-greater', proname => 'btboolcmp', proleakproof => 't', prorettype => 'int4', proargtypes => 'bool bool', prosrc => 'btboolcmp' }, -{ oid => '9296', descr => 'skip support', +{ oid => '6408', descr => 'skip support', proname => 'btboolskipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'btboolskipsupport' }, @@ -5450,17 +5451,17 @@ prorettype => 'bool', proargtypes => 'oid text', prosrc => 'has_any_column_privilege_id' }, -{ oid => '8048', +{ oid => '6348', descr => 'user privilege on large object by username, large object oid', proname => 'has_largeobject_privilege', procost => '10', provolatile => 's', prorettype => 'bool', proargtypes => 'name oid text', prosrc => 'has_largeobject_privilege_name_id' }, -{ oid => '8049', +{ oid => '6349', descr => 'current user privilege on large object by large object oid', proname => 'has_largeobject_privilege', procost => '10', provolatile => 's', prorettype => 'bool', proargtypes => 'oid text', prosrc => 'has_largeobject_privilege_id' }, -{ oid => '8050', +{ oid => '6350', descr => 'user privilege on large object by user oid, large object oid', proname => 'has_largeobject_privilege', procost => '10', provolatile => 's', prorettype => 'bool', proargtypes => 'oid oid text', @@ -5611,19 +5612,19 @@ proname => 'pg_stat_get_autoanalyze_count', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_autoanalyze_count' }, -{ oid => '8406', descr => 'total vacuum time, in milliseconds', +{ oid => '6358', descr => 'total vacuum time, in milliseconds', proname => 'pg_stat_get_total_vacuum_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_vacuum_time' }, -{ oid => '8407', descr => 'total autovacuum time, in milliseconds', +{ oid => '6359', descr => 'total autovacuum time, in milliseconds', proname => 'pg_stat_get_total_autovacuum_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_autovacuum_time' }, -{ oid => '8408', descr => 'total analyze time, in milliseconds', +{ oid => '6360', descr => 'total analyze time, in milliseconds', proname => 'pg_stat_get_total_analyze_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_analyze_time' }, -{ oid => '8409', descr => 'total autoanalyze time, in milliseconds', +{ oid => '6361', descr => 'total autoanalyze time, in milliseconds', proname => 'pg_stat_get_total_autoanalyze_time', provolatile => 's', proparallel => 'r', prorettype => 'float8', proargtypes => 'oid', prosrc => 'pg_stat_get_total_autoanalyze_time' }, @@ -5900,12 +5901,12 @@ proname => 'pg_stat_get_db_sessions_killed', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_db_sessions_killed' }, -{ oid => '8403', +{ oid => '6355', descr => 'statistics: number of parallel workers planned to be launched by queries', proname => 'pg_stat_get_db_parallel_workers_to_launch', 
provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', prosrc => 'pg_stat_get_db_parallel_workers_to_launch' }, -{ oid => '8404', +{ oid => '6356', descr => 'statistics: number of parallel workers effectively launched by queries', proname => 'pg_stat_get_db_parallel_workers_launched', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => 'oid', @@ -5927,7 +5928,7 @@ proname => 'pg_stat_get_checkpointer_num_requested', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_checkpointer_num_requested' }, -{ oid => '8599', +{ oid => '6377', descr => 'statistics: number of checkpoints performed by the checkpointer', proname => 'pg_stat_get_checkpointer_num_performed', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', @@ -5954,7 +5955,7 @@ proname => 'pg_stat_get_checkpointer_buffers_written', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_checkpointer_buffers_written' }, -{ oid => '8573', +{ oid => '6366', descr => 'statistics: number of SLRU buffers written during checkpoints and restartpoints', proname => 'pg_stat_get_checkpointer_slru_written', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', @@ -6000,7 +6001,7 @@ proargnames => '{backend_type,object,context,reads,read_bytes,read_time,writes,write_bytes,write_time,writebacks,writeback_time,extends,extend_bytes,extend_time,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}', prosrc => 'pg_stat_get_io' }, -{ oid => '8806', descr => 'statistics: backend IO statistics', +{ oid => '6386', descr => 'statistics: backend IO statistics', proname => 'pg_stat_get_backend_io', prorows => '5', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => 'int4', @@ -6016,7 +6017,7 @@ proargmodes => '{o,o,o,o,o}', proargnames => '{wal_records,wal_fpi,wal_bytes,wal_buffers_full,stats_reset}', prosrc => 'pg_stat_get_wal' }, -{ oid => '8037', descr => 'statistics: backend WAL activity', +{ oid => '6313', descr => 'statistics: backend WAL activity', proname => 'pg_stat_get_backend_wal', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => 'int4', proallargtypes => '{int4,int8,int8,numeric,int8,timestamptz}', @@ -6155,7 +6156,7 @@ proname => 'pg_stat_reset_single_function_counters', provolatile => 'v', prorettype => 'void', proargtypes => 'oid', prosrc => 'pg_stat_reset_single_function_counters' }, -{ oid => '8807', descr => 'statistics: reset statistics for a single backend', +{ oid => '6387', descr => 'statistics: reset statistics for a single backend', proname => 'pg_stat_reset_backend_stats', provolatile => 'v', prorettype => 'void', proargtypes => 'int4', prosrc => 'pg_stat_reset_backend_stats' }, @@ -6369,10 +6370,10 @@ { oid => '3411', descr => 'hash', proname => 'timestamp_hash_extended', prorettype => 'int8', proargtypes => 'timestamp int8', prosrc => 'timestamp_hash_extended' }, -{ oid => '9720', descr => 'hash', +{ oid => '6425', descr => 'hash', proname => 'timestamptz_hash', prorettype => 'int4', proargtypes => 'timestamptz', prosrc => 'timestamptz_hash' }, -{ oid => '9721', descr => 'hash', +{ oid => '6426', descr => 'hash', proname => 'timestamptz_hash_extended', prorettype => 'int8', proargtypes => 'timestamptz int8', prosrc => 'timestamptz_hash_extended' }, { oid => '2041', descr => 'intervals overlap?', @@ -6397,7 +6398,7 @@ { oid => '3137', descr => 'sort 
support', proname => 'timestamp_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'timestamp_sortsupport' }, -{ oid => '9297', descr => 'skip support', +{ oid => '6409', descr => 'skip support', proname => 'timestamp_skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'timestamp_skipsupport' }, @@ -6593,7 +6594,7 @@ proname => 'pg_describe_object', provolatile => 's', prorettype => 'text', proargtypes => 'oid oid int4', prosrc => 'pg_describe_object' }, -{ oid => '8730', descr => 'get ACL for SQL object', +{ oid => '6385', descr => 'get ACL for SQL object', proname => 'pg_get_acl', provolatile => 's', prorettype => '_aclitem', proargtypes => 'oid oid int4', proargnames => '{classid,objid,objsubid}', prosrc => 'pg_get_acl' }, @@ -6792,7 +6793,7 @@ proargnames => '{rm_id, rm_name, rm_builtin}', prosrc => 'pg_get_wal_resource_managers' }, -{ oid => '8303', descr => 'get info about loaded modules', +{ oid => '6353', descr => 'get info about loaded modules', proname => 'pg_get_loaded_modules', prorows => '10', proretset => 't', provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '', proallargtypes => '{text,text,text}', @@ -6992,7 +6993,7 @@ proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'aggregate_dummy' }, -{ oid => '8595', descr => 'maximum value of all record input values', +{ oid => '6373', descr => 'maximum value of all record input values', proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'record', proargtypes => 'record', prosrc => 'aggregate_dummy' }, { oid => '2244', descr => 'maximum value of all bpchar input values', @@ -7010,7 +7011,7 @@ { oid => '5099', descr => 'maximum value of all xid8 input values', proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'xid8', proargtypes => 'xid8', prosrc => 'aggregate_dummy' }, -{ oid => '8922', descr => 'maximum value of all bytea input values', +{ oid => '6395', descr => 'maximum value of all bytea input values', proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'bytea', proargtypes => 'bytea', prosrc => 'aggregate_dummy' }, @@ -7068,7 +7069,7 @@ proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'anyarray', proargtypes => 'anyarray', prosrc => 'aggregate_dummy' }, -{ oid => '8596', descr => 'minimum value of all record input values', +{ oid => '6374', descr => 'minimum value of all record input values', proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'record', proargtypes => 'record', prosrc => 'aggregate_dummy' }, { oid => '2245', descr => 'minimum value of all bpchar input values', @@ -7086,7 +7087,7 @@ { oid => '5100', descr => 'minimum value of all xid8 input values', proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'xid8', proargtypes => 'xid8', prosrc => 'aggregate_dummy' }, -{ oid => '8923', descr => 'minimum value of all bytea input values', +{ oid => '6396', descr => 'minimum value of all bytea input values', proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'bytea', proargtypes => 'bytea', prosrc => 'aggregate_dummy' }, @@ -7949,10 +7950,10 @@ proargtypes => 'internal', prosrc => 'tsm_system_handler' }, # CRC variants -{ oid => '8571', descr => 'CRC-32 value', +{ oid => '6364', descr => 'CRC-32 value', proname => 'crc32', proleakproof => 't', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'crc32_bytea' }, -{ oid => '8572', descr => 'CRC-32C value', +{ oid => 
'6365', descr => 'CRC-32C value', proname => 'crc32c', proleakproof => 't', prorettype => 'int8', proargtypes => 'bytea', prosrc => 'crc32c_bytea' }, @@ -8496,7 +8497,7 @@ proargmodes => '{o,o,o,o,o,o}', proargnames => '{name,statement,is_holdable,is_binary,is_scrollable,creation_time}', prosrc => 'pg_cursor' }, -{ oid => '9221', descr => 'get abbreviations from current timezone', +{ oid => '6401', descr => 'get abbreviations from current timezone', proname => 'pg_timezone_abbrevs_zone', prorows => '10', proretset => 't', provolatile => 's', prorettype => 'record', proargtypes => '', proallargtypes => '{text,interval,bool}', proargmodes => '{o,o,o}', @@ -8608,7 +8609,7 @@ prosupport => 'generate_series_numeric_support', proretset => 't', prorettype => 'numeric', proargtypes => 'numeric numeric', prosrc => 'generate_series_numeric' }, -{ oid => '8405', descr => 'planner support for generate_series', +{ oid => '6357', descr => 'planner support for generate_series', proname => 'generate_series_numeric_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'generate_series_numeric_support' }, { oid => '938', descr => 'non-persistent series generator', @@ -8628,7 +8629,7 @@ prorettype => 'timestamptz', proargtypes => 'timestamptz timestamptz interval text', prosrc => 'generate_series_timestamptz_at_zone' }, -{ oid => '8402', descr => 'planner support for generate_series', +{ oid => '6354', descr => 'planner support for generate_series', proname => 'generate_series_timestamp_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'generate_series_timestamp_support' }, @@ -9360,8 +9361,8 @@ proname => 'to_json', provolatile => 's', prorettype => 'json', proargtypes => 'anyelement', prosrc => 'to_json' }, { oid => '3261', descr => 'remove object fields with null values from json', - proname => 'json_strip_nulls', prorettype => 'json', proargtypes => 'json bool', - prosrc => 'json_strip_nulls' }, + proname => 'json_strip_nulls', prorettype => 'json', + proargtypes => 'json bool', prosrc => 'json_strip_nulls' }, { oid => '3947', proname => 'json_object_field', prorettype => 'json', @@ -9467,7 +9468,7 @@ { oid => '3300', descr => 'sort support', proname => 'uuid_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'uuid_sortsupport' }, -{ oid => '9298', descr => 'skip support', +{ oid => '6410', descr => 'skip support', proname => 'uuid_skipsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'uuid_skipsupport' }, { oid => '2961', descr => 'I/O', @@ -9483,17 +9484,19 @@ proname => 'uuid_hash_extended', prorettype => 'int8', proargtypes => 'uuid int8', prosrc => 'uuid_hash_extended' }, { oid => '3432', descr => 'generate random UUID', - proname => 'gen_random_uuid', provolatile => 'v', - prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' }, -{ oid => '9895', descr => 'generate UUID version 4', - proname => 'uuidv4', provolatile => 'v', - prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' }, -{ oid => '9896', descr => 'generate UUID version 7', - proname => 'uuidv7', provolatile => 'v', - prorettype => 'uuid', proargtypes => '', prosrc => 'uuidv7' }, -{ oid => '9897', descr => 'generate UUID version 7 with a timestamp shifted by specified interval', - proname => 'uuidv7', provolatile => 'v', proargnames => '{shift}', - prorettype => 'uuid', proargtypes => 'interval', prosrc => 'uuidv7_interval' }, + proname => 'gen_random_uuid', provolatile => 'v', prorettype => 'uuid', + proargtypes => '', prosrc 
=> 'gen_random_uuid' }, +{ oid => '6428', descr => 'generate UUID version 4', + proname => 'uuidv4', provolatile => 'v', prorettype => 'uuid', + proargtypes => '', prosrc => 'gen_random_uuid' }, +{ oid => '6429', descr => 'generate UUID version 7', + proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid', + proargtypes => '', prosrc => 'uuidv7' }, +{ oid => '6430', + descr => 'generate UUID version 7 with a timestamp shifted by specified interval', + proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid', + proargtypes => 'interval', proargnames => '{shift}', + prosrc => 'uuidv7_interval' }, { oid => '6342', descr => 'extract timestamp from UUID', proname => 'uuid_extract_timestamp', proleakproof => 't', prorettype => 'timestamptz', proargtypes => 'uuid', @@ -10299,8 +10302,8 @@ prorettype => 'jsonb', proargtypes => '', prosrc => 'jsonb_build_object_noargs' }, { oid => '3262', descr => 'remove object fields with null values from jsonb', - proname => 'jsonb_strip_nulls', prorettype => 'jsonb', proargtypes => 'jsonb bool', - prosrc => 'jsonb_strip_nulls' }, + proname => 'jsonb_strip_nulls', prorettype => 'jsonb', + proargtypes => 'jsonb bool', prosrc => 'jsonb_strip_nulls' }, { oid => '3478', proname => 'jsonb_object_field', prorettype => 'jsonb', @@ -10651,10 +10654,10 @@ { oid => '2987', descr => 'less-equal-greater', proname => 'btrecordcmp', prorettype => 'int4', proargtypes => 'record record', prosrc => 'btrecordcmp' }, -{ oid => '8597', descr => 'larger of two', +{ oid => '6375', descr => 'larger of two', proname => 'record_larger', prorettype => 'record', proargtypes => 'record record', prosrc => 'record_larger' }, -{ oid => '8598', descr => 'smaller of two', +{ oid => '6376', descr => 'smaller of two', proname => 'record_smaller', prorettype => 'record', proargtypes => 'record record', prosrc => 'record_smaller' }, @@ -10894,7 +10897,7 @@ { oid => '3870', descr => 'less-equal-greater', proname => 'range_cmp', prorettype => 'int4', proargtypes => 'anyrange anyrange', prosrc => 'range_cmp' }, -{ oid => '8849', descr => 'sort support', +{ oid => '6391', descr => 'sort support', proname => 'range_sortsupport', prorettype => 'void', proargtypes => 'internal', prosrc => 'range_sortsupport' }, { oid => '3871', @@ -12313,7 +12316,7 @@ proname => 'array_subscript_handler', prosupport => 'array_subscript_handler_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_subscript_handler' }, -{ oid => '8682', descr => 'planner support for array_subscript_handler', +{ oid => '6380', descr => 'planner support for array_subscript_handler', proname => 'array_subscript_handler_support', prorettype => 'internal', proargtypes => 'internal', prosrc => 'array_subscript_handler_support' }, { oid => '6180', descr => 'raw array subscripting support', @@ -12352,7 +12355,7 @@ provolatile => 'v', prorettype => 'record', proargtypes => '', proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}', proargnames => '{name,size,modification}', prosrc => 'pg_ls_waldir' }, -{ oid => '9220', descr => 'list of files in the pg_wal/summaries directory', +{ oid => '6400', descr => 'list of files in the pg_wal/summaries directory', proname => 'pg_ls_summariesdir', procost => '10', prorows => '20', proretset => 't', provolatile => 'v', prorettype => 'record', proargtypes => '', proallargtypes => '{text,int8,timestamptz}', @@ -12508,49 +12511,37 @@ proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}', prosrc => 'pg_get_wal_summarizer_state' }, # 
Statistics Import -{ oid => '8459', - descr => 'restore statistics on relation', - proname => 'pg_restore_relation_stats', provolatile => 'v', proisstrict => 'f', - provariadic => 'any', - proparallel => 'u', prorettype => 'bool', - proargtypes => 'any', - proargnames => '{kwargs}', - proargmodes => '{v}', - prosrc => 'pg_restore_relation_stats' }, -{ oid => '9160', - descr => 'clear statistics on relation', - proname => 'pg_clear_relation_stats', provolatile => 'v', proisstrict => 'f', - proparallel => 'u', prorettype => 'void', - proargtypes => 'text text', - proargnames => '{schemaname,relname}', - prosrc => 'pg_clear_relation_stats' }, -{ oid => '8461', - descr => 'restore statistics on attribute', - proname => 'pg_restore_attribute_stats', provolatile => 'v', proisstrict => 'f', - provariadic => 'any', - proparallel => 'u', prorettype => 'bool', - proargtypes => 'any', - proargnames => '{kwargs}', - proargmodes => '{v}', - prosrc => 'pg_restore_attribute_stats' }, -{ oid => '9162', - descr => 'clear statistics on attribute', - proname => 'pg_clear_attribute_stats', provolatile => 'v', proisstrict => 'f', +{ oid => '6362', descr => 'restore statistics on relation', + proname => 'pg_restore_relation_stats', provariadic => 'any', + proisstrict => 'f', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}', + proargnames => '{kwargs}', prosrc => 'pg_restore_relation_stats' }, +{ oid => '6397', descr => 'clear statistics on relation', + proname => 'pg_clear_relation_stats', proisstrict => 'f', provolatile => 'v', + proparallel => 'u', prorettype => 'void', proargtypes => 'text text', + proargnames => '{schemaname,relname}', prosrc => 'pg_clear_relation_stats' }, +{ oid => '6363', descr => 'restore statistics on attribute', + proname => 'pg_restore_attribute_stats', provariadic => 'any', + proisstrict => 'f', provolatile => 'v', proparallel => 'u', + prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}', + proargnames => '{kwargs}', prosrc => 'pg_restore_attribute_stats' }, +{ oid => '6398', descr => 'clear statistics on attribute', + proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v', proparallel => 'u', prorettype => 'void', proargtypes => 'text text text bool', proargnames => '{schemaname,relname,attname,inherited}', prosrc => 'pg_clear_attribute_stats' }, # GiST stratnum implementations -{ oid => '8047', descr => 'GiST support', +{ oid => '6347', descr => 'GiST support', proname => 'gist_translate_cmptype_common', prorettype => 'int2', - proargtypes => 'int4', - prosrc => 'gist_translate_cmptype_common' }, + proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' }, # AIO related functions -{ oid => '9200', descr => 'information about in-progress asynchronous IOs', +{ oid => '6399', descr => 'information about in-progress asynchronous IOs', proname => 'pg_get_aios', prorows => '100', proretset => 't', - provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '', + provolatile => 'v', proparallel => 'r', prorettype => 'record', + proargtypes => '', proallargtypes => '{int4,int4,int8,text,text,int8,int8,text,int2,int4,text,text,bool,bool,bool}', proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', proargnames => '{pid,io_id,io_generation,state,operation,off,length,target,handle_data_len,raw_result,result,target_desc,f_sync,f_localmem,f_buffered}', diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index 
37d96d95a1aeb..a16e9a563f3fd 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -79,39 +79,40 @@ sub test_conn # other tests are added to this file in the future $node->safe_psql('postgres', "CREATE DATABASE test_log_connections"); -my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;)); +my $log_connections = + $node->safe_psql('test_log_connections', q(SHOW log_connections;)); is($log_connections, 'on', qq(check log connections has expected value 'on')); -$node->connect_ok('test_log_connections', +$node->connect_ok( + 'test_log_connections', qq(log_connections 'on' works as expected for backwards compatibility), log_like => [ qr/connection received/, qr/connection authenticated/, qr/connection authorized: user=\S+ database=test_log_connections/, ], - log_unlike => [ - qr/connection ready/, - ],); + log_unlike => [ qr/connection ready/, ],); -$node->safe_psql('test_log_connections', +$node->safe_psql( + 'test_log_connections', q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations; SELECT pg_reload_conf();]); -$node->connect_ok('test_log_connections', +$node->connect_ok( + 'test_log_connections', q(log_connections with subset of specified options logs only those aspects), log_like => [ qr/connection received/, qr/connection authorized: user=\S+ database=test_log_connections/, qr/connection ready/, ], - log_unlike => [ - qr/connection authenticated/, - ],); + log_unlike => [ qr/connection authenticated/, ],); $node->safe_psql('test_log_connections', qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();)); -$node->connect_ok('test_log_connections', +$node->connect_ok( + 'test_log_connections', qq(log_connections 'all' logs all available connection aspects), log_like => [ qr/connection received/, diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl index 61524bdbd8f28..f967885307045 100644 --- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl +++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl @@ -53,7 +53,8 @@ BEGIN $node->command_ok( [ 'libpq_pipeline', @extraargs, - $testname, $node->connstr('postgres') . " max_protocol_version=latest" + $testname, + $node->connstr('postgres') . " max_protocol_version=latest" ], "libpq_pipeline $testname"); @@ -76,7 +77,8 @@ BEGIN # test separately that it still works the old protocol version too. $node->command_ok( [ - 'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0" + 'libpq_pipeline', 'cancel', + $node->connstr('postgres') . " max_protocol_version=3.0" ], "libpq_pipeline cancel with protocol 3.0"); diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl index 4527c70785d34..82ffffc058f75 100644 --- a/src/test/modules/test_aio/t/001_aio.pl +++ b/src/test/modules/test_aio/t/001_aio.pl @@ -1123,7 +1123,8 @@ sub test_zero { # Create a corruption and then read the block without waiting for # completion. 
-	$psql_a->query(qq(
+	$psql_a->query(
+		qq(
 SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
 SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
 ));
@@ -1133,7 +1134,8 @@ sub test_zero
 		$psql_b,
 		"$persistency: test completing read by other session doesn't generate warning",
 		qq(SELECT count(*) > 0 FROM tbl_zero;),
-		qr/^t$/, qr/^$/);
+		qr/^t$/,
+		qr/^$/);
 }
 
 # Clean up
@@ -1355,18 +1357,24 @@ sub test_ignore_checksum
 	));
 	$psql->query_safe($invalidate_sql);
 
-	psql_like($io_method, $psql,
+	psql_like(
+		$io_method,
+		$psql,
 		"reading block w/ wrong checksum with ignore_checksum_failure=off fails",
-		$count_sql, qr/^$/, qr/ERROR: invalid page in block/);
+		$count_sql,
+		qr/^$/,
+		qr/ERROR: invalid page in block/);
 
 	$psql->query_safe("SET ignore_checksum_failure=on");
 
 	$psql->query_safe($invalidate_sql);
-	psql_like($io_method, $psql,
-		"reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
-		$count_sql,
-		qr/^$expect$/,
-		qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
+	psql_like(
+		$io_method,
+		$psql,
+		"reading block w/ wrong checksum with ignore_checksum_failure=on succeeds",
+		$count_sql,
+		qr/^$expect$/,
+		qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
 
 	# Verify that ignore_checksum_failure=off works in multi-block reads
@@ -1432,19 +1440,22 @@ sub test_ignore_checksum
 	# file.
 	$node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
-		$log_location);
+		$log_location);
 	ok(1, "$io_method: found information about checksum failure in block 2");
 
-	$node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
-		$log_location);
+	$node->wait_for_log(
+		qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
+		$log_location);
 	ok(1, "$io_method: found information about invalid page in block 3");
 
-	$node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
-		$log_location);
+	$node->wait_for_log(
+		qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
+		$log_location);
 	ok(1, "$io_method: found information about checksum failure in block 4");
 
-	$node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
-		$log_location);
+	$node->wait_for_log(
+		qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
+		$log_location);
 	ok(1, "$io_method: found information about checksum failure in block 5");
@@ -1462,8 +1473,7 @@ sub test_ignore_checksum
 		qq(
 SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
 		qr/^$/,
-		qr/^psql::\d+: ERROR: invalid page in block 3 of relation/
-	);
+		qr/^psql::\d+: ERROR: invalid page in block 3 of relation/);
 
 	psql_like(
 		$io_method,
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index 6442500fc379a..4a7fb16261f86 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -68,7 +68,8 @@ sub connect_fails_wait
 	my $log_location = -s $node->logfile;
 
 	$node->connect_fails($connstr, $test_name, %params);
-	$node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
+	$node->wait_for_log(
+		qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
 		$log_location);
 	ok(1, "$test_name: client backend process exited");
 }
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 9c8b49e942d88..2c61c51e914df 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl +++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl @@ -941,8 +941,7 @@ 'synced slot retained on the new primary'); # Commit the prepared transaction -$standby1->safe_psql('postgres', - "COMMIT PREPARED 'test_twophase_slotsync';"); +$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';"); $standby1->wait_for_catchup('regress_mysub1'); # Confirm that the prepared transaction is replicated to the subscriber diff --git a/src/test/recovery/t/048_vacuum_horizon_floor.pl b/src/test/recovery/t/048_vacuum_horizon_floor.pl index d48a6ef7e0f24..e56fce59d58ea 100644 --- a/src/test/recovery/t/048_vacuum_horizon_floor.pl +++ b/src/test/recovery/t/048_vacuum_horizon_floor.pl @@ -47,7 +47,7 @@ $node_primary->background_psql($test_db, on_error_stop => 1); # Long-running Primary Session B -my $psql_primaryB = +my $psql_primaryB = $node_primary->background_psql($test_db, on_error_stop => 1); # Our test relies on two rounds of index vacuuming for reasons elaborated @@ -81,7 +81,8 @@ # insert and delete enough rows that we force at least one round of index # vacuuming before getting to a dead tuple which was killed after the standby # is disconnected. -$node_primary->safe_psql($test_db, qq[ +$node_primary->safe_psql( + $test_db, qq[ CREATE TABLE ${table1}(col1 int) WITH (autovacuum_enabled=false, fillfactor=10); INSERT INTO $table1 VALUES(7); @@ -98,21 +99,24 @@ $node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn); # Test that the WAL receiver is up and running. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't'); # Set primary_conninfo to something invalid on the replica and reload the # config. Once the config is reloaded, the startup process will force the WAL # receiver to restart and it will be unable to reconnect because of the # invalid connection information. -$node_replica->safe_psql($test_db, qq[ +$node_replica->safe_psql( + $test_db, qq[ ALTER SYSTEM SET primary_conninfo = ''; SELECT pg_reload_conf(); ]); # Wait until the WAL receiver has shut down and been unable to start up again. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f'); # Now insert and update a tuple which will be visible to the vacuum on the # primary but which will have xmax newer than the oldest xmin on the standby @@ -123,7 +127,7 @@ UPDATE $table1 SET col1 = 100 WHERE col1 = 99; SELECT 'after_update'; ] - ); +); # Make sure the UPDATE finished like($res, qr/^after_update$/m, "UPDATE occurred on primary session A"); @@ -148,7 +152,7 @@ DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7; FETCH $primary_cursor1; ] - ); +); is($res, 7, qq[Cursor query returned $res. Expected value 7.]); @@ -183,7 +187,8 @@ # just waiting on the lock to start vacuuming. We don't want the standby to # re-establish a connection to the primary and push the horizon back until # we've saved initial values in GlobalVisState and calculated OldestXmin. -$node_primary->poll_query_until($test_db, +$node_primary->poll_query_until( + $test_db, qq[ SELECT count(*) >= 1 FROM pg_stat_activity WHERE pid = $vacuum_pid @@ -192,8 +197,9 @@ 't'); # Ensure the WAL receiver is still not active on the replica. 
-$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 'f'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f'); # Allow the WAL receiver connection to re-establish. $node_replica->safe_psql( @@ -203,15 +209,17 @@ ]); # Ensure the new WAL receiver has connected. -$node_replica->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);] , 't'); +$node_replica->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't'); # Once the WAL sender is shown on the primary, the replica should have # connected with the primary and pushed the horizon backward. Primary Session # A won't see that until the VACUUM FREEZE proceeds and does its first round # of index vacuuming. -$node_primary->poll_query_until($test_db, qq[ - SELECT EXISTS (SELECT * FROM pg_stat_replication);] , 't'); +$node_primary->poll_query_until( + $test_db, qq[ + SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't'); # Move the cursor forward to the next 7. We inserted the 7 much later, so # advancing the cursor should allow vacuum to proceed vacuuming most pages of @@ -225,20 +233,21 @@ # Prevent the test from incorrectly passing by confirming that we did indeed # do a pass of index vacuuming. -$node_primary->poll_query_until($test_db, qq[ +$node_primary->poll_query_until( + $test_db, qq[ SELECT index_vacuum_count > 0 FROM pg_stat_progress_vacuum WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass; - ] , 't'); + ], 't'); # Commit the transaction with the open cursor so that the VACUUM can finish. $psql_primaryB->query_until( - qr/^commit$/m, - qq[ + qr/^commit$/m, + qq[ COMMIT; \\echo commit ] - ); +); # VACUUM proceeds with pruning and does a visibility check on each tuple. In # older versions of Postgres, pruning found our final dead tuple @@ -252,7 +261,8 @@ # With the fix, VACUUM should finish successfully, incrementing the table # vacuum_count. 
-$node_primary->poll_query_until($test_db,
+$node_primary->poll_query_until(
+	$test_db,
 	qq[
 	SELECT vacuum_count > 0
 	FROM pg_stat_all_tables
 	WHERE relname = '${table1}';
diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm
index 96f0f201e9c0b..efbd0dafaf60d 100644
--- a/src/test/ssl/t/SSL/Server.pm
+++ b/src/test/ssl/t/SSL/Server.pm
@@ -318,7 +318,8 @@ sub switch_server_cert
 	$node->append_conf('sslconfig.conf', "ssl=on");
 	$node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));
 	# use lists of ECDH curves and cipher suites for syntax testing
-	$node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1');
+	$node->append_conf('sslconfig.conf',
+		'ssl_groups=X25519:prime256v1:secp521r1');
 	$node->append_conf('sslconfig.conf',
 		'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 7d12bcbddb687..2a45fb13739b7 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -70,7 +70,8 @@
 );
 
 # Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+	'postgres', qq[
 	DROP PUBLICATION mypub;
 	SELECT pg_drop_replication_slot('mysub');
 ]);
@@ -86,32 +87,38 @@ sub test_swap
 	my ($table_name, $pubname, $appname) = @_;
 
 	# Confirms tuples can be replicated
-	$node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);");
+	$node_publisher->safe_psql('postgres',
+		"INSERT INTO $table_name VALUES (1);");
 	$node_publisher->wait_for_catchup($appname);
 	my $result =
-		$node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
-	is($result, qq(1), 'check replication worked well before renaming a publication');
+	  $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
+	is($result, qq(1),
+		'check replication worked well before renaming a publication');
 
 	# Swap the name of publications; $pubname <-> pub_empty
-	$node_publisher->safe_psql('postgres', qq[
+	$node_publisher->safe_psql(
+		'postgres', qq[
 		ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
 		ALTER PUBLICATION pub_empty RENAME TO $pubname;
 		ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
 	]);
 
 	# Insert the data again
-	$node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);");
+	$node_publisher->safe_psql('postgres',
+		"INSERT INTO $table_name VALUES (2);");
 	$node_publisher->wait_for_catchup($appname);
 
 	# Confirms the second tuple won't be replicated because $pubname does not
 	# contain relations anymore.
 	$result =
-	  $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a");
+	  $node_subscriber->safe_psql('postgres',
+		"SELECT a FROM $table_name ORDER BY a");
 	is($result, qq(1),
 		'check the tuple inserted after the RENAME was not replicated');
 
 	# Restore the name of publications because it can be called several times
-	$node_publisher->safe_psql('postgres', qq[
+	$node_publisher->safe_psql(
+		'postgres', qq[
 		ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
 		ALTER PUBLICATION pub_empty RENAME TO $pubname;
 		ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
@@ -124,7 +131,8 @@ sub test_swap
 $node_subscriber->safe_psql('postgres', $ddl);
 
 # Create publications and a subscription
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+	'postgres', qq[
 	CREATE PUBLICATION pub_empty;
 	CREATE PUBLICATION pub_for_tab FOR TABLE test1;
 	CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;
@@ -139,19 +147,20 @@ sub test_swap
 
 # Switches a publication which includes all tables
 $node_subscriber->safe_psql('postgres',
-	"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"
-);
+	"ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
 $node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
 
 # Confirms RENAME command works well for ALL TABLES publication
 test_swap('test2', 'pub_for_all_tables', 'tap_sub');
 
 # Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+	'postgres', qq[
 	DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
 	DROP TABLE test1, test2;
 ]);
-$node_subscriber->safe_psql('postgres', qq[
+$node_subscriber->safe_psql(
+	'postgres', qq[
 	DROP SUBSCRIPTION tap_sub;
 	DROP TABLE test1, test2;
 ]);
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 61b0cb4aa1ac1..4f78dd48815f0 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -51,8 +51,7 @@
 );
 # make a BRIN index to test aminsertcleanup logic in subscriber
 $node_subscriber1->safe_psql('postgres',
-	"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"
-);
+	"CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
 $node_subscriber1->safe_psql('postgres',
 	"CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
 );
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
index 5298d43197900..b396abe559947 100644
--- a/src/test/subscription/t/024_add_drop_pub.pl
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -108,11 +108,12 @@
 
 my $offset = -s $node_publisher->logfile;
 
-$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");
 
 # Verify that a warning is logged.
 $node_publisher->wait_for_log(
-	qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/, $offset);
+	qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
+	$offset);
 
 $node_publisher->safe_psql('postgres',
 	"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");
@@ -128,10 +129,11 @@
 
 # Verify that the insert operation gets replicated to subscriber after
 # publication is created.
-$result = $node_subscriber->safe_psql('postgres',
-	"SELECT * FROM tab_3");
-is($result, qq(1
-2), 'check that the incremental data is replicated after the publication is created');
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
+is( $result, qq(1
+2),
+	'check that the incremental data is replicated after the publication is created'
+);
 
 # shutdown
 $node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 2a7a8239a2966..d78a6bac16aeb 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -26,7 +26,8 @@
 	"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
 
 $node_publisher->safe_psql('postgres',
-	"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+	"CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
+);
 
 # Create same table on subscriber
 $node_subscriber->safe_psql('postgres',
diff --git a/src/tools/git_changelog b/src/tools/git_changelog
index b8bd874f20858..dccf938685a3a 100755
--- a/src/tools/git_changelog
+++ b/src/tools/git_changelog
@@ -59,6 +59,7 @@ require IPC::Open2;
 # (We could get this from "git branches", but not worth the trouble.)
 # NB: master must be first!
 my @BRANCHES = qw(master
+	REL_18_STABLE
 	REL_17_STABLE REL_16_STABLE REL_15_STABLE REL_14_STABLE REL_13_STABLE
 	REL_12_STABLE REL_11_STABLE REL_10_STABLE REL9_6_STABLE REL9_5_STABLE
 	REL9_4_STABLE REL9_3_STABLE REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE
diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl
index c3509474d83b2..a9d2d0910f3af 100755
--- a/src/tools/version_stamp.pl
+++ b/src/tools/version_stamp.pl
@@ -25,7 +25,7 @@
 
 # Major version is hard-wired into the script.  We update it when we branch
 # a new development version.
-my $majorversion = 18;
+my $majorversion = 19;
 
 # Validate argument and compute derived variables
 my $minor = shift;