diff --git a/src/test/thread/thread_test.c b/config/thread_test.c similarity index 93% rename from src/test/thread/thread_test.c rename to config/thread_test.c index e1bec01b81ad7..ff2eace87d841 100644 --- a/src/test/thread/thread_test.c +++ b/config/thread_test.c @@ -1,12 +1,12 @@ /*------------------------------------------------------------------------- * * thread_test.c - * libc thread test program + * libc threading test program * * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * src/test/thread/thread_test.c + * config/thread_test.c * * This program tests to see if your standard libc functions use * pthread_setspecific()/pthread_getspecific() to be thread-safe. @@ -20,12 +20,7 @@ *------------------------------------------------------------------------- */ -#if !defined(IN_CONFIGURE) && !defined(WIN32) -#include "postgres.h" - -/* we want to know what the native strerror does, not pg_strerror */ -#undef strerror -#endif +/* We cannot use c.h, as port.h will not exist yet */ #include #include @@ -36,6 +31,7 @@ #include #include #include +#include /* CYGWIN requires this for MAXHOSTNAMELEN */ #ifdef __CYGWIN__ @@ -47,25 +43,11 @@ #include #endif - /* Test for POSIX.1c 2-arg sigwait() and fail on single-arg version */ #include int sigwait(const sigset_t *set, int *sig); -#if !defined(ENABLE_THREAD_SAFETY) && !defined(IN_CONFIGURE) && !defined(WIN32) -int -main(int argc, char *argv[]) -{ - fprintf(stderr, "This PostgreSQL build does not support threads.\n"); - fprintf(stderr, "Perhaps rerun 'configure' using '--enable-thread-safety'.\n"); - return 1; -} -#else - -/* This must be down here because this is the code that uses threads. 
*/ -#include - #define TEMP_FILENAME_1 "thread_test.1" #define TEMP_FILENAME_2 "thread_test.2" @@ -119,14 +101,12 @@ main(int argc, char *argv[]) return 1; } -#ifdef IN_CONFIGURE /* Send stdout to 'config.log' */ close(1); dup(5); -#endif #ifdef WIN32 - err = WSAStartup(MAKEWORD(1, 1), &wsaData); + err = WSAStartup(MAKEWORD(2, 2), &wsaData); if (err != 0) { fprintf(stderr, "Cannot start the network subsystem - %d**\nexiting\n", err); @@ -455,5 +435,3 @@ func_call_2(void) pthread_mutex_lock(&init_mutex); /* wait for parent to test */ pthread_mutex_unlock(&init_mutex); } - -#endif /* !ENABLE_THREAD_SAFETY && !IN_CONFIGURE */ diff --git a/configure b/configure index 19a3cd09a0a35..ace4ed5decf10 100755 --- a/configure +++ b/configure @@ -16137,6 +16137,12 @@ esac ;; esac + case " $LIBOBJS " in + *" win32stat.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32stat.$ac_objext" + ;; +esac + $as_echo "#define HAVE_SYMLINK 1" >>confdefs.h @@ -18986,23 +18992,21 @@ $as_echo_n "checking thread safety of required library functions... " >&6; } _CFLAGS="$CFLAGS" _LIBS="$LIBS" -CFLAGS="$CFLAGS $PTHREAD_CFLAGS -DIN_CONFIGURE" +CFLAGS="$CFLAGS $PTHREAD_CFLAGS" LIBS="$LIBS $PTHREAD_LIBS" if test "$cross_compiling" = yes; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: maybe" >&5 $as_echo "maybe" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: *** Skipping thread test program because of cross-compile build. -*** Run the program in src/test/thread on the target machine. " >&5 $as_echo "$as_me: WARNING: *** Skipping thread test program because of cross-compile build. -*** Run the program in src/test/thread on the target machine. " >&2;} else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#include "$srcdir/src/test/thread/thread_test.c" +#include "$srcdir/config/thread_test.c" _ACEOF if ac_fn_c_try_run "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 @@ -19011,9 +19015,8 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "thread test program failed -This platform is not thread-safe. Check the file 'config.log' or compile -and run src/test/thread/thread_test for the exact reason. -Use --disable-thread-safety to disable thread safety." "$LINENO" 5 +This platform is not thread-safe. Check the file 'config.log' for the +exact reason, or use --disable-thread-safety to disable thread safety." "$LINENO" 5 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext diff --git a/configure.ac b/configure.ac index 6b9d0487a8db4..5b91c83fd07c2 100644 --- a/configure.ac +++ b/configure.ac @@ -1807,6 +1807,7 @@ if test "$PORTNAME" = "win32"; then AC_LIBOBJ(win32error) AC_LIBOBJ(win32security) AC_LIBOBJ(win32setlocale) + AC_LIBOBJ(win32stat) AC_DEFINE([HAVE_SYMLINK], 1, [Define to 1 if you have the `symlink' function.]) AC_CHECK_TYPES(MINIDUMP_TYPE, [pgac_minidump_type=yes], [pgac_minidump_type=no], [ @@ -2294,20 +2295,18 @@ AC_MSG_CHECKING([thread safety of required library functions]) _CFLAGS="$CFLAGS" _LIBS="$LIBS" -CFLAGS="$CFLAGS $PTHREAD_CFLAGS -DIN_CONFIGURE" +CFLAGS="$CFLAGS $PTHREAD_CFLAGS" LIBS="$LIBS $PTHREAD_LIBS" AC_RUN_IFELSE( - [AC_LANG_SOURCE([[#include "$srcdir/src/test/thread/thread_test.c"]])], + [AC_LANG_SOURCE([[#include "$srcdir/config/thread_test.c"]])], [AC_MSG_RESULT(yes)], [AC_MSG_RESULT(no) AC_MSG_ERROR([thread test program failed -This platform is not thread-safe. Check the file 'config.log' or compile -and run src/test/thread/thread_test for the exact reason. -Use --disable-thread-safety to disable thread safety.])], +This platform is not thread-safe. 
Check the file 'config.log' for the +exact reason, or use --disable-thread-safety to disable thread safety.])], [AC_MSG_RESULT(maybe) AC_MSG_WARN([ *** Skipping thread test program because of cross-compile build. -*** Run the program in src/test/thread on the target machine. ])]) CFLAGS="$_CFLAGS" LIBS="$_LIBS" diff --git a/contrib/Makefile b/contrib/Makefile index 1846d415b6fe7..7a4866e338db0 100644 --- a/contrib/Makefile +++ b/contrib/Makefile @@ -27,6 +27,7 @@ SUBDIRS = \ lo \ ltree \ oid2name \ + old_snapshot \ pageinspect \ passwordcheck \ pg_buffercache \ @@ -34,6 +35,7 @@ SUBDIRS = \ pg_prewarm \ pg_standby \ pg_stat_statements \ + pg_surgery \ pg_trgm \ pgcrypto \ pgrowlocks \ diff --git a/contrib/amcheck/Makefile b/contrib/amcheck/Makefile index a2b1b1036b3e3..b82f221e50bb4 100644 --- a/contrib/amcheck/Makefile +++ b/contrib/amcheck/Makefile @@ -3,13 +3,16 @@ MODULE_big = amcheck OBJS = \ $(WIN32RES) \ + verify_heapam.o \ verify_nbtree.o EXTENSION = amcheck -DATA = amcheck--1.1--1.2.sql amcheck--1.0--1.1.sql amcheck--1.0.sql +DATA = amcheck--1.2--1.3.sql amcheck--1.1--1.2.sql amcheck--1.0--1.1.sql amcheck--1.0.sql PGFILEDESC = "amcheck - function for verifying relation integrity" -REGRESS = check check_btree +REGRESS = check check_btree check_heap + +TAP_TESTS = 1 ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/amcheck/amcheck--1.2--1.3.sql b/contrib/amcheck/amcheck--1.2--1.3.sql new file mode 100644 index 0000000000000..7237ab738ce72 --- /dev/null +++ b/contrib/amcheck/amcheck--1.2--1.3.sql @@ -0,0 +1,30 @@ +/* contrib/amcheck/amcheck--1.2--1.3.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "ALTER EXTENSION amcheck UPDATE TO '1.3'" to load this file. 
\quit + +-- +-- verify_heapam() +-- +CREATE FUNCTION verify_heapam(relation regclass, + on_error_stop boolean default false, + check_toast boolean default false, + skip text default 'none', + startblock bigint default null, + endblock bigint default null, + blkno OUT bigint, + offnum OUT integer, + attnum OUT integer, + msg OUT text) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'verify_heapam' +LANGUAGE C; + +-- Don't want this to be available to public +REVOKE ALL ON FUNCTION verify_heapam(regclass, + boolean, + boolean, + text, + bigint, + bigint) +FROM PUBLIC; diff --git a/contrib/amcheck/amcheck.control b/contrib/amcheck/amcheck.control index c6e310046d4e3..ab50931f754a0 100644 --- a/contrib/amcheck/amcheck.control +++ b/contrib/amcheck/amcheck.control @@ -1,5 +1,5 @@ # amcheck extension comment = 'functions for verifying relation integrity' -default_version = '1.2' +default_version = '1.3' module_pathname = '$libdir/amcheck' relocatable = true diff --git a/contrib/amcheck/expected/check_heap.out b/contrib/amcheck/expected/check_heap.out new file mode 100644 index 0000000000000..882f853d56ac3 --- /dev/null +++ b/contrib/amcheck/expected/check_heap.out @@ -0,0 +1,194 @@ +CREATE TABLE heaptest (a integer, b text); +REVOKE ALL ON heaptest FROM PUBLIC; +-- Check that invalid skip option is rejected +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'rope'); +ERROR: invalid skip option +HINT: Valid skip options are "all-visible", "all-frozen", and "none". 
+-- Check specifying invalid block ranges when verifying an empty table +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 0); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 5, endblock := 8); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +-- Check that valid options are not rejected nor corruption reported +-- for an empty table, and that skip enum-like parameter is case-insensitive +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-frozen'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-visible'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'None'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'All-Frozen'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'All-Visible'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'NONE'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'ALL-FROZEN'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'ALL-VISIBLE'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +-- Add some data so subsequent tests are not entirely trivial +INSERT INTO heaptest (a, b) 
+ (SELECT gs, repeat('x', gs) + FROM generate_series(1,50) gs); +-- Check that valid options are not rejected nor corruption reported +-- for a non-empty table +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-frozen'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-visible'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 0); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +CREATE ROLE regress_heaptest_role; +-- verify permissions are checked (error due to function not callable) +SET ROLE regress_heaptest_role; +SELECT * FROM verify_heapam(relation := 'heaptest'); +ERROR: permission denied for function verify_heapam +RESET ROLE; +GRANT EXECUTE ON FUNCTION verify_heapam(regclass, boolean, boolean, text, bigint, bigint) TO regress_heaptest_role; +-- verify permissions are now sufficient +SET ROLE regress_heaptest_role; +SELECT * FROM verify_heapam(relation := 'heaptest'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +RESET ROLE; +-- Check specifying invalid block ranges when verifying a non-empty table. 
+SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 10000); +ERROR: ending block number must be between 0 and 0 +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 10000, endblock := 11000); +ERROR: starting block number must be between 0 and 0 +-- Vacuum freeze to change the xids encountered in subsequent tests +VACUUM FREEZE heaptest; +-- Check that valid options are not rejected nor corruption reported +-- for a non-empty frozen table +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-frozen'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-visible'); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 0); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +-- Check that partitioned tables (the parent ones) which don't have visibility +-- maps are rejected +CREATE TABLE test_partitioned (a int, b text default repeat('x', 5000)) + PARTITION BY list (a); +SELECT * FROM verify_heapam('test_partitioned', + startblock := NULL, + endblock := NULL); +ERROR: "test_partitioned" is not a table, materialized view, or TOAST table +-- Check that valid options are not rejected nor corruption reported +-- for an empty partition table (the child one) +CREATE TABLE test_partition partition OF test_partitioned FOR VALUES IN (1); +SELECT * FROM verify_heapam('test_partition', + startblock := NULL, + endblock := NULL); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +-- Check that valid options are not rejected nor corruption reported +-- for a non-empty partition table (the child one) +INSERT INTO test_partitioned (a) 
(SELECT 1 FROM generate_series(1,1000) gs); +SELECT * FROM verify_heapam('test_partition', + startblock := NULL, + endblock := NULL); + blkno | offnum | attnum | msg +-------+--------+--------+----- +(0 rows) + +-- Check that indexes are rejected +CREATE INDEX test_index ON test_partition (a); +SELECT * FROM verify_heapam('test_index', + startblock := NULL, + endblock := NULL); +ERROR: "test_index" is not a table, materialized view, or TOAST table +-- Check that views are rejected +CREATE VIEW test_view AS SELECT 1; +SELECT * FROM verify_heapam('test_view', + startblock := NULL, + endblock := NULL); +ERROR: "test_view" is not a table, materialized view, or TOAST table +-- Check that sequences are rejected +CREATE SEQUENCE test_sequence; +SELECT * FROM verify_heapam('test_sequence', + startblock := NULL, + endblock := NULL); +ERROR: "test_sequence" is not a table, materialized view, or TOAST table +-- Check that foreign tables are rejected +CREATE FOREIGN DATA WRAPPER dummy; +CREATE SERVER dummy_server FOREIGN DATA WRAPPER dummy; +CREATE FOREIGN TABLE test_foreign_table () SERVER dummy_server; +SELECT * FROM verify_heapam('test_foreign_table', + startblock := NULL, + endblock := NULL); +ERROR: "test_foreign_table" is not a table, materialized view, or TOAST table +-- cleanup +DROP TABLE heaptest; +DROP TABLE test_partition; +DROP TABLE test_partitioned; +DROP OWNED BY regress_heaptest_role; -- permissions +DROP ROLE regress_heaptest_role; diff --git a/contrib/amcheck/sql/check_heap.sql b/contrib/amcheck/sql/check_heap.sql new file mode 100644 index 0000000000000..c10a25f21cb89 --- /dev/null +++ b/contrib/amcheck/sql/check_heap.sql @@ -0,0 +1,116 @@ +CREATE TABLE heaptest (a integer, b text); +REVOKE ALL ON heaptest FROM PUBLIC; + +-- Check that invalid skip option is rejected +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'rope'); + +-- Check specifying invalid block ranges when verifying an empty table +SELECT * FROM verify_heapam(relation := 
'heaptest', startblock := 0, endblock := 0); +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 5, endblock := 8); + +-- Check that valid options are not rejected nor corruption reported +-- for an empty table, and that skip enum-like parameter is case-insensitive +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-frozen'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-visible'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'None'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'All-Frozen'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'All-Visible'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'NONE'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'ALL-FROZEN'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'ALL-VISIBLE'); + +-- Add some data so subsequent tests are not entirely trivial +INSERT INTO heaptest (a, b) + (SELECT gs, repeat('x', gs) + FROM generate_series(1,50) gs); + +-- Check that valid options are not rejected nor corruption reported +-- for a non-empty table +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-frozen'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-visible'); +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 0); + +CREATE ROLE regress_heaptest_role; + +-- verify permissions are checked (error due to function not callable) +SET ROLE regress_heaptest_role; +SELECT * FROM verify_heapam(relation := 'heaptest'); +RESET ROLE; + +GRANT EXECUTE ON FUNCTION verify_heapam(regclass, boolean, boolean, text, bigint, bigint) TO regress_heaptest_role; + +-- verify permissions are now sufficient +SET ROLE regress_heaptest_role; +SELECT * FROM verify_heapam(relation := 'heaptest'); +RESET ROLE; + +-- 
Check specifying invalid block ranges when verifying a non-empty table. +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 10000); +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 10000, endblock := 11000); + +-- Vacuum freeze to change the xids encountered in subsequent tests +VACUUM FREEZE heaptest; + +-- Check that valid options are not rejected nor corruption reported +-- for a non-empty frozen table +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'none'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-frozen'); +SELECT * FROM verify_heapam(relation := 'heaptest', skip := 'all-visible'); +SELECT * FROM verify_heapam(relation := 'heaptest', startblock := 0, endblock := 0); + +-- Check that partitioned tables (the parent ones) which don't have visibility +-- maps are rejected +CREATE TABLE test_partitioned (a int, b text default repeat('x', 5000)) + PARTITION BY list (a); +SELECT * FROM verify_heapam('test_partitioned', + startblock := NULL, + endblock := NULL); + +-- Check that valid options are not rejected nor corruption reported +-- for an empty partition table (the child one) +CREATE TABLE test_partition partition OF test_partitioned FOR VALUES IN (1); +SELECT * FROM verify_heapam('test_partition', + startblock := NULL, + endblock := NULL); + +-- Check that valid options are not rejected nor corruption reported +-- for a non-empty partition table (the child one) +INSERT INTO test_partitioned (a) (SELECT 1 FROM generate_series(1,1000) gs); +SELECT * FROM verify_heapam('test_partition', + startblock := NULL, + endblock := NULL); + +-- Check that indexes are rejected +CREATE INDEX test_index ON test_partition (a); +SELECT * FROM verify_heapam('test_index', + startblock := NULL, + endblock := NULL); + +-- Check that views are rejected +CREATE VIEW test_view AS SELECT 1; +SELECT * FROM verify_heapam('test_view', + startblock := NULL, + endblock := NULL); + +-- Check that sequences 
are rejected +CREATE SEQUENCE test_sequence; +SELECT * FROM verify_heapam('test_sequence', + startblock := NULL, + endblock := NULL); + +-- Check that foreign tables are rejected +CREATE FOREIGN DATA WRAPPER dummy; +CREATE SERVER dummy_server FOREIGN DATA WRAPPER dummy; +CREATE FOREIGN TABLE test_foreign_table () SERVER dummy_server; +SELECT * FROM verify_heapam('test_foreign_table', + startblock := NULL, + endblock := NULL); + +-- cleanup +DROP TABLE heaptest; +DROP TABLE test_partition; +DROP TABLE test_partitioned; +DROP OWNED BY regress_heaptest_role; -- permissions +DROP ROLE regress_heaptest_role; diff --git a/contrib/amcheck/t/001_verify_heapam.pl b/contrib/amcheck/t/001_verify_heapam.pl new file mode 100644 index 0000000000000..1581e51f3ca7f --- /dev/null +++ b/contrib/amcheck/t/001_verify_heapam.pl @@ -0,0 +1,196 @@ +use strict; +use warnings; + +use PostgresNode; +use TestLib; + +use Test::More tests => 79; + +my ($node, $result); + +# +# Test set-up +# +$node = get_new_node('test'); +$node->init; +$node->append_conf('postgresql.conf', 'autovacuum=off'); +$node->start; +$node->safe_psql('postgres', q(CREATE EXTENSION amcheck)); + +# +# Check a table with data loaded but no corruption, freezing, etc. 
+# +fresh_test_table('test'); +check_all_options_uncorrupted('test', 'plain'); + +# +# Check a corrupt table +# +fresh_test_table('test'); +corrupt_first_page('test'); +detects_heap_corruption("verify_heapam('test')", "plain corrupted table"); +detects_heap_corruption( + "verify_heapam('test', skip := 'all-visible')", + "plain corrupted table skipping all-visible"); +detects_heap_corruption( + "verify_heapam('test', skip := 'all-frozen')", + "plain corrupted table skipping all-frozen"); +detects_heap_corruption( + "verify_heapam('test', check_toast := false)", + "plain corrupted table skipping toast"); +detects_heap_corruption( + "verify_heapam('test', startblock := 0, endblock := 0)", + "plain corrupted table checking only block zero"); + +# +# Check a corrupt table with all-frozen data +# +fresh_test_table('test'); +$node->safe_psql('postgres', q(VACUUM FREEZE test)); +corrupt_first_page('test'); +detects_heap_corruption("verify_heapam('test')", + "all-frozen corrupted table"); +detects_no_corruption( + "verify_heapam('test', skip := 'all-frozen')", + "all-frozen corrupted table skipping all-frozen"); + +# Returns the filesystem path for the named relation. +sub relation_filepath +{ + my ($relname) = @_; + + my $pgdata = $node->data_dir; + my $rel = $node->safe_psql('postgres', + qq(SELECT pg_relation_filepath('$relname'))); + die "path not found for relation $relname" unless defined $rel; + return "$pgdata/$rel"; +} + +# Returns the fully qualified name of the toast table for the named relation +sub get_toast_for +{ + my ($relname) = @_; + + return $node->safe_psql( + 'postgres', qq( + SELECT 'pg_toast.' || t.relname + FROM pg_catalog.pg_class c, pg_catalog.pg_class t + WHERE c.relname = '$relname' + AND c.reltoastrelid = t.oid)); +} + +# (Re)create and populate a test table of the given name. 
+sub fresh_test_table +{ + my ($relname) = @_; + + return $node->safe_psql( + 'postgres', qq( + DROP TABLE IF EXISTS $relname CASCADE; + CREATE TABLE $relname (a integer, b text); + ALTER TABLE $relname SET (autovacuum_enabled=false); + ALTER TABLE $relname ALTER b SET STORAGE external; + INSERT INTO $relname (a, b) + (SELECT gs, repeat('b',gs*10) FROM generate_series(1,1000) gs); + )); +} + +# Stops the test node, corrupts the first page of the named relation, and +# restarts the node. +sub corrupt_first_page +{ + my ($relname) = @_; + my $relpath = relation_filepath($relname); + + $node->stop; + + my $fh; + open($fh, '+<', $relpath) + or BAIL_OUT("open failed: $!"); + binmode $fh; + + # Corrupt some line pointers. The values are chosen to hit the + # various line-pointer-corruption checks in verify_heapam.c + # on both little-endian and big-endian architectures. + seek($fh, 32, 0) + or BAIL_OUT("seek failed: $!"); + syswrite( + $fh, + pack("L*", + 0xAAA15550, 0xAAA0D550, 0x00010000, + 0x00008000, 0x0000800F, 0x001e8000) + ) or BAIL_OUT("syswrite failed: $!"); + close($fh) + or BAIL_OUT("close failed: $!"); + + $node->start; +} + +sub detects_heap_corruption +{ + my ($function, $testname) = @_; + + detects_corruption( + $function, + $testname, + qr/line pointer redirection to item at offset \d+ precedes minimum offset \d+/, + qr/line pointer redirection to item at offset \d+ exceeds maximum offset \d+/, + qr/line pointer to page offset \d+ is not maximally aligned/, + qr/line pointer length \d+ is less than the minimum tuple header size \d+/, + qr/line pointer to page offset \d+ with length \d+ ends beyond maximum page offset \d+/, + ); +} + +sub detects_corruption +{ + my ($function, $testname, @re) = @_; + + my $result = $node->safe_psql('postgres', qq(SELECT * FROM $function)); + like($result, $_, $testname) for (@re); +} + +sub detects_no_corruption +{ + my ($function, $testname) = @_; + + my $result = $node->safe_psql('postgres', qq(SELECT * FROM $function)); 
+ is($result, '', $testname); +} + +# Check various options are stable (don't abort) and do not report corruption +# when running verify_heapam on an uncorrupted test table. +# +# The relname *must* be an uncorrupted table, or this will fail. +# +# The prefix is used to identify the test, along with the options, +# and should be unique. +sub check_all_options_uncorrupted +{ + my ($relname, $prefix) = @_; + + for my $stop (qw(true false)) + { + for my $check_toast (qw(true false)) + { + for my $skip ("'none'", "'all-frozen'", "'all-visible'") + { + for my $startblock (qw(NULL 0)) + { + for my $endblock (qw(NULL 0)) + { + my $opts = + "on_error_stop := $stop, " + . "check_toast := $check_toast, " + . "skip := $skip, " + . "startblock := $startblock, " + . "endblock := $endblock"; + + detects_no_corruption( + "verify_heapam('$relname', $opts)", + "$prefix: $opts"); + } + } + } + } + } +} diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c new file mode 100644 index 0000000000000..8bb890438aa95 --- /dev/null +++ b/contrib/amcheck/verify_heapam.c @@ -0,0 +1,1470 @@ +/*------------------------------------------------------------------------- + * + * verify_heapam.c + * Functions to check postgresql heap relations for corruption + * + * Copyright (c) 2016-2020, PostgreSQL Global Development Group + * + * contrib/amcheck/verify_heapam.c + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/detoast.h" +#include "access/genam.h" +#include "access/heapam.h" +#include "access/heaptoast.h" +#include "access/multixact.h" +#include "access/toast_internals.h" +#include "access/visibilitymap.h" +#include "catalog/pg_am.h" +#include "funcapi.h" +#include "miscadmin.h" +#include "storage/bufmgr.h" +#include "storage/procarray.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" + +PG_FUNCTION_INFO_V1(verify_heapam); + +/* The number of columns in tuples returned by verify_heapam 
*/ +#define HEAPCHECK_RELATION_COLS 4 + +/* + * Despite the name, we use this for reporting problems with both XIDs and + * MXIDs. + */ +typedef enum XidBoundsViolation +{ + XID_INVALID, + XID_IN_FUTURE, + XID_PRECEDES_CLUSTERMIN, + XID_PRECEDES_RELMIN, + XID_BOUNDS_OK +} XidBoundsViolation; + +typedef enum XidCommitStatus +{ + XID_COMMITTED, + XID_IN_PROGRESS, + XID_ABORTED +} XidCommitStatus; + +typedef enum SkipPages +{ + SKIP_PAGES_ALL_FROZEN, + SKIP_PAGES_ALL_VISIBLE, + SKIP_PAGES_NONE +} SkipPages; + +/* + * Struct holding the running context information during + * a lifetime of a verify_heapam execution. + */ +typedef struct HeapCheckContext +{ + /* + * Cached copies of values from ShmemVariableCache and computed values + * from them. + */ + FullTransactionId next_fxid; /* ShmemVariableCache->nextXid */ + TransactionId next_xid; /* 32-bit version of next_fxid */ + TransactionId oldest_xid; /* ShmemVariableCache->oldestXid */ + FullTransactionId oldest_fxid; /* 64-bit version of oldest_xid, computed + * relative to next_fxid */ + + /* + * Cached copy of value from MultiXactState + */ + MultiXactId next_mxact; /* MultiXactState->nextMXact */ + MultiXactId oldest_mxact; /* MultiXactState->oldestMultiXactId */ + + /* + * Cached copies of the most recently checked xid and its status. 
+ */ + TransactionId cached_xid; + XidCommitStatus cached_status; + + /* Values concerning the heap relation being checked */ + Relation rel; + TransactionId relfrozenxid; + FullTransactionId relfrozenfxid; + TransactionId relminmxid; + Relation toast_rel; + Relation *toast_indexes; + Relation valid_toast_index; + int num_toast_indexes; + + /* Values for iterating over pages in the relation */ + BlockNumber blkno; + BufferAccessStrategy bstrategy; + Buffer buffer; + Page page; + + /* Values for iterating over tuples within a page */ + OffsetNumber offnum; + ItemId itemid; + uint16 lp_len; + uint16 lp_off; + HeapTupleHeader tuphdr; + int natts; + + /* Values for iterating over attributes within the tuple */ + uint32 offset; /* offset in tuple data */ + AttrNumber attnum; + + /* Values for iterating over toast for the attribute */ + int32 chunkno; + int32 attrsize; + int32 endchunk; + int32 totalchunks; + + /* Whether verify_heapam has yet encountered any corrupt tuples */ + bool is_corrupt; + + /* The descriptor and tuplestore for verify_heapam's result tuples */ + TupleDesc tupdesc; + Tuplestorestate *tupstore; +} HeapCheckContext; + +/* Internal implementation */ +static void sanity_check_relation(Relation rel); +static void check_tuple(HeapCheckContext *ctx); +static void check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx); + +static bool check_tuple_attribute(HeapCheckContext *ctx); +static bool check_tuple_header_and_visibilty(HeapTupleHeader tuphdr, + HeapCheckContext *ctx); + +static void report_corruption(HeapCheckContext *ctx, char *msg); +static TupleDesc verify_heapam_tupdesc(void); +static FullTransactionId FullTransactionIdFromXidAndCtx(TransactionId xid, + const HeapCheckContext *ctx); +static void update_cached_xid_range(HeapCheckContext *ctx); +static void update_cached_mxid_range(HeapCheckContext *ctx); +static XidBoundsViolation check_mxid_in_range(MultiXactId mxid, + HeapCheckContext *ctx); +static XidBoundsViolation 
check_mxid_valid_in_rel(MultiXactId mxid, + HeapCheckContext *ctx); +static XidBoundsViolation get_xid_status(TransactionId xid, + HeapCheckContext *ctx, + XidCommitStatus *status); + +/* + * Scan and report corruption in heap pages, optionally reconciling toasted + * attributes with entries in the associated toast table. Intended to be + * called from SQL with the following parameters: + * + * relation: + * The Oid of the heap relation to be checked. + * + * on_error_stop: + * Whether to stop at the end of the first page for which errors are + * detected. Note that multiple rows may be returned. + * + * check_toast: + * Whether to check each toasted attribute against the toast table to + * verify that it can be found there. + * + * skip: + * What kinds of pages in the heap relation should be skipped. Valid + * options are "all-visible", "all-frozen", and "none". + * + * Returns to the SQL caller a set of tuples, each containing the location + * and a description of a corruption found in the heap. + * + * This code goes to some trouble to avoid crashing the server even if the + * table pages are badly corrupted, but it's probably not perfect. If + * check_toast is true, we'll use regular index lookups to try to fetch TOAST + * tuples, which can certainly cause crashes if the right kind of corruption + * exists in the toast table or index. No matter what parameters you pass, + * we can't protect against crashes that might occur trying to look up the + * commit status of transaction IDs (though we avoid trying to do such lookups + * for transaction IDs that can't legally appear in the table). 
+ */ +Datum +verify_heapam(PG_FUNCTION_ARGS) +{ + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + MemoryContext old_context; + bool random_access; + HeapCheckContext ctx; + Buffer vmbuffer = InvalidBuffer; + Oid relid; + bool on_error_stop; + bool check_toast; + SkipPages skip_option = SKIP_PAGES_NONE; + BlockNumber first_block; + BlockNumber last_block; + BlockNumber nblocks; + const char *skip; + + /* Check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("materialize mode required, but it is not allowed in this context"))); + + /* Check supplied arguments */ + if (PG_ARGISNULL(0)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation cannot be null"))); + relid = PG_GETARG_OID(0); + + if (PG_ARGISNULL(1)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("on_error_stop cannot be null"))); + on_error_stop = PG_GETARG_BOOL(1); + + if (PG_ARGISNULL(2)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("check_toast cannot be null"))); + check_toast = PG_GETARG_BOOL(2); + + if (PG_ARGISNULL(3)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("skip cannot be null"))); + skip = text_to_cstring(PG_GETARG_TEXT_PP(3)); + if (pg_strcasecmp(skip, "all-visible") == 0) + skip_option = SKIP_PAGES_ALL_VISIBLE; + else if (pg_strcasecmp(skip, "all-frozen") == 0) + skip_option = SKIP_PAGES_ALL_FROZEN; + else if (pg_strcasecmp(skip, "none") == 0) + skip_option = SKIP_PAGES_NONE; + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid skip option"), + errhint("Valid skip options are \"all-visible\", \"all-frozen\", and \"none\"."))); + + 
memset(&ctx, 0, sizeof(HeapCheckContext)); + ctx.cached_xid = InvalidTransactionId; + + /* + * If we report corruption when not examining some individual attribute, + * we need attnum to be reported as NULL. Set that up before any + * corruption reporting might happen. + */ + ctx.attnum = -1; + + /* The tupdesc and tuplestore must be created in ecxt_per_query_memory */ + old_context = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); + random_access = (rsinfo->allowedModes & SFRM_Materialize_Random) != 0; + ctx.tupdesc = verify_heapam_tupdesc(); + ctx.tupstore = tuplestore_begin_heap(random_access, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = ctx.tupstore; + rsinfo->setDesc = ctx.tupdesc; + MemoryContextSwitchTo(old_context); + + /* Open relation, check relkind and access method, and check privileges */ + ctx.rel = relation_open(relid, AccessShareLock); + sanity_check_relation(ctx.rel); + + /* Early exit if the relation is empty */ + nblocks = RelationGetNumberOfBlocks(ctx.rel); + if (!nblocks) + { + relation_close(ctx.rel, AccessShareLock); + PG_RETURN_NULL(); + } + + ctx.bstrategy = GetAccessStrategy(BAS_BULKREAD); + ctx.buffer = InvalidBuffer; + ctx.page = NULL; + + /* Validate block numbers, or handle nulls. */ + if (PG_ARGISNULL(4)) + first_block = 0; + else + { + int64 fb = PG_GETARG_INT64(4); + + if (fb < 0 || fb >= nblocks) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("starting block number must be between 0 and %u", + nblocks - 1))); + first_block = (BlockNumber) fb; + } + if (PG_ARGISNULL(5)) + last_block = nblocks - 1; + else + { + int64 lb = PG_GETARG_INT64(5); + + if (lb < 0 || lb >= nblocks) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("ending block number must be between 0 and %u", + nblocks - 1))); + last_block = (BlockNumber) lb; + } + + /* Optionally open the toast relation, if any. 
*/ + if (ctx.rel->rd_rel->reltoastrelid && check_toast) + { + int offset; + + /* Main relation has associated toast relation */ + ctx.toast_rel = table_open(ctx.rel->rd_rel->reltoastrelid, + AccessShareLock); + offset = toast_open_indexes(ctx.toast_rel, + AccessShareLock, + &(ctx.toast_indexes), + &(ctx.num_toast_indexes)); + ctx.valid_toast_index = ctx.toast_indexes[offset]; + } + else + { + /* + * Main relation has no associated toast relation, or we're + * intentionally skipping it. + */ + ctx.toast_rel = NULL; + ctx.toast_indexes = NULL; + ctx.num_toast_indexes = 0; + } + + update_cached_xid_range(&ctx); + update_cached_mxid_range(&ctx); + ctx.relfrozenxid = ctx.rel->rd_rel->relfrozenxid; + ctx.relfrozenfxid = FullTransactionIdFromXidAndCtx(ctx.relfrozenxid, &ctx); + ctx.relminmxid = ctx.rel->rd_rel->relminmxid; + + if (TransactionIdIsNormal(ctx.relfrozenxid)) + ctx.oldest_xid = ctx.relfrozenxid; + + for (ctx.blkno = first_block; ctx.blkno <= last_block; ctx.blkno++) + { + OffsetNumber maxoff; + + /* Optionally skip over all-frozen or all-visible blocks */ + if (skip_option != SKIP_PAGES_NONE) + { + int32 mapbits; + + mapbits = (int32) visibilitymap_get_status(ctx.rel, ctx.blkno, + &vmbuffer); + if (skip_option == SKIP_PAGES_ALL_FROZEN) + { + if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0) + continue; + } + + if (skip_option == SKIP_PAGES_ALL_VISIBLE) + { + if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0) + continue; + } + } + + /* Read and lock the next page. 
*/ + ctx.buffer = ReadBufferExtended(ctx.rel, MAIN_FORKNUM, ctx.blkno, + RBM_NORMAL, ctx.bstrategy); + LockBuffer(ctx.buffer, BUFFER_LOCK_SHARE); + ctx.page = BufferGetPage(ctx.buffer); + + /* Perform tuple checks */ + maxoff = PageGetMaxOffsetNumber(ctx.page); + for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff; + ctx.offnum = OffsetNumberNext(ctx.offnum)) + { + ctx.itemid = PageGetItemId(ctx.page, ctx.offnum); + + /* Skip over unused/dead line pointers */ + if (!ItemIdIsUsed(ctx.itemid) || ItemIdIsDead(ctx.itemid)) + continue; + + /* + * If this line pointer has been redirected, check that it + * redirects to a valid offset within the line pointer array + */ + if (ItemIdIsRedirected(ctx.itemid)) + { + OffsetNumber rdoffnum = ItemIdGetRedirect(ctx.itemid); + ItemId rditem; + + if (rdoffnum < FirstOffsetNumber) + { + report_corruption(&ctx, + psprintf("line pointer redirection to item at offset %u precedes minimum offset %u", + (unsigned) rdoffnum, + (unsigned) FirstOffsetNumber)); + continue; + } + if (rdoffnum > maxoff) + { + report_corruption(&ctx, + psprintf("line pointer redirection to item at offset %u exceeds maximum offset %u", + (unsigned) rdoffnum, + (unsigned) maxoff)); + continue; + } + rditem = PageGetItemId(ctx.page, rdoffnum); + if (!ItemIdIsUsed(rditem)) + report_corruption(&ctx, + psprintf("line pointer redirection to unused item at offset %u", + (unsigned) rdoffnum)); + continue; + } + + /* Sanity-check the line pointer's offset and length values */ + ctx.lp_len = ItemIdGetLength(ctx.itemid); + ctx.lp_off = ItemIdGetOffset(ctx.itemid); + + if (ctx.lp_off != MAXALIGN(ctx.lp_off)) + { + report_corruption(&ctx, + psprintf("line pointer to page offset %u is not maximally aligned", + ctx.lp_off)); + continue; + } + if (ctx.lp_len < MAXALIGN(SizeofHeapTupleHeader)) + { + report_corruption(&ctx, + psprintf("line pointer length %u is less than the minimum tuple header size %u", + ctx.lp_len, + (unsigned) MAXALIGN(SizeofHeapTupleHeader))); + 
continue; + } + if (ctx.lp_off + ctx.lp_len > BLCKSZ) + { + report_corruption(&ctx, + psprintf("line pointer to page offset %u with length %u ends beyond maximum page offset %u", + ctx.lp_off, + ctx.lp_len, + (unsigned) BLCKSZ)); + continue; + } + + /* It should be safe to examine the tuple's header, at least */ + ctx.tuphdr = (HeapTupleHeader) PageGetItem(ctx.page, ctx.itemid); + ctx.natts = HeapTupleHeaderGetNatts(ctx.tuphdr); + + /* Ok, ready to check this next tuple */ + check_tuple(&ctx); + } + + /* clean up */ + UnlockReleaseBuffer(ctx.buffer); + + if (on_error_stop && ctx.is_corrupt) + break; + } + + if (vmbuffer != InvalidBuffer) + ReleaseBuffer(vmbuffer); + + /* Close the associated toast table and indexes, if any. */ + if (ctx.toast_indexes) + toast_close_indexes(ctx.toast_indexes, ctx.num_toast_indexes, + AccessShareLock); + if (ctx.toast_rel) + table_close(ctx.toast_rel, AccessShareLock); + + /* Close the main relation */ + relation_close(ctx.rel, AccessShareLock); + + PG_RETURN_NULL(); +} + +/* + * Check that a relation's relkind and access method are both supported, + * and that the caller has select privilege on the relation. + */ +static void +sanity_check_relation(Relation rel) +{ + if (rel->rd_rel->relkind != RELKIND_RELATION && + rel->rd_rel->relkind != RELKIND_MATVIEW && + rel->rd_rel->relkind != RELKIND_TOASTVALUE) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is not a table, materialized view, or TOAST table", + RelationGetRelationName(rel)))); + if (rel->rd_rel->relam != HEAP_TABLE_AM_OID) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("only heap AM is supported"))); +} + +/* + * Record a single corruption found in the table. The values in ctx should + * reflect the location of the corruption, and the msg argument should contain + * a human readable description of the corruption. + * + * The msg argument is pfree'd by this function. 
+ */ +static void +report_corruption(HeapCheckContext *ctx, char *msg) +{ + Datum values[HEAPCHECK_RELATION_COLS]; + bool nulls[HEAPCHECK_RELATION_COLS]; + HeapTuple tuple; + + MemSet(values, 0, sizeof(values)); + MemSet(nulls, 0, sizeof(nulls)); + values[0] = Int64GetDatum(ctx->blkno); + values[1] = Int32GetDatum(ctx->offnum); + values[2] = Int32GetDatum(ctx->attnum); + nulls[2] = (ctx->attnum < 0); + values[3] = CStringGetTextDatum(msg); + + /* + * In principle, there is nothing to prevent a scan over a large, highly + * corrupted table from using work_mem worth of memory building up the + * tuplestore. That's ok, but if we also leak the msg argument memory + * until the end of the query, we could exceed work_mem by more than a + * trivial amount. Therefore, free the msg argument each time we are + * called rather than waiting for our current memory context to be freed. + */ + pfree(msg); + + tuple = heap_form_tuple(ctx->tupdesc, values, nulls); + tuplestore_puttuple(ctx->tupstore, tuple); + ctx->is_corrupt = true; +} + +/* + * Construct the TupleDesc used to report messages about corruptions found + * while scanning the heap. + */ +static TupleDesc +verify_heapam_tupdesc(void) +{ + TupleDesc tupdesc; + AttrNumber a = 0; + + tupdesc = CreateTemplateTupleDesc(HEAPCHECK_RELATION_COLS); + TupleDescInitEntry(tupdesc, ++a, "blkno", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, ++a, "offnum", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, ++a, "attnum", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, ++a, "msg", TEXTOID, -1, 0); + Assert(a == HEAPCHECK_RELATION_COLS); + + return BlessTupleDesc(tupdesc); +} + +/* + * Check for tuple header corruption and tuple visibility. + * + * Since we do not hold a snapshot, tuple visibility is not a question of + * whether we should be able to see the tuple relative to any particular + * snapshot, but rather a question of whether it is safe and reasonable + * to check the tuple attributes. 
+ * + * Some kinds of corruption make it unsafe to check the tuple attributes, for + * example when the line pointer refers to a range of bytes outside the page. + * In such cases, we return false (not visible) after recording appropriate + * corruption messages. + * + * Some other kinds of tuple header corruption confuse the question of where + * the tuple attributes begin, or how long the nulls bitmap is, etc., making it + * unreasonable to attempt to check attributes, even if all candidate answers + * to those questions would not result in reading past the end of the line + * pointer or page. In such cases, like above, we record corruption messages + * about the header and then return false. + * + * Other kinds of tuple header corruption do not bear on the question of + * whether the tuple attributes can be checked, so we record corruption + * messages for them but do not base our visibility determination on them. (In + * other words, we do not return false merely because we detected them.) + * + * For visibility determination not specifically related to corruption, what we + * want to know is if a tuple is potentially visible to any running + * transaction. If you are tempted to replace this function's visibility logic + * with a call to another visibility checking function, keep in mind that this + * function does not update hint bits, as it seems imprudent to write hint bits + * (or anything at all) to a table during a corruption check. Nor does this + * function bother classifying tuple visibility beyond a boolean visible vs. + * not visible. + * + * The caller should already have checked that xmin and xmax are not out of + * bounds for the relation. + * + * Returns whether the tuple is both visible and sufficiently sensible to + * undergo attribute checks. 
+ */ +static bool +check_tuple_header_and_visibilty(HeapTupleHeader tuphdr, HeapCheckContext *ctx) +{ + uint16 infomask = tuphdr->t_infomask; + bool header_garbled = false; + unsigned expected_hoff; + + if (ctx->tuphdr->t_hoff > ctx->lp_len) + { + report_corruption(ctx, + psprintf("data begins at offset %u beyond the tuple length %u", + ctx->tuphdr->t_hoff, ctx->lp_len)); + header_garbled = true; + } + if ((ctx->tuphdr->t_infomask & HEAP_XMAX_LOCK_ONLY) && + (ctx->tuphdr->t_infomask2 & HEAP_KEYS_UPDATED)) + { + report_corruption(ctx, + pstrdup("tuple is marked as only locked, but also claims key columns were updated")); + header_garbled = true; + } + + if ((ctx->tuphdr->t_infomask & HEAP_XMAX_COMMITTED) && + (ctx->tuphdr->t_infomask & HEAP_XMAX_IS_MULTI)) + { + report_corruption(ctx, + pstrdup("multixact should not be marked committed")); + + /* + * This condition is clearly wrong, but we do not consider the header + * garbled, because we don't rely on this property for determining if + * the tuple is visible or for interpreting other relevant header + * fields. 
+ */ + } + + if (infomask & HEAP_HASNULL) + expected_hoff = MAXALIGN(SizeofHeapTupleHeader + BITMAPLEN(ctx->natts)); + else + expected_hoff = MAXALIGN(SizeofHeapTupleHeader); + if (ctx->tuphdr->t_hoff != expected_hoff) + { + if ((infomask & HEAP_HASNULL) && ctx->natts == 1) + report_corruption(ctx, + psprintf("tuple data should begin at byte %u, but actually begins at byte %u (1 attribute, has nulls)", + expected_hoff, ctx->tuphdr->t_hoff)); + else if ((infomask & HEAP_HASNULL)) + report_corruption(ctx, + psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%u attributes, has nulls)", + expected_hoff, ctx->tuphdr->t_hoff, ctx->natts)); + else if (ctx->natts == 1) + report_corruption(ctx, + psprintf("tuple data should begin at byte %u, but actually begins at byte %u (1 attribute, no nulls)", + expected_hoff, ctx->tuphdr->t_hoff)); + else + report_corruption(ctx, + psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%u attributes, no nulls)", + expected_hoff, ctx->tuphdr->t_hoff, ctx->natts)); + header_garbled = true; + } + + if (header_garbled) + return false; /* checking of this tuple should not continue */ + + /* + * Ok, we can examine the header for tuple visibility purposes, though we + * still need to be careful about a few remaining types of header + * corruption. This logic roughly follows that of + * HeapTupleSatisfiesVacuum. Where possible the comments indicate which + * HTSV_Result we think that function might return for this tuple. 
+ */ + if (!HeapTupleHeaderXminCommitted(tuphdr)) + { + TransactionId raw_xmin = HeapTupleHeaderGetRawXmin(tuphdr); + + if (HeapTupleHeaderXminInvalid(tuphdr)) + return false; /* HEAPTUPLE_DEAD */ + /* Used by pre-9.0 binary upgrades */ + else if (infomask & HEAP_MOVED_OFF || + infomask & HEAP_MOVED_IN) + { + XidCommitStatus status; + TransactionId xvac = HeapTupleHeaderGetXvac(tuphdr); + + switch (get_xid_status(xvac, ctx, &status)) + { + case XID_INVALID: + report_corruption(ctx, + pstrdup("old-style VACUUM FULL transaction ID is invalid")); + return false; /* corrupt */ + case XID_IN_FUTURE: + report_corruption(ctx, + psprintf("old-style VACUUM FULL transaction ID %u equals or exceeds next valid transaction ID %u:%u", + xvac, + EpochFromFullTransactionId(ctx->next_fxid), + XidFromFullTransactionId(ctx->next_fxid))); + return false; /* corrupt */ + case XID_PRECEDES_RELMIN: + report_corruption(ctx, + psprintf("old-style VACUUM FULL transaction ID %u precedes relation freeze threshold %u:%u", + xvac, + EpochFromFullTransactionId(ctx->relfrozenfxid), + XidFromFullTransactionId(ctx->relfrozenfxid))); + return false; /* corrupt */ + break; + case XID_PRECEDES_CLUSTERMIN: + report_corruption(ctx, + psprintf("old-style VACUUM FULL transaction ID %u precedes oldest valid transaction ID %u:%u", + xvac, + EpochFromFullTransactionId(ctx->oldest_fxid), + XidFromFullTransactionId(ctx->oldest_fxid))); + return false; /* corrupt */ + break; + case XID_BOUNDS_OK: + switch (status) + { + case XID_IN_PROGRESS: + return true; /* HEAPTUPLE_DELETE_IN_PROGRESS */ + case XID_COMMITTED: + case XID_ABORTED: + return false; /* HEAPTUPLE_DEAD */ + } + } + } + else + { + XidCommitStatus status; + + switch (get_xid_status(raw_xmin, ctx, &status)) + { + case XID_INVALID: + report_corruption(ctx, + pstrdup("raw xmin is invalid")); + return false; + case XID_IN_FUTURE: + report_corruption(ctx, + psprintf("raw xmin %u equals or exceeds next valid transaction ID %u:%u", + raw_xmin, + 
EpochFromFullTransactionId(ctx->next_fxid), + XidFromFullTransactionId(ctx->next_fxid))); + return false; /* corrupt */ + case XID_PRECEDES_RELMIN: + report_corruption(ctx, + psprintf("raw xmin %u precedes relation freeze threshold %u:%u", + raw_xmin, + EpochFromFullTransactionId(ctx->relfrozenfxid), + XidFromFullTransactionId(ctx->relfrozenfxid))); + return false; /* corrupt */ + case XID_PRECEDES_CLUSTERMIN: + report_corruption(ctx, + psprintf("raw xmin %u precedes oldest valid transaction ID %u:%u", + raw_xmin, + EpochFromFullTransactionId(ctx->oldest_fxid), + XidFromFullTransactionId(ctx->oldest_fxid))); + return false; /* corrupt */ + case XID_BOUNDS_OK: + switch (status) + { + case XID_COMMITTED: + break; + case XID_IN_PROGRESS: + return true; /* insert or delete in progress */ + case XID_ABORTED: + return false; /* HEAPTUPLE_DEAD */ + } + } + } + } + + if (!(infomask & HEAP_XMAX_INVALID) && !HEAP_XMAX_IS_LOCKED_ONLY(infomask)) + { + if (infomask & HEAP_XMAX_IS_MULTI) + { + XidCommitStatus status; + TransactionId xmax = HeapTupleGetUpdateXid(tuphdr); + + switch (get_xid_status(xmax, ctx, &status)) + { + /* not LOCKED_ONLY, so it has to have an xmax */ + case XID_INVALID: + report_corruption(ctx, + pstrdup("xmax is invalid")); + return false; /* corrupt */ + case XID_IN_FUTURE: + report_corruption(ctx, + psprintf("xmax %u equals or exceeds next valid transaction ID %u:%u", + xmax, + EpochFromFullTransactionId(ctx->next_fxid), + XidFromFullTransactionId(ctx->next_fxid))); + return false; /* corrupt */ + case XID_PRECEDES_RELMIN: + report_corruption(ctx, + psprintf("xmax %u precedes relation freeze threshold %u:%u", + xmax, + EpochFromFullTransactionId(ctx->relfrozenfxid), + XidFromFullTransactionId(ctx->relfrozenfxid))); + return false; /* corrupt */ + case XID_PRECEDES_CLUSTERMIN: + report_corruption(ctx, + psprintf("xmax %u precedes oldest valid transaction ID %u:%u", + xmax, + EpochFromFullTransactionId(ctx->oldest_fxid), + 
XidFromFullTransactionId(ctx->oldest_fxid))); + return false; /* corrupt */ + case XID_BOUNDS_OK: + switch (status) + { + case XID_IN_PROGRESS: + return true; /* HEAPTUPLE_DELETE_IN_PROGRESS */ + case XID_COMMITTED: + case XID_ABORTED: + return false; /* HEAPTUPLE_RECENTLY_DEAD or + * HEAPTUPLE_DEAD */ + } + } + + /* Ok, the tuple is live */ + } + else if (!(infomask & HEAP_XMAX_COMMITTED)) + return true; /* HEAPTUPLE_DELETE_IN_PROGRESS or + * HEAPTUPLE_LIVE */ + else + return false; /* HEAPTUPLE_RECENTLY_DEAD or HEAPTUPLE_DEAD */ + } + return true; /* not dead */ +} + +/* + * Check the current toast tuple against the state tracked in ctx, recording + * any corruption found in ctx->tupstore. + * + * This is not equivalent to running verify_heapam on the toast table itself, + * and is not hardened against corruption of the toast table. Rather, when + * validating a toasted attribute in the main table, the sequence of toast + * tuples that store the toasted value are retrieved and checked in order, with + * each toast tuple being checked against where we are in the sequence, as well + * as each toast tuple having its varlena structure sanity checked. 
+ */ +static void +check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx) +{ + int32 curchunk; + Pointer chunk; + bool isnull; + int32 chunksize; + int32 expected_size; + + /* + * Have a chunk, extract the sequence number and the data + */ + curchunk = DatumGetInt32(fastgetattr(toasttup, 2, + ctx->toast_rel->rd_att, &isnull)); + if (isnull) + { + report_corruption(ctx, + pstrdup("toast chunk sequence number is null")); + return; + } + chunk = DatumGetPointer(fastgetattr(toasttup, 3, + ctx->toast_rel->rd_att, &isnull)); + if (isnull) + { + report_corruption(ctx, + pstrdup("toast chunk data is null")); + return; + } + if (!VARATT_IS_EXTENDED(chunk)) + chunksize = VARSIZE(chunk) - VARHDRSZ; + else if (VARATT_IS_SHORT(chunk)) + { + /* + * could happen due to heap_form_tuple doing its thing + */ + chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT; + } + else + { + /* should never happen */ + uint32 header = ((varattrib_4b *) chunk)->va_4byte.va_header; + + report_corruption(ctx, + psprintf("corrupt extended toast chunk has invalid varlena header: %0x (sequence number %d)", + header, curchunk)); + return; + } + + /* + * Some checks on the data we've found + */ + if (curchunk != ctx->chunkno) + { + report_corruption(ctx, + psprintf("toast chunk sequence number %u does not match the expected sequence number %u", + curchunk, ctx->chunkno)); + return; + } + if (curchunk > ctx->endchunk) + { + report_corruption(ctx, + psprintf("toast chunk sequence number %u exceeds the end chunk sequence number %u", + curchunk, ctx->endchunk)); + return; + } + + expected_size = curchunk < ctx->totalchunks - 1 ? TOAST_MAX_CHUNK_SIZE + : ctx->attrsize - ((ctx->totalchunks - 1) * TOAST_MAX_CHUNK_SIZE); + if (chunksize != expected_size) + { + report_corruption(ctx, + psprintf("toast chunk size %u differs from the expected size %u", + chunksize, expected_size)); + return; + } +} + +/* + * Check the current attribute as tracked in ctx, recording any corruption + * found in ctx->tupstore. 
+ * + * This function follows the logic performed by heap_deform_tuple(), and in the + * case of a toasted value, optionally continues along the logic of + * detoast_external_attr(), checking for any conditions that would result in + * either of those functions Asserting or crashing the backend. The checks + * performed by Asserts present in those two functions are also performed here. + * In cases where those two functions are a bit cavalier in their assumptions + * about data being correct, we perform additional checks not present in either + * of those two functions. Where some condition is checked in both of those + * functions, we perform it here twice, as we parallel the logical flow of + * those two functions. The presence of duplicate checks seems a reasonable + * price to pay for keeping this code tightly coupled with the code it + * protects. + * + * Returns true if the tuple attribute is sane enough for processing to + * continue on to the next attribute, false otherwise. + */ +static bool +check_tuple_attribute(HeapCheckContext *ctx) +{ + struct varatt_external toast_pointer; + ScanKeyData toastkey; + SysScanDesc toastscan; + SnapshotData SnapshotToast; + HeapTuple toasttup; + bool found_toasttup; + Datum attdatum; + struct varlena *attr; + char *tp; /* pointer to the tuple data */ + uint16 infomask; + Form_pg_attribute thisatt; + + infomask = ctx->tuphdr->t_infomask; + thisatt = TupleDescAttr(RelationGetDescr(ctx->rel), ctx->attnum); + + tp = (char *) ctx->tuphdr + ctx->tuphdr->t_hoff; + + if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len) + { + report_corruption(ctx, + psprintf("attribute %u with length %u starts at offset %u beyond total tuple length %u", + ctx->attnum, + thisatt->attlen, + ctx->tuphdr->t_hoff + ctx->offset, + ctx->lp_len)); + return false; + } + + /* Skip null values */ + if (infomask & HEAP_HASNULL && att_isnull(ctx->attnum, ctx->tuphdr->t_bits)) + return true; + + /* Skip non-varlena values, but update offset first */ + if 
(thisatt->attlen != -1) + { + ctx->offset = att_align_nominal(ctx->offset, thisatt->attalign); + ctx->offset = att_addlength_pointer(ctx->offset, thisatt->attlen, + tp + ctx->offset); + if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len) + { + report_corruption(ctx, + psprintf("attribute %u with length %u ends at offset %u beyond total tuple length %u", + ctx->attnum, + thisatt->attlen, + ctx->tuphdr->t_hoff + ctx->offset, + ctx->lp_len)); + return false; + } + return true; + } + + /* Ok, we're looking at a varlena attribute. */ + ctx->offset = att_align_pointer(ctx->offset, thisatt->attalign, -1, + tp + ctx->offset); + + /* Get the (possibly corrupt) varlena datum */ + attdatum = fetchatt(thisatt, tp + ctx->offset); + + /* + * We have the datum, but we cannot decode it carelessly, as it may still + * be corrupt. + */ + + /* + * Check that VARTAG_SIZE won't hit a TrapMacro on a corrupt va_tag before + * risking a call into att_addlength_pointer + */ + if (VARATT_IS_EXTERNAL(tp + ctx->offset)) + { + uint8 va_tag = VARTAG_EXTERNAL(tp + ctx->offset); + + if (va_tag != VARTAG_ONDISK) + { + report_corruption(ctx, + psprintf("toasted attribute %u has unexpected TOAST tag %u", + ctx->attnum, + va_tag)); + /* We can't know where the next attribute begins */ + return false; + } + } + + /* Ok, should be safe now */ + ctx->offset = att_addlength_pointer(ctx->offset, thisatt->attlen, + tp + ctx->offset); + + if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len) + { + report_corruption(ctx, + psprintf("attribute %u with length %u ends at offset %u beyond total tuple length %u", + ctx->attnum, + thisatt->attlen, + ctx->tuphdr->t_hoff + ctx->offset, + ctx->lp_len)); + + return false; + } + + /* + * heap_deform_tuple would be done with this attribute at this point, + * having stored it in values[], and would continue to the next attribute. + * We go further, because we need to check if the toast datum is corrupt. 
+ */ + + attr = (struct varlena *) DatumGetPointer(attdatum); + + /* + * Now we follow the logic of detoast_external_attr(), with the same + * caveats about being paranoid about corruption. + */ + + /* Skip values that are not external */ + if (!VARATT_IS_EXTERNAL(attr)) + return true; + + /* It is external, and we're looking at a page on disk */ + + /* The tuple header better claim to contain toasted values */ + if (!(infomask & HEAP_HASEXTERNAL)) + { + report_corruption(ctx, + psprintf("attribute %u is external but tuple header flag HEAP_HASEXTERNAL not set", + ctx->attnum)); + return true; + } + + /* The relation better have a toast table */ + if (!ctx->rel->rd_rel->reltoastrelid) + { + report_corruption(ctx, + psprintf("attribute %u is external but relation has no toast relation", + ctx->attnum)); + return true; + } + + /* If we were told to skip toast checking, then we're done. */ + if (ctx->toast_rel == NULL) + return true; + + /* + * Must copy attr into toast_pointer for alignment considerations + */ + VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr); + + ctx->attrsize = toast_pointer.va_extsize; + ctx->endchunk = (ctx->attrsize - 1) / TOAST_MAX_CHUNK_SIZE; + ctx->totalchunks = ctx->endchunk + 1; + + /* + * Setup a scan key to find chunks in toast table with matching va_valueid + */ + ScanKeyInit(&toastkey, + (AttrNumber) 1, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(toast_pointer.va_valueid)); + + /* + * Check if any chunks for this toasted object exist in the toast table, + * accessible via the index. 
+ */ + init_toast_snapshot(&SnapshotToast); + toastscan = systable_beginscan_ordered(ctx->toast_rel, + ctx->valid_toast_index, + &SnapshotToast, 1, + &toastkey); + ctx->chunkno = 0; + found_toasttup = false; + while ((toasttup = + systable_getnext_ordered(toastscan, + ForwardScanDirection)) != NULL) + { + found_toasttup = true; + check_toast_tuple(toasttup, ctx); + ctx->chunkno++; + } + if (ctx->chunkno != (ctx->endchunk + 1)) + report_corruption(ctx, + psprintf("final toast chunk number %u differs from expected value %u", + ctx->chunkno, (ctx->endchunk + 1))); + if (!found_toasttup) + report_corruption(ctx, + psprintf("toasted value for attribute %u missing from toast table", + ctx->attnum)); + systable_endscan_ordered(toastscan); + + return true; +} + +/* + * Check the current tuple as tracked in ctx, recording any corruption found in + * ctx->tupstore. + */ +static void +check_tuple(HeapCheckContext *ctx) +{ + TransactionId xmin; + TransactionId xmax; + bool fatal = false; + uint16 infomask = ctx->tuphdr->t_infomask; + + /* If xmin is normal, it should be within valid range */ + xmin = HeapTupleHeaderGetXmin(ctx->tuphdr); + switch (get_xid_status(xmin, ctx, NULL)) + { + case XID_INVALID: + case XID_BOUNDS_OK: + break; + case XID_IN_FUTURE: + report_corruption(ctx, + psprintf("xmin %u equals or exceeds next valid transaction ID %u:%u", + xmin, + EpochFromFullTransactionId(ctx->next_fxid), + XidFromFullTransactionId(ctx->next_fxid))); + fatal = true; + break; + case XID_PRECEDES_CLUSTERMIN: + report_corruption(ctx, + psprintf("xmin %u precedes oldest valid transaction ID %u:%u", + xmin, + EpochFromFullTransactionId(ctx->oldest_fxid), + XidFromFullTransactionId(ctx->oldest_fxid))); + fatal = true; + break; + case XID_PRECEDES_RELMIN: + report_corruption(ctx, + psprintf("xmin %u precedes relation freeze threshold %u:%u", + xmin, + EpochFromFullTransactionId(ctx->relfrozenfxid), + XidFromFullTransactionId(ctx->relfrozenfxid))); + fatal = true; + break; + } + + xmax = 
HeapTupleHeaderGetRawXmax(ctx->tuphdr); + + if (infomask & HEAP_XMAX_IS_MULTI) + { + /* xmax is a multixact, so it should be within valid MXID range */ + switch (check_mxid_valid_in_rel(xmax, ctx)) + { + case XID_INVALID: + report_corruption(ctx, + pstrdup("multitransaction ID is invalid")); + fatal = true; + break; + case XID_PRECEDES_RELMIN: + report_corruption(ctx, + psprintf("multitransaction ID %u precedes relation minimum multitransaction ID threshold %u", + xmax, ctx->relminmxid)); + fatal = true; + break; + case XID_PRECEDES_CLUSTERMIN: + report_corruption(ctx, + psprintf("multitransaction ID %u precedes oldest valid multitransaction ID threshold %u", + xmax, ctx->oldest_mxact)); + fatal = true; + break; + case XID_IN_FUTURE: + report_corruption(ctx, + psprintf("multitransaction ID %u equals or exceeds next valid multitransaction ID %u", + xmax, + ctx->next_mxact)); + fatal = true; + break; + case XID_BOUNDS_OK: + break; + } + } + else + { + /* + * xmax is not a multixact and is normal, so it should be within the + * valid XID range. 
+ */ + switch (get_xid_status(xmax, ctx, NULL)) + { + case XID_INVALID: + case XID_BOUNDS_OK: + break; + case XID_IN_FUTURE: + report_corruption(ctx, + psprintf("xmax %u equals or exceeds next valid transaction ID %u:%u", + xmax, + EpochFromFullTransactionId(ctx->next_fxid), + XidFromFullTransactionId(ctx->next_fxid))); + fatal = true; + break; + case XID_PRECEDES_CLUSTERMIN: + report_corruption(ctx, + psprintf("xmax %u precedes oldest valid transaction ID %u:%u", + xmax, + EpochFromFullTransactionId(ctx->oldest_fxid), + XidFromFullTransactionId(ctx->oldest_fxid))); + fatal = true; + break; + case XID_PRECEDES_RELMIN: + report_corruption(ctx, + psprintf("xmax %u precedes relation freeze threshold %u:%u", + xmax, + EpochFromFullTransactionId(ctx->relfrozenfxid), + XidFromFullTransactionId(ctx->relfrozenfxid))); + fatal = true; + } + } + + /* + * Cannot process tuple data if tuple header was corrupt, as the offsets + * within the page cannot be trusted, leaving too much risk of reading + * garbage if we continue. + * + * We also cannot process the tuple if the xmin or xmax were invalid + * relative to relfrozenxid or relminmxid, as clog entries for the xids + * may already be gone. + */ + if (fatal) + return; + + /* + * Check various forms of tuple header corruption. If the header is too + * corrupt to continue checking, or if the tuple is not visible to anyone, + * we cannot continue with other checks. + */ + if (!check_tuple_header_and_visibilty(ctx->tuphdr, ctx)) + return; + + /* + * The tuple is visible, so it must be compatible with the current version + * of the relation descriptor. It might have fewer columns than are + * present in the relation descriptor, but it cannot have more. 
+ */ + if (RelationGetDescr(ctx->rel)->natts < ctx->natts) + { + report_corruption(ctx, + psprintf("number of attributes %u exceeds maximum expected for table %u", + ctx->natts, + RelationGetDescr(ctx->rel)->natts)); + return; + } + + /* + * Check each attribute unless we hit corruption that confuses what to do + * next, at which point we abort further attribute checks for this tuple. + * Note that we don't abort for all types of corruption, only for those + * types where we don't know how to continue. + */ + ctx->offset = 0; + for (ctx->attnum = 0; ctx->attnum < ctx->natts; ctx->attnum++) + if (!check_tuple_attribute(ctx)) + break; /* cannot continue */ + + /* revert attnum to -1 until we again examine individual attributes */ + ctx->attnum = -1; +} + +/* + * Convert a TransactionId into a FullTransactionId using our cached values of + * the valid transaction ID range. It is the caller's responsibility to have + * already updated the cached values, if necessary. + */ +static FullTransactionId +FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx) +{ + uint32 epoch; + + if (!TransactionIdIsNormal(xid)) + return FullTransactionIdFromEpochAndXid(0, xid); + epoch = EpochFromFullTransactionId(ctx->next_fxid); + if (xid > ctx->next_xid) + epoch--; + return FullTransactionIdFromEpochAndXid(epoch, xid); +} + +/* + * Update our cached range of valid transaction IDs. + */ +static void +update_cached_xid_range(HeapCheckContext *ctx) +{ + /* Make cached copies */ + LWLockAcquire(XidGenLock, LW_SHARED); + ctx->next_fxid = ShmemVariableCache->nextXid; + ctx->oldest_xid = ShmemVariableCache->oldestXid; + LWLockRelease(XidGenLock); + + /* And compute alternate versions of the same */ + ctx->oldest_fxid = FullTransactionIdFromXidAndCtx(ctx->oldest_xid, ctx); + ctx->next_xid = XidFromFullTransactionId(ctx->next_fxid); +} + +/* + * Update our cached range of valid multitransaction IDs. 
 + */
+static void
+update_cached_mxid_range(HeapCheckContext *ctx)
+{
+ ReadMultiXactIdRange(&ctx->oldest_mxact, &ctx->next_mxact);
+}
+
+/*
+ * Return whether the given FullTransactionId is within our cached valid
+ * transaction ID range.
+ */
+static inline bool
+fxid_in_cached_range(FullTransactionId fxid, const HeapCheckContext *ctx)
+{
+ return (FullTransactionIdPrecedesOrEquals(ctx->oldest_fxid, fxid) &&
+ FullTransactionIdPrecedes(fxid, ctx->next_fxid));
+}
+
+/*
+ * Checks whether a multitransaction ID is in the cached valid range, returning
+ * the nature of the range violation, if any.
+ */
+static XidBoundsViolation
+check_mxid_in_range(MultiXactId mxid, HeapCheckContext *ctx)
+{
+ if (!TransactionIdIsValid(mxid))
+ return XID_INVALID;
+ if (MultiXactIdPrecedes(mxid, ctx->relminmxid))
+ return XID_PRECEDES_RELMIN;
+ if (MultiXactIdPrecedes(mxid, ctx->oldest_mxact))
+ return XID_PRECEDES_CLUSTERMIN;
+ if (MultiXactIdPrecedesOrEquals(ctx->next_mxact, mxid))
+ return XID_IN_FUTURE;
+ return XID_BOUNDS_OK;
+}
+
+/*
+ * Checks whether the given mxid is valid to appear in the heap being checked,
+ * returning the nature of the range violation, if any.
+ *
+ * This function attempts to return quickly by caching the known valid mxid
+ * range in ctx. Callers should already have performed the initial setup of
+ * the cache prior to the first call to this function.
+ */
+static XidBoundsViolation
+check_mxid_valid_in_rel(MultiXactId mxid, HeapCheckContext *ctx)
+{
+ XidBoundsViolation result;
+
+ result = check_mxid_in_range(mxid, ctx);
+ if (result == XID_BOUNDS_OK)
+ return XID_BOUNDS_OK;
+
+ /* The range may have advanced. Recheck. */
+ update_cached_mxid_range(ctx);
+ return check_mxid_in_range(mxid, ctx);
+}
+
+/*
+ * Checks whether the given transaction ID is (or was recently) valid to appear
+ * in the heap being checked, or whether it is too old or too new to appear in
+ * the relation, returning information about the nature of the bounds violation. 
+ * + * We cache the range of valid transaction IDs. If xid is in that range, we + * conclude that it is valid, even though concurrent changes to the table might + * invalidate it under certain corrupt conditions. (For example, if the table + * contains corrupt all-frozen bits, a concurrent vacuum might skip the page(s) + * containing the xid and then truncate clog and advance the relfrozenxid + * beyond xid.) Reporting the xid as valid under such conditions seems + * acceptable, since if we had checked it earlier in our scan it would have + * truly been valid at that time. + * + * If the status argument is not NULL, and if and only if the transaction ID + * appears to be valid in this relation, the status argument will be set with + * the commit status of the transaction ID. + */ +static XidBoundsViolation +get_xid_status(TransactionId xid, HeapCheckContext *ctx, + XidCommitStatus *status) +{ + FullTransactionId fxid; + FullTransactionId clog_horizon; + + /* Quick check for special xids */ + if (!TransactionIdIsValid(xid)) + return XID_INVALID; + else if (xid == BootstrapTransactionId || xid == FrozenTransactionId) + { + if (status != NULL) + *status = XID_COMMITTED; + return XID_BOUNDS_OK; + } + + /* Check if the xid is within bounds */ + fxid = FullTransactionIdFromXidAndCtx(xid, ctx); + if (!fxid_in_cached_range(fxid, ctx)) + { + /* + * We may have been checking against stale values. Update the cached + * range to be sure, and since we relied on the cached range when we + * performed the full xid conversion, reconvert. 
+ */ + update_cached_xid_range(ctx); + fxid = FullTransactionIdFromXidAndCtx(xid, ctx); + } + + if (FullTransactionIdPrecedesOrEquals(ctx->next_fxid, fxid)) + return XID_IN_FUTURE; + if (FullTransactionIdPrecedes(fxid, ctx->oldest_fxid)) + return XID_PRECEDES_CLUSTERMIN; + if (FullTransactionIdPrecedes(fxid, ctx->relfrozenfxid)) + return XID_PRECEDES_RELMIN; + + /* Early return if the caller does not request clog checking */ + if (status == NULL) + return XID_BOUNDS_OK; + + /* Early return if we just checked this xid in a prior call */ + if (xid == ctx->cached_xid) + { + *status = ctx->cached_status; + return XID_BOUNDS_OK; + } + + *status = XID_COMMITTED; + LWLockAcquire(XactTruncationLock, LW_SHARED); + clog_horizon = + FullTransactionIdFromXidAndCtx(ShmemVariableCache->oldestClogXid, + ctx); + if (FullTransactionIdPrecedesOrEquals(clog_horizon, fxid)) + { + if (TransactionIdIsCurrentTransactionId(xid)) + *status = XID_IN_PROGRESS; + else if (TransactionIdDidCommit(xid)) + *status = XID_COMMITTED; + else if (TransactionIdDidAbort(xid)) + *status = XID_ABORTED; + else + *status = XID_IN_PROGRESS; + } + LWLockRelease(XactTruncationLock); + ctx->cached_xid = xid; + ctx->cached_status = *status; + return XID_BOUNDS_OK; +} diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index 5f3de3c0b7f6a..6d86e3ccdacfb 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ -1752,14 +1752,36 @@ bt_right_page_check_scankey(BtreeCheckState *state) * this function is capable to compare pivot keys on different levels. 
*/ static bool -bt_pivot_tuple_identical(IndexTuple itup1, IndexTuple itup2) +bt_pivot_tuple_identical(bool heapkeyspace, IndexTuple itup1, IndexTuple itup2) { if (IndexTupleSize(itup1) != IndexTupleSize(itup2)) return false; - if (memcmp(&itup1->t_tid.ip_posid, &itup2->t_tid.ip_posid, - IndexTupleSize(itup1) - offsetof(ItemPointerData, ip_posid)) != 0) - return false; + if (heapkeyspace) + { + /* + * Offset number will contain important information in heapkeyspace + * indexes: the number of attributes left in the pivot tuple following + * suffix truncation. Don't skip over it (compare it too). + */ + if (memcmp(&itup1->t_tid.ip_posid, &itup2->t_tid.ip_posid, + IndexTupleSize(itup1) - + offsetof(ItemPointerData, ip_posid)) != 0) + return false; + } + else + { + /* + * Cannot rely on offset number field having consistent value across + * levels on pg_upgrade'd !heapkeyspace indexes. Compare contents of + * tuple starting from just after item pointer (i.e. after block + * number and offset number). + */ + if (memcmp(&itup1->t_info, &itup2->t_info, + IndexTupleSize(itup1) - + offsetof(IndexTupleData, t_info)) != 0) + return false; + } return true; } @@ -1913,7 +1935,7 @@ bt_child_highkey_check(BtreeCheckState *state, rightsplit = P_INCOMPLETE_SPLIT(opaque); /* - * If we visit page with high key, check that it is be equal to the + * If we visit page with high key, check that it is equal to the * target key next to corresponding downlink. 
*/ if (!rightsplit && !P_RIGHTMOST(opaque)) @@ -2007,7 +2029,7 @@ bt_child_highkey_check(BtreeCheckState *state, itup = state->lowkey; } - if (!bt_pivot_tuple_identical(highkey, itup)) + if (!bt_pivot_tuple_identical(state->heapkeyspace, highkey, itup)) { ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c index d66901680e336..35e466cdd9423 100644 --- a/contrib/btree_gist/btree_numeric.c +++ b/contrib/btree_gist/btree_numeric.c @@ -195,7 +195,7 @@ gbt_numeric_penalty(PG_FUNCTION_ARGS) } else { - Numeric nul = DatumGetNumeric(DirectFunctionCall1(int4_numeric, Int32GetDatum(0))); + Numeric nul = int64_to_numeric(0); *result = 0.0; diff --git a/contrib/jsonb_plperl/jsonb_plperl.c b/contrib/jsonb_plperl/jsonb_plperl.c index b81ba54b809dc..22e90afe1b6ed 100644 --- a/contrib/jsonb_plperl/jsonb_plperl.c +++ b/contrib/jsonb_plperl/jsonb_plperl.c @@ -216,9 +216,7 @@ SV_to_JsonbValue(SV *in, JsonbParseState **jsonb_state, bool is_elem) IV ival = SvIV(in); out.type = jbvNumeric; - out.val.numeric = - DatumGetNumeric(DirectFunctionCall1(int8_numeric, - Int64GetDatum((int64) ival))); + out.val.numeric = int64_to_numeric(ival); } else if (SvNOK(in)) { diff --git a/contrib/old_snapshot/Makefile b/contrib/old_snapshot/Makefile new file mode 100644 index 0000000000000..77c85df3225d6 --- /dev/null +++ b/contrib/old_snapshot/Makefile @@ -0,0 +1,22 @@ +# contrib/old_snapshot/Makefile + +MODULE_big = old_snapshot +OBJS = \ + $(WIN32RES) \ + time_mapping.o +PG_CPPFLAGS = -I$(libpq_srcdir) + +EXTENSION = old_snapshot +DATA = old_snapshot--1.0.sql +PGFILEDESC = "old_snapshot - utilities in support of old_snapshot_threshold" + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/old_snapshot +top_builddir = ../.. 
+include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/contrib/old_snapshot/old_snapshot--1.0.sql b/contrib/old_snapshot/old_snapshot--1.0.sql new file mode 100644 index 0000000000000..9ebb8829e3729 --- /dev/null +++ b/contrib/old_snapshot/old_snapshot--1.0.sql @@ -0,0 +1,14 @@ +/* contrib/old_snapshot/old_snapshot--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION old_snapshot" to load this file. \quit + +-- Show visibility map and page-level visibility information for each block. +CREATE FUNCTION pg_old_snapshot_time_mapping(array_offset OUT int4, + end_timestamp OUT timestamptz, + newest_xmin OUT xid) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_old_snapshot_time_mapping' +LANGUAGE C STRICT; + +-- XXX. Do we want REVOKE commands here? diff --git a/contrib/old_snapshot/old_snapshot.control b/contrib/old_snapshot/old_snapshot.control new file mode 100644 index 0000000000000..491eec536cd6f --- /dev/null +++ b/contrib/old_snapshot/old_snapshot.control @@ -0,0 +1,5 @@ +# old_snapshot extension +comment = 'utilities in support of old_snapshot_threshold' +default_version = '1.0' +module_pathname = '$libdir/old_snapshot' +relocatable = true diff --git a/contrib/old_snapshot/time_mapping.c b/contrib/old_snapshot/time_mapping.c new file mode 100644 index 0000000000000..37e0055a00860 --- /dev/null +++ b/contrib/old_snapshot/time_mapping.c @@ -0,0 +1,159 @@ +/*------------------------------------------------------------------------- + * + * time_mapping.c + * time to XID mapping information + * + * Copyright (c) 2020, PostgreSQL Global Development Group + * + * contrib/old_snapshot/time_mapping.c + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "funcapi.h" +#include "storage/lwlock.h" +#include "utils/old_snapshot.h" +#include "utils/snapmgr.h" +#include "utils/timestamp.h" + 
+/* + * Backend-private copy of the information from oldSnapshotControl which relates + * to the time to XID mapping, plus an index so that we can iterate. + * + * Note that the length of the xid_by_minute array is given by + * OLD_SNAPSHOT_TIME_MAP_ENTRIES (which is not a compile-time constant). + */ +typedef struct +{ + int current_index; + int head_offset; + TimestampTz head_timestamp; + int count_used; + TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER]; +} OldSnapshotTimeMapping; + +#define NUM_TIME_MAPPING_COLUMNS 3 + +PG_MODULE_MAGIC; +PG_FUNCTION_INFO_V1(pg_old_snapshot_time_mapping); + +static OldSnapshotTimeMapping *GetOldSnapshotTimeMapping(void); +static TupleDesc MakeOldSnapshotTimeMappingTupleDesc(void); +static HeapTuple MakeOldSnapshotTimeMappingTuple(TupleDesc tupdesc, + OldSnapshotTimeMapping *mapping); + +/* + * SQL-callable set-returning function. + */ +Datum +pg_old_snapshot_time_mapping(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + OldSnapshotTimeMapping *mapping; + + if (SRF_IS_FIRSTCALL()) + { + MemoryContext oldcontext; + + funcctx = SRF_FIRSTCALL_INIT(); + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + mapping = GetOldSnapshotTimeMapping(); + funcctx->user_fctx = mapping; + funcctx->tuple_desc = MakeOldSnapshotTimeMappingTupleDesc(); + MemoryContextSwitchTo(oldcontext); + } + + funcctx = SRF_PERCALL_SETUP(); + mapping = (OldSnapshotTimeMapping *) funcctx->user_fctx; + + while (mapping->current_index < mapping->count_used) + { + HeapTuple tuple; + + tuple = MakeOldSnapshotTimeMappingTuple(funcctx->tuple_desc, mapping); + ++mapping->current_index; + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } + + SRF_RETURN_DONE(funcctx); +} + +/* + * Get the old snapshot time mapping data from shared memory. 
 + */
+static OldSnapshotTimeMapping *
+GetOldSnapshotTimeMapping(void)
+{
+ OldSnapshotTimeMapping *mapping;
+
+ mapping = palloc(offsetof(OldSnapshotTimeMapping, xid_by_minute)
+ + sizeof(TransactionId) * OLD_SNAPSHOT_TIME_MAP_ENTRIES);
+ mapping->current_index = 0;
+
+ LWLockAcquire(OldSnapshotTimeMapLock, LW_SHARED);
+ mapping->head_offset = oldSnapshotControl->head_offset;
+ mapping->head_timestamp = oldSnapshotControl->head_timestamp;
+ mapping->count_used = oldSnapshotControl->count_used;
+ for (int i = 0; i < OLD_SNAPSHOT_TIME_MAP_ENTRIES; ++i)
+ mapping->xid_by_minute[i] = oldSnapshotControl->xid_by_minute[i];
+ LWLockRelease(OldSnapshotTimeMapLock);
+
+ return mapping;
+}
+
+/*
+ * Build a tuple descriptor for the pg_old_snapshot_time_mapping() SRF.
+ */
+static TupleDesc
+MakeOldSnapshotTimeMappingTupleDesc(void)
+{
+ TupleDesc tupdesc;
+
+ tupdesc = CreateTemplateTupleDesc(NUM_TIME_MAPPING_COLUMNS);
+
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "array_offset",
+ INT4OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "end_timestamp",
+ TIMESTAMPTZOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "newest_xmin",
+ XIDOID, -1, 0);
+
+ return BlessTupleDesc(tupdesc);
+}
+
+/*
+ * Convert one entry from the old snapshot time mapping to a HeapTuple.
+ */
+static HeapTuple
+MakeOldSnapshotTimeMappingTuple(TupleDesc tupdesc, OldSnapshotTimeMapping *mapping)
+{
+ Datum values[NUM_TIME_MAPPING_COLUMNS];
+ bool nulls[NUM_TIME_MAPPING_COLUMNS];
+ int array_position;
+ TimestampTz timestamp;
+
+ /*
+ * Figure out the array position corresponding to the current index.
+ *
+ * Index 0 means the oldest entry in the mapping, which is stored at
+ * mapping->head_offset. Index 1 means the next-oldest entry, which is at the
+ * following index, and so on. We wrap around when we reach the end of the array. 
+ */ + array_position = (mapping->head_offset + mapping->current_index) + % OLD_SNAPSHOT_TIME_MAP_ENTRIES; + + /* + * No explicit timestamp is stored for any entry other than the oldest one, + * but each entry corresponds to 1-minute period, so we can just add. + */ + timestamp = TimestampTzPlusMilliseconds(mapping->head_timestamp, + mapping->current_index * 60000); + + /* Initialize nulls and values arrays. */ + memset(nulls, 0, sizeof(nulls)); + values[0] = Int32GetDatum(array_position); + values[1] = TimestampTzGetDatum(timestamp); + values[2] = TransactionIdGetDatum(mapping->xid_by_minute[array_position]); + + return heap_form_tuple(tupdesc, values, nulls); +} diff --git a/contrib/pg_surgery/.gitignore b/contrib/pg_surgery/.gitignore new file mode 100644 index 0000000000000..5dcb3ff972350 --- /dev/null +++ b/contrib/pg_surgery/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/pg_surgery/Makefile b/contrib/pg_surgery/Makefile new file mode 100644 index 0000000000000..a66776c4c4131 --- /dev/null +++ b/contrib/pg_surgery/Makefile @@ -0,0 +1,23 @@ +# contrib/pg_surgery/Makefile + +MODULE_big = pg_surgery +OBJS = \ + $(WIN32RES) \ + heap_surgery.o + +EXTENSION = pg_surgery +DATA = pg_surgery--1.0.sql +PGFILEDESC = "pg_surgery - perform surgery on a damaged relation" + +REGRESS = heap_surgery + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/pg_surgery +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/contrib/pg_surgery/expected/heap_surgery.out b/contrib/pg_surgery/expected/heap_surgery.out new file mode 100644 index 0000000000000..d4a757ffa0145 --- /dev/null +++ b/contrib/pg_surgery/expected/heap_surgery.out @@ -0,0 +1,178 @@ +create extension pg_surgery; +-- create a normal heap table and insert some rows. 
+-- use a temp table so that vacuum behavior doesn't depend on global xmin +create temp table htab (a int); +insert into htab values (100), (200), (300), (400), (500); +-- test empty TID array +select heap_force_freeze('htab'::regclass, ARRAY[]::tid[]); + heap_force_freeze +------------------- + +(1 row) + +-- nothing should be frozen yet +select * from htab where xmin = 2; + a +--- +(0 rows) + +-- freeze forcibly +select heap_force_freeze('htab'::regclass, ARRAY['(0, 4)']::tid[]); + heap_force_freeze +------------------- + +(1 row) + +-- now we should have one frozen tuple +select ctid, xmax from htab where xmin = 2; + ctid | xmax +-------+------ + (0,4) | 0 +(1 row) + +-- kill forcibly +select heap_force_kill('htab'::regclass, ARRAY['(0, 4)']::tid[]); + heap_force_kill +----------------- + +(1 row) + +-- should be gone now +select * from htab where ctid = '(0, 4)'; + a +--- +(0 rows) + +-- should now be skipped because it's already dead +select heap_force_kill('htab'::regclass, ARRAY['(0, 4)']::tid[]); +NOTICE: skipping tid (0, 4) for relation "htab" because it is marked dead + heap_force_kill +----------------- + +(1 row) + +select heap_force_freeze('htab'::regclass, ARRAY['(0, 4)']::tid[]); +NOTICE: skipping tid (0, 4) for relation "htab" because it is marked dead + heap_force_freeze +------------------- + +(1 row) + +-- freeze two TIDs at once while skipping an out-of-range block number +select heap_force_freeze('htab'::regclass, + ARRAY['(0, 1)', '(0, 3)', '(1, 1)']::tid[]); +NOTICE: skipping block 1 for relation "htab" because the block number is out of range + heap_force_freeze +------------------- + +(1 row) + +-- we should now have two frozen tuples +select ctid, xmax from htab where xmin = 2; + ctid | xmax +-------+------ + (0,1) | 0 + (0,3) | 0 +(2 rows) + +-- out-of-range TIDs should be skipped +select heap_force_freeze('htab'::regclass, ARRAY['(0, 0)', '(0, 6)']::tid[]); +NOTICE: skipping tid (0, 0) for relation "htab" because the item number is out 
of range +NOTICE: skipping tid (0, 6) for relation "htab" because the item number is out of range + heap_force_freeze +------------------- + +(1 row) + +-- set up a new table with a redirected line pointer +-- use a temp table so that vacuum behavior doesn't depend on global xmin +create temp table htab2(a int); +insert into htab2 values (100); +update htab2 set a = 200; +vacuum htab2; +-- redirected TIDs should be skipped +select heap_force_kill('htab2'::regclass, ARRAY['(0, 1)']::tid[]); +NOTICE: skipping tid (0, 1) for relation "htab2" because it redirects to item 2 + heap_force_kill +----------------- + +(1 row) + +-- now create an unused line pointer +select ctid from htab2; + ctid +------- + (0,2) +(1 row) + +update htab2 set a = 300; +select ctid from htab2; + ctid +------- + (0,3) +(1 row) + +vacuum freeze htab2; +-- unused TIDs should be skipped +select heap_force_kill('htab2'::regclass, ARRAY['(0, 2)']::tid[]); +NOTICE: skipping tid (0, 2) for relation "htab2" because it is marked unused + heap_force_kill +----------------- + +(1 row) + +-- multidimensional TID array should be rejected +select heap_force_kill('htab2'::regclass, ARRAY[['(0, 2)']]::tid[]); +ERROR: argument must be empty or one-dimensional array +-- TID array with nulls should be rejected +select heap_force_kill('htab2'::regclass, ARRAY[NULL]::tid[]); +ERROR: array must not contain nulls +-- but we should be able to kill the one tuple we have +select heap_force_kill('htab2'::regclass, ARRAY['(0, 3)']::tid[]); + heap_force_kill +----------------- + +(1 row) + +-- materialized view. +-- note that we don't commit the transaction, so autovacuum can't interfere. 
+begin; +create materialized view mvw as select a from generate_series(1, 3) a; +select * from mvw where xmin = 2; + a +--- +(0 rows) + +select heap_force_freeze('mvw'::regclass, ARRAY['(0, 3)']::tid[]); + heap_force_freeze +------------------- + +(1 row) + +select * from mvw where xmin = 2; + a +--- + 3 +(1 row) + +select heap_force_kill('mvw'::regclass, ARRAY['(0, 3)']::tid[]); + heap_force_kill +----------------- + +(1 row) + +select * from mvw where ctid = '(0, 3)'; + a +--- +(0 rows) + +rollback; +-- check that it fails on an unsupported relkind +create view vw as select 1; +select heap_force_kill('vw'::regclass, ARRAY['(0, 1)']::tid[]); +ERROR: "vw" is not a table, materialized view, or TOAST table +select heap_force_freeze('vw'::regclass, ARRAY['(0, 1)']::tid[]); +ERROR: "vw" is not a table, materialized view, or TOAST table +-- cleanup. +drop view vw; +drop extension pg_surgery; diff --git a/contrib/pg_surgery/heap_surgery.c b/contrib/pg_surgery/heap_surgery.c new file mode 100644 index 0000000000000..eb96b4bb36d84 --- /dev/null +++ b/contrib/pg_surgery/heap_surgery.c @@ -0,0 +1,428 @@ +/*------------------------------------------------------------------------- + * + * heap_surgery.c + * Functions to perform surgery on the damaged heap table. + * + * Copyright (c) 2020, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/pg_surgery/heap_surgery.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/heapam.h" +#include "access/visibilitymap.h" +#include "catalog/pg_am_d.h" +#include "catalog/pg_proc_d.h" +#include "miscadmin.h" +#include "storage/bufmgr.h" +#include "utils/acl.h" +#include "utils/rel.h" + +PG_MODULE_MAGIC; + +/* Options to forcefully change the state of a heap tuple. 
*/ +typedef enum HeapTupleForceOption +{ + HEAP_FORCE_KILL, + HEAP_FORCE_FREEZE +} HeapTupleForceOption; + +PG_FUNCTION_INFO_V1(heap_force_kill); +PG_FUNCTION_INFO_V1(heap_force_freeze); + +static int32 tidcmp(const void *a, const void *b); +static Datum heap_force_common(FunctionCallInfo fcinfo, + HeapTupleForceOption heap_force_opt); +static void sanity_check_tid_array(ArrayType *ta, int *ntids); +static void sanity_check_relation(Relation rel); +static BlockNumber find_tids_one_page(ItemPointer tids, int ntids, + OffsetNumber *next_start_ptr); + +/*------------------------------------------------------------------------- + * heap_force_kill() + * + * Force kill the tuple(s) pointed to by the item pointer(s) stored in the + * given TID array. + * + * Usage: SELECT heap_force_kill(regclass, tid[]); + *------------------------------------------------------------------------- + */ +Datum +heap_force_kill(PG_FUNCTION_ARGS) +{ + PG_RETURN_DATUM(heap_force_common(fcinfo, HEAP_FORCE_KILL)); +} + +/*------------------------------------------------------------------------- + * heap_force_freeze() + * + * Force freeze the tuple(s) pointed to by the item pointer(s) stored in the + * given TID array. 
+ * + * Usage: SELECT heap_force_freeze(regclass, tid[]); + *------------------------------------------------------------------------- + */ +Datum +heap_force_freeze(PG_FUNCTION_ARGS) +{ + PG_RETURN_DATUM(heap_force_common(fcinfo, HEAP_FORCE_FREEZE)); +} + +/*------------------------------------------------------------------------- + * heap_force_common() + * + * Common code for heap_force_kill and heap_force_freeze + *------------------------------------------------------------------------- + */ +static Datum +heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) +{ + Oid relid = PG_GETARG_OID(0); + ArrayType *ta = PG_GETARG_ARRAYTYPE_P_COPY(1); + ItemPointer tids; + int ntids, + nblocks; + Relation rel; + OffsetNumber curr_start_ptr, + next_start_ptr; + bool include_this_tid[MaxHeapTuplesPerPage]; + + if (RecoveryInProgress()) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("recovery is in progress"), + errhint("heap surgery functions cannot be executed during recovery."))); + + /* Check inputs. */ + sanity_check_tid_array(ta, &ntids); + + rel = relation_open(relid, RowExclusiveLock); + + /* Check target relation. */ + sanity_check_relation(rel); + + tids = ((ItemPointer) ARR_DATA_PTR(ta)); + + /* + * If there is more than one TID in the array, sort them so that we can + * easily fetch all the TIDs belonging to one particular page from the + * array. + */ + if (ntids > 1) + qsort((void *) tids, ntids, sizeof(ItemPointerData), tidcmp); + + curr_start_ptr = next_start_ptr = 0; + nblocks = RelationGetNumberOfBlocks(rel); + + /* + * Loop, performing the necessary actions for each block. 
+ */ + while (next_start_ptr != ntids) + { + Buffer buf; + Buffer vmbuf = InvalidBuffer; + Page page; + BlockNumber blkno; + OffsetNumber curoff; + OffsetNumber maxoffset; + int i; + bool did_modify_page = false; + bool did_modify_vm = false; + + CHECK_FOR_INTERRUPTS(); + + /* + * Find all the TIDs belonging to one particular page starting from + * next_start_ptr and process them one by one. + */ + blkno = find_tids_one_page(tids, ntids, &next_start_ptr); + + /* Check whether the block number is valid. */ + if (blkno >= nblocks) + { + /* Update the current_start_ptr before moving to the next page. */ + curr_start_ptr = next_start_ptr; + + ereport(NOTICE, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("skipping block %u for relation \"%s\" because the block number is out of range", + blkno, RelationGetRelationName(rel)))); + continue; + } + + buf = ReadBuffer(rel, blkno); + LockBufferForCleanup(buf); + + page = BufferGetPage(buf); + + maxoffset = PageGetMaxOffsetNumber(page); + + /* + * Figure out which TIDs we are going to process and which ones we are + * going to skip. + */ + memset(include_this_tid, 0, sizeof(include_this_tid)); + for (i = curr_start_ptr; i < next_start_ptr; i++) + { + OffsetNumber offno = ItemPointerGetOffsetNumberNoCheck(&tids[i]); + ItemId itemid; + + /* Check whether the offset number is valid. */ + if (offno == InvalidOffsetNumber || offno > maxoffset) + { + ereport(NOTICE, + errmsg("skipping tid (%u, %u) for relation \"%s\" because the item number is out of range", + blkno, offno, RelationGetRelationName(rel))); + continue; + } + + itemid = PageGetItemId(page, offno); + + /* Only accept an item ID that is used. 
*/ + if (ItemIdIsRedirected(itemid)) + { + ereport(NOTICE, + errmsg("skipping tid (%u, %u) for relation \"%s\" because it redirects to item %u", + blkno, offno, RelationGetRelationName(rel), + ItemIdGetRedirect(itemid))); + continue; + } + else if (ItemIdIsDead(itemid)) + { + ereport(NOTICE, + (errmsg("skipping tid (%u, %u) for relation \"%s\" because it is marked dead", + blkno, offno, RelationGetRelationName(rel)))); + continue; + } + else if (!ItemIdIsUsed(itemid)) + { + ereport(NOTICE, + (errmsg("skipping tid (%u, %u) for relation \"%s\" because it is marked unused", + blkno, offno, RelationGetRelationName(rel)))); + continue; + } + + /* Mark it for processing. */ + Assert(offno < MaxHeapTuplesPerPage); + include_this_tid[offno] = true; + } + + /* + * Before entering the critical section, pin the visibility map page + * if it appears to be necessary. + */ + if (heap_force_opt == HEAP_FORCE_KILL && PageIsAllVisible(page)) + visibilitymap_pin(rel, blkno, &vmbuf); + + /* No ereport(ERROR) from here until all the changes are logged. */ + START_CRIT_SECTION(); + + for (curoff = FirstOffsetNumber; curoff <= maxoffset; + curoff = OffsetNumberNext(curoff)) + { + ItemId itemid; + + if (!include_this_tid[curoff]) + continue; + + itemid = PageGetItemId(page, curoff); + Assert(ItemIdIsNormal(itemid)); + + did_modify_page = true; + + if (heap_force_opt == HEAP_FORCE_KILL) + { + ItemIdSetDead(itemid); + + /* + * If the page is marked all-visible, we must clear + * PD_ALL_VISIBLE flag on the page header and an all-visible + * bit on the visibility map corresponding to the page. + */ + if (PageIsAllVisible(page)) + { + PageClearAllVisible(page); + visibilitymap_clear(rel, blkno, vmbuf, + VISIBILITYMAP_VALID_BITS); + did_modify_vm = true; + } + } + else + { + HeapTupleHeader htup; + + Assert(heap_force_opt == HEAP_FORCE_FREEZE); + + htup = (HeapTupleHeader) PageGetItem(page, itemid); + + /* + * Reset all visibility-related fields of the tuple. 
This + * logic should mimic heap_execute_freeze_tuple(), but we + * choose to reset xmin and ctid just to be sure that no + * potentially-garbled data is left behind. + */ + ItemPointerSet(&htup->t_ctid, blkno, curoff); + HeapTupleHeaderSetXmin(htup, FrozenTransactionId); + HeapTupleHeaderSetXmax(htup, InvalidTransactionId); + if (htup->t_infomask & HEAP_MOVED) + { + if (htup->t_infomask & HEAP_MOVED_OFF) + HeapTupleHeaderSetXvac(htup, InvalidTransactionId); + else + HeapTupleHeaderSetXvac(htup, FrozenTransactionId); + } + + /* + * Clear all the visibility-related bits of this tuple and + * mark it as frozen. Also, get rid of HOT_UPDATED and + * KEYS_UPDATES bits. + */ + htup->t_infomask &= ~HEAP_XACT_MASK; + htup->t_infomask |= (HEAP_XMIN_FROZEN | HEAP_XMAX_INVALID); + htup->t_infomask2 &= ~HEAP_HOT_UPDATED; + htup->t_infomask2 &= ~HEAP_KEYS_UPDATED; + } + } + + /* + * If the page was modified, only then, we mark the buffer dirty or do + * the WAL logging. + */ + if (did_modify_page) + { + /* Mark buffer dirty before we write WAL. */ + MarkBufferDirty(buf); + + /* XLOG stuff */ + if (RelationNeedsWAL(rel)) + log_newpage_buffer(buf, true); + } + + /* WAL log the VM page if it was modified. */ + if (did_modify_vm && RelationNeedsWAL(rel)) + log_newpage_buffer(vmbuf, false); + + END_CRIT_SECTION(); + + UnlockReleaseBuffer(buf); + + if (vmbuf != InvalidBuffer) + ReleaseBuffer(vmbuf); + + /* Update the current_start_ptr before moving to the next page. */ + curr_start_ptr = next_start_ptr; + } + + relation_close(rel, RowExclusiveLock); + + pfree(ta); + + PG_RETURN_VOID(); +} + +/*------------------------------------------------------------------------- + * tidcmp() + * + * Compare two item pointers, return -1, 0, or +1. + * + * See ItemPointerCompare for details. 
+ * ------------------------------------------------------------------------ + */ +static int32 +tidcmp(const void *a, const void *b) +{ + ItemPointer iptr1 = ((const ItemPointer) a); + ItemPointer iptr2 = ((const ItemPointer) b); + + return ItemPointerCompare(iptr1, iptr2); +} + +/*------------------------------------------------------------------------- + * sanity_check_tid_array() + * + * Perform sanity checks on the given tid array, and set *ntids to the + * number of items in the array. + * ------------------------------------------------------------------------ + */ +static void +sanity_check_tid_array(ArrayType *ta, int *ntids) +{ + if (ARR_HASNULL(ta) && array_contains_nulls(ta)) + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("array must not contain nulls"))); + + if (ARR_NDIM(ta) > 1) + ereport(ERROR, + (errcode(ERRCODE_DATA_EXCEPTION), + errmsg("argument must be empty or one-dimensional array"))); + + *ntids = ArrayGetNItems(ARR_NDIM(ta), ARR_DIMS(ta)); +} + +/*------------------------------------------------------------------------- + * sanity_check_relation() + * + * Perform sanity checks on the given relation. + * ------------------------------------------------------------------------ + */ +static void +sanity_check_relation(Relation rel) +{ + if (rel->rd_rel->relkind != RELKIND_RELATION && + rel->rd_rel->relkind != RELKIND_MATVIEW && + rel->rd_rel->relkind != RELKIND_TOASTVALUE) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("\"%s\" is not a table, materialized view, or TOAST table", + RelationGetRelationName(rel)))); + + if (rel->rd_rel->relam != HEAP_TABLE_AM_OID) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("only heap AM is supported"))); + + /* Must be owner of the table or superuser. 
*/ + if (!pg_class_ownercheck(RelationGetRelid(rel), GetUserId())) + aclcheck_error(ACLCHECK_NOT_OWNER, + get_relkind_objtype(rel->rd_rel->relkind), + RelationGetRelationName(rel)); +} + +/*------------------------------------------------------------------------- + * find_tids_one_page() + * + * Find all the tids residing in the same page as tids[next_start_ptr], and + * update next_start_ptr so that it points to the first tid in the next page. + * + * NOTE: The input tids[] array must be sorted. + * ------------------------------------------------------------------------ + */ +static BlockNumber +find_tids_one_page(ItemPointer tids, int ntids, OffsetNumber *next_start_ptr) +{ + int i; + BlockNumber prev_blkno, + blkno; + + prev_blkno = blkno = InvalidBlockNumber; + + for (i = *next_start_ptr; i < ntids; i++) + { + ItemPointerData tid = tids[i]; + + blkno = ItemPointerGetBlockNumberNoCheck(&tid); + + if (i == *next_start_ptr) + prev_blkno = blkno; + + if (prev_blkno != blkno) + break; + } + + *next_start_ptr = i; + return prev_blkno; +} diff --git a/contrib/pg_surgery/pg_surgery--1.0.sql b/contrib/pg_surgery/pg_surgery--1.0.sql new file mode 100644 index 0000000000000..2ae7f228c74b8 --- /dev/null +++ b/contrib/pg_surgery/pg_surgery--1.0.sql @@ -0,0 +1,18 @@ +/* contrib/pg_surgery/pg_surgery--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION pg_surgery" to load this file. 
\quit + +CREATE FUNCTION heap_force_kill(reloid regclass, tids tid[]) +RETURNS VOID +AS 'MODULE_PATHNAME', 'heap_force_kill' +LANGUAGE C STRICT; + +REVOKE EXECUTE ON FUNCTION heap_force_kill(regclass, tid[]) FROM PUBLIC; + +CREATE FUNCTION heap_force_freeze(reloid regclass, tids tid[]) +RETURNS VOID +AS 'MODULE_PATHNAME', 'heap_force_freeze' +LANGUAGE C STRICT; + +REVOKE EXECUTE ON FUNCTION heap_force_freeze(regclass, tid[]) FROM PUBLIC; \ No newline at end of file diff --git a/contrib/pg_surgery/pg_surgery.control b/contrib/pg_surgery/pg_surgery.control new file mode 100644 index 0000000000000..2bcdad1e3f7fe --- /dev/null +++ b/contrib/pg_surgery/pg_surgery.control @@ -0,0 +1,5 @@ +# pg_surgery extension +comment = 'extension to perform surgery on a damaged relation' +default_version = '1.0' +module_pathname = '$libdir/pg_surgery' +relocatable = true diff --git a/contrib/pg_surgery/sql/heap_surgery.sql b/contrib/pg_surgery/sql/heap_surgery.sql new file mode 100644 index 0000000000000..6526b27535de4 --- /dev/null +++ b/contrib/pg_surgery/sql/heap_surgery.sql @@ -0,0 +1,88 @@ +create extension pg_surgery; + +-- create a normal heap table and insert some rows. 
+-- use a temp table so that vacuum behavior doesn't depend on global xmin +create temp table htab (a int); +insert into htab values (100), (200), (300), (400), (500); + +-- test empty TID array +select heap_force_freeze('htab'::regclass, ARRAY[]::tid[]); + +-- nothing should be frozen yet +select * from htab where xmin = 2; + +-- freeze forcibly +select heap_force_freeze('htab'::regclass, ARRAY['(0, 4)']::tid[]); + +-- now we should have one frozen tuple +select ctid, xmax from htab where xmin = 2; + +-- kill forcibly +select heap_force_kill('htab'::regclass, ARRAY['(0, 4)']::tid[]); + +-- should be gone now +select * from htab where ctid = '(0, 4)'; + +-- should now be skipped because it's already dead +select heap_force_kill('htab'::regclass, ARRAY['(0, 4)']::tid[]); +select heap_force_freeze('htab'::regclass, ARRAY['(0, 4)']::tid[]); + +-- freeze two TIDs at once while skipping an out-of-range block number +select heap_force_freeze('htab'::regclass, + ARRAY['(0, 1)', '(0, 3)', '(1, 1)']::tid[]); + +-- we should now have two frozen tuples +select ctid, xmax from htab where xmin = 2; + +-- out-of-range TIDs should be skipped +select heap_force_freeze('htab'::regclass, ARRAY['(0, 0)', '(0, 6)']::tid[]); + +-- set up a new table with a redirected line pointer +-- use a temp table so that vacuum behavior doesn't depend on global xmin +create temp table htab2(a int); +insert into htab2 values (100); +update htab2 set a = 200; +vacuum htab2; + +-- redirected TIDs should be skipped +select heap_force_kill('htab2'::regclass, ARRAY['(0, 1)']::tid[]); + +-- now create an unused line pointer +select ctid from htab2; +update htab2 set a = 300; +select ctid from htab2; +vacuum freeze htab2; + +-- unused TIDs should be skipped +select heap_force_kill('htab2'::regclass, ARRAY['(0, 2)']::tid[]); + +-- multidimensional TID array should be rejected +select heap_force_kill('htab2'::regclass, ARRAY[['(0, 2)']]::tid[]); + +-- TID array with nulls should be rejected +select 
heap_force_kill('htab2'::regclass, ARRAY[NULL]::tid[]); + +-- but we should be able to kill the one tuple we have +select heap_force_kill('htab2'::regclass, ARRAY['(0, 3)']::tid[]); + +-- materialized view. +-- note that we don't commit the transaction, so autovacuum can't interfere. +begin; +create materialized view mvw as select a from generate_series(1, 3) a; + +select * from mvw where xmin = 2; +select heap_force_freeze('mvw'::regclass, ARRAY['(0, 3)']::tid[]); +select * from mvw where xmin = 2; + +select heap_force_kill('mvw'::regclass, ARRAY['(0, 3)']::tid[]); +select * from mvw where ctid = '(0, 3)'; +rollback; + +-- check that it fails on an unsupported relkind +create view vw as select 1; +select heap_force_kill('vw'::regclass, ARRAY['(0, 1)']::tid[]); +select heap_force_freeze('vw'::regclass, ARRAY['(0, 1)']::tid[]); + +-- cleanup. +drop view vw; +drop extension pg_surgery; diff --git a/contrib/pgcrypto/crypt-md5.c b/contrib/pgcrypto/crypt-md5.c index b6466d3e3178b..d38721a1010a4 100644 --- a/contrib/pgcrypto/crypt-md5.c +++ b/contrib/pgcrypto/crypt-md5.c @@ -65,11 +65,17 @@ px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen) /* get the length of the true salt */ sl = ep - sp; - /* */ + /* we need two PX_MD objects */ err = px_find_digest("md5", &ctx); if (err) return NULL; err = px_find_digest("md5", &ctx1); + if (err) + { + /* this path is possible under low-memory circumstances */ + px_md_free(ctx); + return NULL; + } /* The password first, since that is what is most unknown */ px_md_update(ctx, (const uint8 *) pw, strlen(pw)); diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c index da4cdede76fb4..9deaa797c1a0f 100644 --- a/contrib/pgcrypto/imath.c +++ b/contrib/pgcrypto/imath.c @@ -478,7 +478,7 @@ mp_int_init(mp_int z) mp_int mp_int_alloc(void) { - mp_int out = px_alloc(sizeof(mpz_t)); + mp_int out = palloc(sizeof(mpz_t)); if (out != NULL) mp_int_init(out); @@ -604,7 +604,7 @@ mp_int_free(mp_int z) assert(z != 
NULL); mp_int_clear(z); - px_free(z); /* note: NOT s_free() */ + pfree(z); /* note: NOT s_free() */ } mp_result @@ -2212,7 +2212,7 @@ static const mp_digit fill = (mp_digit) 0xdeadbeefabad1dea; static mp_digit * s_alloc(mp_size num) { - mp_digit *out = px_alloc(num * sizeof(mp_digit)); + mp_digit *out = palloc(num * sizeof(mp_digit)); assert(out != NULL); @@ -2235,7 +2235,7 @@ s_realloc(mp_digit *old, mp_size osize, mp_size nsize) new[ix] = fill; memcpy(new, old, osize * sizeof(mp_digit)); #else - mp_digit *new = px_realloc(old, nsize * sizeof(mp_digit)); + mp_digit *new = repalloc(old, nsize * sizeof(mp_digit)); assert(new != NULL); #endif @@ -2246,7 +2246,7 @@ s_realloc(mp_digit *old, mp_size osize, mp_size nsize) static void s_free(void *ptr) { - px_free(ptr); + pfree(ptr); } static bool diff --git a/contrib/pgcrypto/internal-sha2.c b/contrib/pgcrypto/internal-sha2.c index e06f55445effc..9fa940b5bbbbb 100644 --- a/contrib/pgcrypto/internal-sha2.c +++ b/contrib/pgcrypto/internal-sha2.c @@ -85,8 +85,8 @@ int_sha224_free(PX_MD *h) pg_sha224_ctx *ctx = (pg_sha224_ctx *) h->p.ptr; px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); - px_free(h); + pfree(ctx); + pfree(h); } /* SHA256 */ @@ -133,8 +133,8 @@ int_sha256_free(PX_MD *h) pg_sha256_ctx *ctx = (pg_sha256_ctx *) h->p.ptr; px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); - px_free(h); + pfree(ctx); + pfree(h); } /* SHA384 */ @@ -181,8 +181,8 @@ int_sha384_free(PX_MD *h) pg_sha384_ctx *ctx = (pg_sha384_ctx *) h->p.ptr; px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); - px_free(h); + pfree(ctx); + pfree(h); } /* SHA512 */ @@ -229,8 +229,8 @@ int_sha512_free(PX_MD *h) pg_sha512_ctx *ctx = (pg_sha512_ctx *) h->p.ptr; px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); - px_free(h); + pfree(ctx); + pfree(h); } /* init functions */ @@ -240,8 +240,7 @@ init_sha224(PX_MD *md) { pg_sha224_ctx *ctx; - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); md->p.ptr = ctx; @@ -260,8 +259,7 
@@ init_sha256(PX_MD *md) { pg_sha256_ctx *ctx; - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); md->p.ptr = ctx; @@ -280,8 +278,7 @@ init_sha384(PX_MD *md) { pg_sha384_ctx *ctx; - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); md->p.ptr = ctx; @@ -300,8 +297,7 @@ init_sha512(PX_MD *md) { pg_sha512_ctx *ctx; - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); md->p.ptr = ctx; diff --git a/contrib/pgcrypto/internal.c b/contrib/pgcrypto/internal.c index a12d7b4178322..06469d41c0a75 100644 --- a/contrib/pgcrypto/internal.c +++ b/contrib/pgcrypto/internal.c @@ -123,8 +123,8 @@ int_md5_free(PX_MD *h) MD5_CTX *ctx = (MD5_CTX *) h->p.ptr; px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); - px_free(h); + pfree(ctx); + pfree(h); } /* SHA1 */ @@ -171,8 +171,8 @@ int_sha1_free(PX_MD *h) SHA1_CTX *ctx = (SHA1_CTX *) h->p.ptr; px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); - px_free(h); + pfree(ctx); + pfree(h); } /* init functions */ @@ -182,8 +182,7 @@ init_md5(PX_MD *md) { MD5_CTX *ctx; - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); md->p.ptr = ctx; @@ -202,8 +201,7 @@ init_sha1(PX_MD *md) { SHA1_CTX *ctx; - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); md->p.ptr = ctx; @@ -246,9 +244,9 @@ intctx_free(PX_Cipher *c) if (cx) { px_memset(cx, 0, sizeof *cx); - px_free(cx); + pfree(cx); } - px_free(c); + pfree(c); } /* @@ -373,8 +371,7 @@ rj_load(int mode) PX_Cipher *c; struct int_ctx *cx; - c = px_alloc(sizeof *c); - memset(c, 0, sizeof *c); + c = palloc0(sizeof *c); c->block_size = rj_block_size; c->key_size = rj_key_size; @@ -384,8 +381,7 @@ rj_load(int mode) c->decrypt = rj_decrypt; c->free = intctx_free; - cx = px_alloc(sizeof *cx); - memset(cx, 0, sizeof *cx); + cx = palloc0(sizeof *cx); cx->mode = mode; c->ptr = cx; @@ -482,8 +478,7 @@ 
bf_load(int mode) PX_Cipher *c; struct int_ctx *cx; - c = px_alloc(sizeof *c); - memset(c, 0, sizeof *c); + c = palloc0(sizeof *c); c->block_size = bf_block_size; c->key_size = bf_key_size; @@ -493,8 +488,7 @@ bf_load(int mode) c->decrypt = bf_decrypt; c->free = intctx_free; - cx = px_alloc(sizeof *cx); - memset(cx, 0, sizeof *cx); + cx = palloc0(sizeof *cx); cx->mode = mode; c->ptr = cx; return c; @@ -564,7 +558,7 @@ px_find_digest(const char *name, PX_MD **res) for (p = int_digest_list; p->name; p++) if (pg_strcasecmp(p->name, name) == 0) { - h = px_alloc(sizeof(*h)); + h = palloc(sizeof(*h)); p->init(h); *res = h; diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c index 548ef6209745a..bc668a0e802ff 100644 --- a/contrib/pgcrypto/mbuf.c +++ b/contrib/pgcrypto/mbuf.c @@ -70,9 +70,9 @@ mbuf_free(MBuf *mbuf) if (mbuf->own_data) { px_memset(mbuf->data, 0, mbuf->buf_end - mbuf->data); - px_free(mbuf->data); + pfree(mbuf->data); } - px_free(mbuf); + pfree(mbuf); return 0; } @@ -88,7 +88,7 @@ prepare_room(MBuf *mbuf, int block_len) newlen = (mbuf->buf_end - mbuf->data) + ((block_len + STEP + STEP - 1) & -STEP); - newbuf = px_realloc(mbuf->data, newlen); + newbuf = repalloc(mbuf->data, newlen); mbuf->buf_end = newbuf + newlen; mbuf->data_end = newbuf + (mbuf->data_end - mbuf->data); @@ -121,8 +121,8 @@ mbuf_create(int len) if (!len) len = 8192; - mbuf = px_alloc(sizeof *mbuf); - mbuf->data = px_alloc(len); + mbuf = palloc(sizeof *mbuf); + mbuf->data = palloc(len); mbuf->buf_end = mbuf->data + len; mbuf->data_end = mbuf->data; mbuf->read_pos = mbuf->data; @@ -138,7 +138,7 @@ mbuf_create_from_data(uint8 *data, int len) { MBuf *mbuf; - mbuf = px_alloc(sizeof *mbuf); + mbuf = palloc(sizeof *mbuf); mbuf->data = (uint8 *) data; mbuf->buf_end = mbuf->data + len; mbuf->data_end = mbuf->data + len; @@ -219,15 +219,14 @@ pullf_create(PullFilter **pf_p, const PullFilterOps *op, void *init_arg, PullFil res = 0; } - pf = px_alloc(sizeof(*pf)); - memset(pf, 0, sizeof(*pf)); 
+ pf = palloc0(sizeof(*pf)); pf->buflen = res; pf->op = op; pf->priv = priv; pf->src = src; if (pf->buflen > 0) { - pf->buf = px_alloc(pf->buflen); + pf->buf = palloc(pf->buflen); pf->pos = 0; } else @@ -248,11 +247,11 @@ pullf_free(PullFilter *pf) if (pf->buf) { px_memset(pf->buf, 0, pf->buflen); - px_free(pf->buf); + pfree(pf->buf); } px_memset(pf, 0, sizeof(*pf)); - px_free(pf); + pfree(pf); } /* may return less data than asked, 0 means eof */ @@ -386,15 +385,14 @@ pushf_create(PushFilter **mp_p, const PushFilterOps *op, void *init_arg, PushFil res = 0; } - mp = px_alloc(sizeof(*mp)); - memset(mp, 0, sizeof(*mp)); + mp = palloc0(sizeof(*mp)); mp->block_size = res; mp->op = op; mp->priv = priv; mp->next = next; if (mp->block_size > 0) { - mp->buf = px_alloc(mp->block_size); + mp->buf = palloc(mp->block_size); mp->pos = 0; } else @@ -415,11 +413,11 @@ pushf_free(PushFilter *mp) if (mp->buf) { px_memset(mp->buf, 0, mp->block_size); - px_free(mp->buf); + pfree(mp->buf); } px_memset(mp, 0, sizeof(*mp)); - px_free(mp); + pfree(mp); } void diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c index 3057afb339097..ed96e4ce5359b 100644 --- a/contrib/pgcrypto/openssl.c +++ b/contrib/pgcrypto/openssl.c @@ -156,7 +156,7 @@ digest_free(PX_MD *h) OSSLDigest *digest = (OSSLDigest *) h->p.ptr; free_openssl_digest(digest); - px_free(h); + pfree(h); } static int px_openssl_initialized = 0; @@ -202,6 +202,7 @@ px_find_digest(const char *name, PX_MD **res) } if (EVP_DigestInit_ex(ctx, md, NULL) == 0) { + EVP_MD_CTX_destroy(ctx); pfree(digest); return -1; } @@ -214,7 +215,7 @@ px_find_digest(const char *name, PX_MD **res) open_digests = digest; /* The PX_MD object is allocated in the current memory context. 
*/ - h = px_alloc(sizeof(*h)); + h = palloc(sizeof(*h)); h->result_size = digest_result_size; h->block_size = digest_block_size; h->reset = digest_reset; @@ -353,7 +354,7 @@ gen_ossl_free(PX_Cipher *c) OSSLCipher *od = (OSSLCipher *) c->ptr; free_openssl_cipher(od); - px_free(c); + pfree(c); } static int @@ -790,7 +791,7 @@ px_find_cipher(const char *name, PX_Cipher **res) od->evp_ciph = i->ciph->cipher_func(); /* The PX_Cipher is allocated in current memory context */ - c = px_alloc(sizeof(*c)); + c = palloc(sizeof(*c)); c->block_size = gen_ossl_block_size; c->key_size = gen_ossl_key_size; c->iv_size = gen_ossl_iv_size; diff --git a/contrib/pgcrypto/pgp-cfb.c b/contrib/pgcrypto/pgp-cfb.c index 8ae7c8608fb5c..dafa562daa128 100644 --- a/contrib/pgcrypto/pgp-cfb.c +++ b/contrib/pgcrypto/pgp-cfb.c @@ -67,8 +67,7 @@ pgp_cfb_create(PGP_CFB **ctx_p, int algo, const uint8 *key, int key_len, return res; } - ctx = px_alloc(sizeof(*ctx)); - memset(ctx, 0, sizeof(*ctx)); + ctx = palloc0(sizeof(*ctx)); ctx->ciph = ciph; ctx->block_size = px_cipher_block_size(ciph); ctx->resync = resync; @@ -85,7 +84,7 @@ pgp_cfb_free(PGP_CFB *ctx) { px_cipher_free(ctx->ciph); px_memset(ctx, 0, sizeof(*ctx)); - px_free(ctx); + pfree(ctx); } /* diff --git a/contrib/pgcrypto/pgp-compress.c b/contrib/pgcrypto/pgp-compress.c index 3636a662b0769..7e8ddba187351 100644 --- a/contrib/pgcrypto/pgp-compress.c +++ b/contrib/pgcrypto/pgp-compress.c @@ -57,13 +57,13 @@ struct ZipStat static void * z_alloc(void *priv, unsigned n_items, unsigned item_len) { - return px_alloc(n_items * item_len); + return palloc(n_items * item_len); } static void z_free(void *priv, void *addr) { - px_free(addr); + pfree(addr); } static int @@ -80,8 +80,7 @@ compress_init(PushFilter *next, void *init_arg, void **priv_p) /* * init */ - st = px_alloc(sizeof(*st)); - memset(st, 0, sizeof(*st)); + st = palloc0(sizeof(*st)); st->buf_len = ZIP_OUT_BUF; st->stream.zalloc = z_alloc; st->stream.zfree = z_free; @@ -93,7 +92,7 @@ 
compress_init(PushFilter *next, void *init_arg, void **priv_p) res = deflateInit(&st->stream, ctx->compress_level); if (res != Z_OK) { - px_free(st); + pfree(st); return PXE_PGP_COMPRESSION_ERROR; } *priv_p = st; @@ -174,7 +173,7 @@ compress_free(void *priv) deflateEnd(&st->stream); px_memset(st, 0, sizeof(*st)); - px_free(st); + pfree(st); } static const PushFilterOps @@ -212,8 +211,7 @@ decompress_init(void **priv_p, void *arg, PullFilter *src) && ctx->compress_algo != PGP_COMPR_ZIP) return PXE_PGP_UNSUPPORTED_COMPR; - dec = px_alloc(sizeof(*dec)); - memset(dec, 0, sizeof(*dec)); + dec = palloc0(sizeof(*dec)); dec->buf_len = ZIP_OUT_BUF; *priv_p = dec; @@ -226,7 +224,7 @@ decompress_init(void **priv_p, void *arg, PullFilter *src) res = inflateInit(&dec->stream); if (res != Z_OK) { - px_free(dec); + pfree(dec); px_debug("decompress_init: inflateInit error"); return PXE_PGP_COMPRESSION_ERROR; } @@ -318,7 +316,7 @@ decompress_free(void *priv) inflateEnd(&dec->stream); px_memset(dec, 0, sizeof(*dec)); - px_free(dec); + pfree(dec); } static const PullFilterOps diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c index 3ecbf9c0c2596..d12dcad19452d 100644 --- a/contrib/pgcrypto/pgp-decrypt.c +++ b/contrib/pgcrypto/pgp-decrypt.c @@ -211,7 +211,7 @@ pktreader_free(void *priv) struct PktData *pkt = priv; px_memset(pkt, 0, sizeof(*pkt)); - px_free(pkt); + pfree(pkt); } static struct PullFilterOps pktreader_filter = { @@ -224,13 +224,13 @@ pgp_create_pkt_reader(PullFilter **pf_p, PullFilter *src, int len, int pkttype, PGP_Context *ctx) { int res; - struct PktData *pkt = px_alloc(sizeof(*pkt)); + struct PktData *pkt = palloc(sizeof(*pkt)); pkt->type = pkttype; pkt->len = len; res = pullf_create(pf_p, &pktreader_filter, pkt, src); if (res < 0) - px_free(pkt); + pfree(pkt); return res; } @@ -447,8 +447,7 @@ mdcbuf_init(void **priv_p, void *arg, PullFilter *src) PGP_Context *ctx = arg; struct MDCBufData *st; - st = px_alloc(sizeof(*st)); - memset(st, 0, 
sizeof(*st)); + st = palloc0(sizeof(*st)); st->buflen = sizeof(st->buf); st->ctx = ctx; *priv_p = st; @@ -576,7 +575,7 @@ mdcbuf_free(void *priv) px_md_free(st->ctx->mdc_ctx); st->ctx->mdc_ctx = NULL; px_memset(st, 0, sizeof(*st)); - px_free(st); + pfree(st); } static struct PullFilterOps mdcbuf_filter = { diff --git a/contrib/pgcrypto/pgp-encrypt.c b/contrib/pgcrypto/pgp-encrypt.c index 46518942ac2a3..f7467c9b1cb1c 100644 --- a/contrib/pgcrypto/pgp-encrypt.c +++ b/contrib/pgcrypto/pgp-encrypt.c @@ -178,8 +178,7 @@ encrypt_init(PushFilter *next, void *init_arg, void **priv_p) if (res < 0) return res; - st = px_alloc(sizeof(*st)); - memset(st, 0, sizeof(*st)); + st = palloc0(sizeof(*st)); st->ciph = ciph; *priv_p = st; @@ -219,7 +218,7 @@ encrypt_free(void *priv) if (st->ciph) pgp_cfb_free(st->ciph); px_memset(st, 0, sizeof(*st)); - px_free(st); + pfree(st); } static const PushFilterOps encrypt_filter = { @@ -241,7 +240,7 @@ pkt_stream_init(PushFilter *next, void *init_arg, void **priv_p) { struct PktStreamStat *st; - st = px_alloc(sizeof(*st)); + st = palloc(sizeof(*st)); st->final_done = 0; st->pkt_block = 1 << STREAM_BLOCK_SHIFT; *priv_p = st; @@ -301,7 +300,7 @@ pkt_stream_free(void *priv) struct PktStreamStat *st = priv; px_memset(st, 0, sizeof(*st)); - px_free(st); + pfree(st); } static const PushFilterOps pkt_stream_filter = { diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c index 0cea514180584..5b94e654521bb 100644 --- a/contrib/pgcrypto/pgp-mpi-internal.c +++ b/contrib/pgcrypto/pgp-mpi-internal.c @@ -60,10 +60,10 @@ mp_px_rand(uint32 bits, mpz_t *res) int last_bits = bits & 7; uint8 *buf; - buf = px_alloc(bytes); + buf = palloc(bytes); if (!pg_strong_random(buf, bytes)) { - px_free(buf); + pfree(buf); return PXE_NO_RANDOM; } @@ -78,7 +78,7 @@ mp_px_rand(uint32 bits, mpz_t *res) mp_int_read_unsigned(res, buf, bytes); - px_free(buf); + pfree(buf); return 0; } diff --git a/contrib/pgcrypto/pgp-mpi.c 
b/contrib/pgcrypto/pgp-mpi.c index 36a6d361ab316..03be27973bec8 100644 --- a/contrib/pgcrypto/pgp-mpi.c +++ b/contrib/pgcrypto/pgp-mpi.c @@ -44,7 +44,7 @@ pgp_mpi_alloc(int bits, PGP_MPI **mpi) px_debug("pgp_mpi_alloc: unreasonable request: bits=%d", bits); return PXE_PGP_CORRUPT_DATA; } - n = px_alloc(sizeof(*n) + len); + n = palloc(sizeof(*n) + len); n->bits = bits; n->bytes = len; n->data = (uint8 *) (n) + sizeof(*n); @@ -72,7 +72,7 @@ pgp_mpi_free(PGP_MPI *mpi) if (mpi == NULL) return 0; px_memset(mpi, 0, sizeof(*mpi) + mpi->bytes); - px_free(mpi); + pfree(mpi); return 0; } diff --git a/contrib/pgcrypto/pgp-pubenc.c b/contrib/pgcrypto/pgp-pubenc.c index 9fdcf7c31c775..c254a3727506a 100644 --- a/contrib/pgcrypto/pgp-pubenc.c +++ b/contrib/pgcrypto/pgp-pubenc.c @@ -46,12 +46,12 @@ pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p) if (pad_len < 8) return PXE_BUG; - buf = px_alloc(res_len); + buf = palloc(res_len); buf[0] = 0x02; if (!pg_strong_random(buf + 1, pad_len)) { - px_free(buf); + pfree(buf); return PXE_NO_RANDOM; } @@ -64,7 +64,7 @@ pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p) if (!pg_strong_random(p, 1)) { px_memset(buf, 0, res_len); - px_free(buf); + pfree(buf); return PXE_NO_RANDOM; } } @@ -97,7 +97,7 @@ create_secmsg(PGP_Context *ctx, PGP_MPI **msg_p, int full_bytes) /* * create "secret message" */ - secmsg = px_alloc(klen + 3); + secmsg = palloc(klen + 3); secmsg[0] = ctx->cipher_algo; memcpy(secmsg + 1, ctx->sess_key, klen); secmsg[klen + 1] = (cksum >> 8) & 0xFF; @@ -118,10 +118,10 @@ create_secmsg(PGP_Context *ctx, PGP_MPI **msg_p, int full_bytes) if (padded) { px_memset(padded, 0, full_bytes); - px_free(padded); + pfree(padded); } px_memset(secmsg, 0, klen + 3); - px_free(secmsg); + pfree(secmsg); if (res >= 0) *msg_p = m; diff --git a/contrib/pgcrypto/pgp-pubkey.c b/contrib/pgcrypto/pgp-pubkey.c index d447e5fd4fed8..9a6561caf9dde 100644 --- a/contrib/pgcrypto/pgp-pubkey.c +++ 
b/contrib/pgcrypto/pgp-pubkey.c @@ -39,8 +39,7 @@ pgp_key_alloc(PGP_PubKey **pk_p) { PGP_PubKey *pk; - pk = px_alloc(sizeof(*pk)); - memset(pk, 0, sizeof(*pk)); + pk = palloc0(sizeof(*pk)); *pk_p = pk; return 0; } @@ -78,7 +77,7 @@ pgp_key_free(PGP_PubKey *pk) break; } px_memset(pk, 0, sizeof(*pk)); - px_free(pk); + pfree(pk); } static int diff --git a/contrib/pgcrypto/pgp.c b/contrib/pgcrypto/pgp.c index 9b245fee61bbd..3e9c2fef9bc69 100644 --- a/contrib/pgcrypto/pgp.c +++ b/contrib/pgcrypto/pgp.c @@ -200,8 +200,7 @@ pgp_init(PGP_Context **ctx_p) { PGP_Context *ctx; - ctx = px_alloc(sizeof *ctx); - memset(ctx, 0, sizeof *ctx); + ctx = palloc0(sizeof *ctx); ctx->cipher_algo = def_cipher_algo; ctx->s2k_cipher_algo = def_s2k_cipher_algo; @@ -226,7 +225,7 @@ pgp_free(PGP_Context *ctx) if (ctx->pub_key) pgp_key_free(ctx->pub_key); px_memset(ctx, 0, sizeof *ctx); - px_free(ctx); + pfree(ctx); return 0; } diff --git a/contrib/pgcrypto/px-hmac.c b/contrib/pgcrypto/px-hmac.c index 06e5148f1b427..99174d265517b 100644 --- a/contrib/pgcrypto/px-hmac.c +++ b/contrib/pgcrypto/px-hmac.c @@ -57,8 +57,7 @@ hmac_init(PX_HMAC *h, const uint8 *key, unsigned klen) PX_MD *md = h->md; bs = px_md_block_size(md); - keybuf = px_alloc(bs); - memset(keybuf, 0, bs); + keybuf = palloc0(bs); if (klen > bs) { @@ -76,7 +75,7 @@ hmac_init(PX_HMAC *h, const uint8 *key, unsigned klen) } px_memset(keybuf, 0, bs); - px_free(keybuf); + pfree(keybuf); px_md_update(md, h->p.ipad, bs); } @@ -108,7 +107,7 @@ hmac_finish(PX_HMAC *h, uint8 *dst) bs = px_md_block_size(md); hlen = px_md_result_size(md); - buf = px_alloc(hlen); + buf = palloc(hlen); px_md_finish(md, buf); @@ -118,7 +117,7 @@ hmac_finish(PX_HMAC *h, uint8 *dst) px_md_finish(md, dst); px_memset(buf, 0, hlen); - px_free(buf); + pfree(buf); } static void @@ -131,9 +130,9 @@ hmac_free(PX_HMAC *h) px_memset(h->p.ipad, 0, bs); px_memset(h->p.opad, 0, bs); - px_free(h->p.ipad); - px_free(h->p.opad); - px_free(h); + pfree(h->p.ipad); + pfree(h->p.opad); 
+ pfree(h); } @@ -158,9 +157,9 @@ px_find_hmac(const char *name, PX_HMAC **res) return PXE_HASH_UNUSABLE_FOR_HMAC; } - h = px_alloc(sizeof(*h)); - h->p.ipad = px_alloc(bs); - h->p.opad = px_alloc(bs); + h = palloc(sizeof(*h)); + h->p.ipad = palloc(bs); + h->p.opad = palloc(bs); h->md = md; h->result_size = hmac_result_size; diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index 0f02fb56c4fb9..6a4681dae989e 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -196,8 +196,7 @@ combo_init(PX_Combo *cx, const uint8 *key, unsigned klen, ivs = px_cipher_iv_size(c); if (ivs > 0) { - ivbuf = px_alloc(ivs); - memset(ivbuf, 0, ivs); + ivbuf = palloc0(ivs); if (ivlen > ivs) memcpy(ivbuf, iv, ivs); else @@ -206,15 +205,15 @@ combo_init(PX_Combo *cx, const uint8 *key, unsigned klen, if (klen > ks) klen = ks; - keybuf = px_alloc(ks); + keybuf = palloc0(ks); memset(keybuf, 0, ks); memcpy(keybuf, key, klen); err = px_cipher_init(c, keybuf, klen, ivbuf); if (ivbuf) - px_free(ivbuf); - px_free(keybuf); + pfree(ivbuf); + pfree(keybuf); return err; } @@ -238,7 +237,7 @@ combo_encrypt(PX_Combo *cx, const uint8 *data, unsigned dlen, /* encrypt */ if (bs > 1) { - bbuf = px_alloc(bs * 4); + bbuf = palloc(bs * 4); bpos = dlen % bs; *rlen = dlen - bpos; memcpy(bbuf, data + *rlen, bpos); @@ -283,7 +282,7 @@ combo_encrypt(PX_Combo *cx, const uint8 *data, unsigned dlen, } out: if (bbuf) - px_free(bbuf); + pfree(bbuf); return err; } @@ -351,7 +350,7 @@ combo_free(PX_Combo *cx) if (cx->cipher) px_cipher_free(cx->cipher); px_memset(cx, 0, sizeof(*cx)); - px_free(cx); + pfree(cx); } /* PARSER */ @@ -408,17 +407,14 @@ px_find_combo(const char *name, PX_Combo **res) PX_Combo *cx; - cx = px_alloc(sizeof(*cx)); - memset(cx, 0, sizeof(*cx)); - - buf = px_alloc(strlen(name) + 1); - strcpy(buf, name); + cx = palloc0(sizeof(*cx)); + buf = pstrdup(name); err = parse_cipher_name(buf, &s_cipher, &s_pad); if (err) { - px_free(buf); - px_free(cx); + pfree(buf); + pfree(cx); return err; } 
@@ -445,7 +441,7 @@ px_find_combo(const char *name, PX_Combo **res) cx->decrypt_len = combo_decrypt_len; cx->free = combo_free; - px_free(buf); + pfree(buf); *res = cx; @@ -454,7 +450,7 @@ px_find_combo(const char *name, PX_Combo **res) err1: if (cx->cipher) px_cipher_free(cx->cipher); - px_free(cx); - px_free(buf); + pfree(cx); + pfree(buf); return PXE_NO_CIPHER; } diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index 0d4722a04a0a4..5487923edb3e9 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -37,19 +37,6 @@ /* keep debug messages? */ #define PX_DEBUG -/* a way to disable palloc - * - useful if compiled into standalone - */ -#ifndef PX_OWN_ALLOC -#define px_alloc(s) palloc(s) -#define px_realloc(p, s) repalloc(p, s) -#define px_free(p) pfree(p) -#else -void *px_alloc(size_t s); -void *px_realloc(void *p, size_t s); -void px_free(void *p); -#endif - /* max salt returned */ #define PX_MAX_SALT_LEN 128 diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 08daf26fdf085..2f411cf2f7b1c 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -74,6 +74,7 @@ static unsigned int prep_stmt_number = 0; static bool xact_got_connection = false; /* prototypes of private functions */ +static void make_new_connection(ConnCacheEntry *entry, UserMapping *user); static PGconn *connect_pg_server(ForeignServer *server, UserMapping *user); static void disconnect_pg_server(ConnCacheEntry *entry); static void check_conn_params(const char **keywords, const char **values, UserMapping *user); @@ -108,8 +109,10 @@ PGconn * GetConnection(UserMapping *user, bool will_prep_stmt) { bool found; + bool retry = false; ConnCacheEntry *entry; ConnCacheKey key; + MemoryContext ccxt = CurrentMemoryContext; /* First time through, initialize connection cache hashtable */ if (ConnectionHash == NULL) @@ -170,45 +173,85 @@ GetConnection(UserMapping *user, bool will_prep_stmt) disconnect_pg_server(entry); } - 
/* - * We don't check the health of cached connection here, because it would - * require some overhead. Broken connection will be detected when the - * connection is actually used. - */ - /* * If cache entry doesn't have a connection, we have to establish a new * connection. (If connect_pg_server throws an error, the cache entry * will remain in a valid empty state, ie conn == NULL.) */ if (entry->conn == NULL) + make_new_connection(entry, user); + + /* + * We check the health of the cached connection here when starting a new + * remote transaction. If a broken connection is detected, we try to + * reestablish a new connection later. + */ + PG_TRY(); { - ForeignServer *server = GetForeignServer(user->serverid); + /* Start a new transaction or subtransaction if needed. */ + begin_remote_xact(entry); + } + PG_CATCH(); + { + MemoryContext ecxt = MemoryContextSwitchTo(ccxt); + ErrorData *errdata = CopyErrorData(); - /* Reset all transient state fields, to be sure all are clean */ - entry->xact_depth = 0; - entry->have_prep_stmt = false; - entry->have_error = false; - entry->changing_xact_state = false; - entry->invalidated = false; - entry->server_hashvalue = - GetSysCacheHashValue1(FOREIGNSERVEROID, - ObjectIdGetDatum(server->serverid)); - entry->mapping_hashvalue = - GetSysCacheHashValue1(USERMAPPINGOID, - ObjectIdGetDatum(user->umid)); - - /* Now try to make the connection */ - entry->conn = connect_pg_server(server, user); - - elog(DEBUG3, "new postgres_fdw connection %p for server \"%s\" (user mapping oid %u, userid %u)", - entry->conn, server->servername, user->umid, user->userid); + /* + * If connection failure is reported when starting a new remote + * transaction (not subtransaction), new connection will be + * reestablished later. 
+ * + * After a broken connection is detected in libpq, any error other + * than connection failure (e.g., out-of-memory) can be thrown + * somewhere between return from libpq and the expected ereport() call + * in pgfdw_report_error(). In this case, since PQstatus() indicates + * CONNECTION_BAD, checking only PQstatus() causes the false detection + * of connection failure. To avoid this, we also verify that the + * error's sqlstate is ERRCODE_CONNECTION_FAILURE. Note that also + * checking only the sqlstate can cause another false detection + * because pgfdw_report_error() may report ERRCODE_CONNECTION_FAILURE + * for any libpq-originated error condition. + */ + if (errdata->sqlerrcode != ERRCODE_CONNECTION_FAILURE || + PQstatus(entry->conn) != CONNECTION_BAD || + entry->xact_depth > 0) + { + MemoryContextSwitchTo(ecxt); + PG_RE_THROW(); + } + + /* Clean up the error state */ + FlushErrorState(); + FreeErrorData(errdata); + errdata = NULL; + + retry = true; } + PG_END_TRY(); /* - * Start a new transaction or subtransaction if needed. + * If a broken connection is detected, disconnect it, reestablish a new + * connection and retry a new remote transaction. If connection failure is + * reported again, we give up getting a connection. 
*/ - begin_remote_xact(entry); + if (retry) + { + Assert(entry->xact_depth == 0); + + ereport(DEBUG3, + (errmsg_internal("could not start remote transaction on connection %p", + entry->conn)), + errdetail_internal("%s", pchomp(PQerrorMessage(entry->conn)))); + + elog(DEBUG3, "closing connection %p to reestablish a new one", + entry->conn); + disconnect_pg_server(entry); + + if (entry->conn == NULL) + make_new_connection(entry, user); + + begin_remote_xact(entry); + } /* Remember if caller will prepare statements */ entry->have_prep_stmt |= will_prep_stmt; @@ -216,6 +259,37 @@ GetConnection(UserMapping *user, bool will_prep_stmt) return entry->conn; } +/* + * Reset all transient state fields in the cached connection entry and + * establish new connection to the remote server. + */ +static void +make_new_connection(ConnCacheEntry *entry, UserMapping *user) +{ + ForeignServer *server = GetForeignServer(user->serverid); + + Assert(entry->conn == NULL); + + /* Reset all transient state fields, to be sure all are clean */ + entry->xact_depth = 0; + entry->have_prep_stmt = false; + entry->have_error = false; + entry->changing_xact_state = false; + entry->invalidated = false; + entry->server_hashvalue = + GetSysCacheHashValue1(FOREIGNSERVEROID, + ObjectIdGetDatum(server->serverid)); + entry->mapping_hashvalue = + GetSysCacheHashValue1(USERMAPPINGOID, + ObjectIdGetDatum(user->umid)); + + /* Now try to make the connection */ + entry->conn = connect_pg_server(server, user); + + elog(DEBUG3, "new postgres_fdw connection %p for server \"%s\" (user mapping oid %u, userid %u)", + entry->conn, server->servername, user->umid, user->userid); +} + /* * Connect to remote server using specified server and user mapping properties. 
*/ diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index ad37a7422133e..2d44df19fee09 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -2706,7 +2706,6 @@ deparseOpExpr(OpExpr *node, deparse_expr_cxt *context) HeapTuple tuple; Form_pg_operator form; char oprkind; - ListCell *arg; /* Retrieve information about the operator from system catalog. */ tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno)); @@ -2716,18 +2715,16 @@ deparseOpExpr(OpExpr *node, deparse_expr_cxt *context) oprkind = form->oprkind; /* Sanity check. */ - Assert((oprkind == 'r' && list_length(node->args) == 1) || - (oprkind == 'l' && list_length(node->args) == 1) || + Assert((oprkind == 'l' && list_length(node->args) == 1) || (oprkind == 'b' && list_length(node->args) == 2)); /* Always parenthesize the expression. */ appendStringInfoChar(buf, '('); - /* Deparse left operand. */ - if (oprkind == 'r' || oprkind == 'b') + /* Deparse left operand, if any. */ + if (oprkind == 'b') { - arg = list_head(node->args); - deparseExpr(lfirst(arg), context); + deparseExpr(linitial(node->args), context); appendStringInfoChar(buf, ' '); } @@ -2735,12 +2732,8 @@ deparseOpExpr(OpExpr *node, deparse_expr_cxt *context) deparseOperatorName(buf, form); /* Deparse right operand. 
*/ - if (oprkind == 'l' || oprkind == 'b') - { - arg = list_tail(node->args); - appendStringInfoChar(buf, ' '); - deparseExpr(lfirst(arg), context); - } + appendStringInfoChar(buf, ' '); + deparseExpr(llast(node->args), context); appendStringInfoChar(buf, ')'); diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 84bc0ee381715..2d88d0635837b 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -653,14 +653,6 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- Op Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = (- "C 1"))) (3 rows) -EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r) - QUERY PLAN ----------------------------------------------------------------------------------------------------------- - Foreign Scan on public.ft1 t1 - Output: c1, c2, c3, c4, c5, c6, c7, c8 - Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((1::numeric = ("C 1" !))) -(3 rows) - EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------------- @@ -8995,3 +8987,51 @@ PREPARE TRANSACTION 'fdw_tpc'; ERROR: cannot PREPARE a transaction that has operated on postgres_fdw foreign tables ROLLBACK; WARNING: there is no transaction in progress +-- =================================================================== +-- reestablish new connection +-- =================================================================== +-- Terminate the backend having the specified application_name and wait for +-- the termination to complete. 
+CREATE OR REPLACE PROCEDURE terminate_backend_and_wait(appname text) AS $$ +BEGIN + PERFORM pg_terminate_backend(pid) FROM pg_stat_activity + WHERE application_name = appname; + LOOP + PERFORM * FROM pg_stat_activity WHERE application_name = appname; + EXIT WHEN NOT FOUND; + PERFORM pg_sleep(1), pg_stat_clear_snapshot(); + END LOOP; +END; +$$ LANGUAGE plpgsql; +-- Change application_name of remote connection to special one +-- so that we can easily terminate the connection later. +ALTER SERVER loopback OPTIONS (application_name 'fdw_retry_check'); +SELECT 1 FROM ft1 LIMIT 1; + ?column? +---------- + 1 +(1 row) + +-- Terminate the remote connection. +CALL terminate_backend_and_wait('fdw_retry_check'); +-- This query should detect the broken connection when starting new remote +-- transaction, reestablish new connection, and then succeed. +BEGIN; +SELECT 1 FROM ft1 LIMIT 1; + ?column? +---------- + 1 +(1 row) + +-- If the query detects the broken connection when starting new remote +-- subtransaction, it doesn't reestablish new connection and should fail. +-- The text of the error might vary across platforms, so don't show it. 
+CALL terminate_backend_and_wait('fdw_retry_check'); +SAVEPOINT s; +\set VERBOSITY sqlstate +SELECT 1 FROM ft1 LIMIT 1; -- should fail +ERROR: 08006 +\set VERBOSITY default +COMMIT; +-- Clean up +DROP PROCEDURE terminate_backend_and_wait(text); diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index a31abce7c9960..9c5aaacc51566 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -451,6 +451,7 @@ static void init_returning_filter(PgFdwDirectModifyState *dmstate, List *fdw_scan_tlist, Index rtindex); static TupleTableSlot *apply_returning_filter(PgFdwDirectModifyState *dmstate, + ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); static void prepare_query_params(PlanState *node, @@ -2287,9 +2288,10 @@ postgresPlanDirectModify(PlannerInfo *root, } /* - * Update the operation info. + * Update the operation and target relation info. */ fscan->operation = operation; + fscan->resultRelation = resultRelation; /* * Update the fdw_exprs list that will be available to the executor. @@ -2355,7 +2357,7 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags) * Identify which user to do the remote access as. This should match what * ExecCheckRTEPerms() does. */ - rtindex = estate->es_result_relation_info->ri_RangeTableIndex; + rtindex = node->resultRelInfo->ri_RangeTableIndex; rte = exec_rt_fetch(rtindex, estate); userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); @@ -2450,7 +2452,7 @@ postgresIterateDirectModify(ForeignScanState *node) { PgFdwDirectModifyState *dmstate = (PgFdwDirectModifyState *) node->fdw_state; EState *estate = node->ss.ps.state; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; + ResultRelInfo *resultRelInfo = node->resultRelInfo; /* * If this is the first call after Begin, execute the statement. 
@@ -2589,8 +2591,8 @@ postgresExplainForeignScan(ForeignScanState *node, ExplainState *es) quote_identifier(relname)); } else - appendStringInfo(relations, "%s", - quote_identifier(relname)); + appendStringInfoString(relations, + quote_identifier(relname)); refname = (char *) list_nth(es->rtable_names, rti - 1); if (refname == NULL) refname = rte->eref->aliasname; @@ -4086,7 +4088,7 @@ get_returning_data(ForeignScanState *node) { PgFdwDirectModifyState *dmstate = (PgFdwDirectModifyState *) node->fdw_state; EState *estate = node->ss.ps.state; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; + ResultRelInfo *resultRelInfo = node->resultRelInfo; TupleTableSlot *slot = node->ss.ss_ScanTupleSlot; TupleTableSlot *resultSlot; @@ -4141,7 +4143,7 @@ get_returning_data(ForeignScanState *node) if (dmstate->rel) resultSlot = slot; else - resultSlot = apply_returning_filter(dmstate, slot, estate); + resultSlot = apply_returning_filter(dmstate, resultRelInfo, slot, estate); } dmstate->next_tuple++; @@ -4230,10 +4232,10 @@ init_returning_filter(PgFdwDirectModifyState *dmstate, */ static TupleTableSlot * apply_returning_filter(PgFdwDirectModifyState *dmstate, + ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { - ResultRelInfo *relInfo = estate->es_result_relation_info; TupleDesc resultTupType = RelationGetDescr(dmstate->resultRel); TupleTableSlot *resultSlot; Datum *values; @@ -4245,7 +4247,7 @@ apply_returning_filter(PgFdwDirectModifyState *dmstate, /* * Use the return tuple slot as a place to store the result tuple. */ - resultSlot = ExecGetReturningSlot(estate, relInfo); + resultSlot = ExecGetReturningSlot(estate, resultRelInfo); /* * Extract all the values of the scan tuple. 
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index d452d063430ad..7581c5417b90d 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -307,7 +307,6 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- Nu EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l) -EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r) EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef @@ -2654,3 +2653,47 @@ SELECT count(*) FROM ft1; -- error here PREPARE TRANSACTION 'fdw_tpc'; ROLLBACK; + +-- =================================================================== +-- reestablish new connection +-- =================================================================== + +-- Terminate the backend having the specified application_name and wait for +-- the termination to complete. +CREATE OR REPLACE PROCEDURE terminate_backend_and_wait(appname text) AS $$ +BEGIN + PERFORM pg_terminate_backend(pid) FROM pg_stat_activity + WHERE application_name = appname; + LOOP + PERFORM * FROM pg_stat_activity WHERE application_name = appname; + EXIT WHEN NOT FOUND; + PERFORM pg_sleep(1), pg_stat_clear_snapshot(); + END LOOP; +END; +$$ LANGUAGE plpgsql; + +-- Change application_name of remote connection to special one +-- so that we can easily terminate the connection later. 
+ALTER SERVER loopback OPTIONS (application_name 'fdw_retry_check'); +SELECT 1 FROM ft1 LIMIT 1; + +-- Terminate the remote connection. +CALL terminate_backend_and_wait('fdw_retry_check'); + +-- This query should detect the broken connection when starting new remote +-- transaction, reestablish new connection, and then succeed. +BEGIN; +SELECT 1 FROM ft1 LIMIT 1; + +-- If the query detects the broken connection when starting new remote +-- subtransaction, it doesn't reestablish new connection and should fail. +-- The text of the error might vary across platforms, so don't show it. +CALL terminate_backend_and_wait('fdw_retry_check'); +SAVEPOINT s; +\set VERBOSITY sqlstate +SELECT 1 FROM ft1 LIMIT 1; -- should fail +\set VERBOSITY default +COMMIT; + +-- Clean up +DROP PROCEDURE terminate_backend_and_wait(text); diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile index ed9a3d6c0edee..9a4c76f013645 100644 --- a/contrib/test_decoding/Makefile +++ b/contrib/test_decoding/Makefile @@ -5,9 +5,9 @@ PGFILEDESC = "test_decoding - example of a logical decoding output plugin" REGRESS = ddl xact rewrite toast permissions decoding_in_xact \ decoding_into_rel binary prepared replorigin time messages \ - spill slot truncate stream + spill slot truncate stream stats ISOLATION = mxact delayed_startup ondisk_startup concurrent_ddl_dml \ - oldest_xmin snapshot_transfer subxact_without_top + oldest_xmin snapshot_transfer subxact_without_top concurrent_stream REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf ISOLATION_OPTS = --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf diff --git a/contrib/test_decoding/expected/concurrent_stream.out b/contrib/test_decoding/expected/concurrent_stream.out new file mode 100644 index 0000000000000..e731d13d8fa97 --- /dev/null +++ b/contrib/test_decoding/expected/concurrent_stream.out @@ -0,0 +1,19 @@ +Parsed test spec with 2 sessions + +starting permutation: s0_begin s0_ddl s1_ddl 
s1_begin s1_toast_insert s1_commit s1_get_stream_changes +step s0_begin: BEGIN; +step s0_ddl: CREATE TABLE stream_test1(data text); +step s1_ddl: CREATE TABLE stream_test(data text); +step s1_begin: BEGIN; +step s1_toast_insert: INSERT INTO stream_test SELECT large_val(); +step s1_commit: COMMIT; +step s1_get_stream_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL,NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'stream-changes', '1'); +data + +opening a streamed block for transaction +streaming change for transaction +closing a streamed block for transaction +committing streamed transaction +?column? + +stop diff --git a/contrib/test_decoding/expected/stats.out b/contrib/test_decoding/expected/stats.out new file mode 100644 index 0000000000000..dafca965201d0 --- /dev/null +++ b/contrib/test_decoding/expected/stats.out @@ -0,0 +1,111 @@ +-- predictability +SET synchronous_commit = on; +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + ?column? +---------- + init +(1 row) + +CREATE TABLE stats_test(data text); +-- function to wait for counters to advance +CREATE FUNCTION wait_for_decode_stats(check_reset bool) RETURNS void AS $$ +DECLARE + start_time timestamptz := clock_timestamp(); + updated bool; +BEGIN + -- we don't want to wait forever; loop will exit after 30 seconds + FOR i IN 1 .. 
300 LOOP + + -- check to see if all updates have been reset/updated + SELECT CASE WHEN check_reset THEN (spill_txns = 0) + ELSE (spill_txns > 0) + END + INTO updated + FROM pg_stat_replication_slots WHERE slot_name='regression_slot'; + + exit WHEN updated; + + -- wait a little + perform pg_sleep_for('100 milliseconds'); + + -- reset stats snapshot so we can test again + perform pg_stat_clear_snapshot(); + + END LOOP; + + -- report time waited in postmaster log (where it won't change test output) + RAISE LOG 'wait_for_decode_stats delayed % seconds', + extract(epoch from clock_timestamp() - start_time); +END +$$ LANGUAGE plpgsql; +-- spilling the xact +BEGIN; +INSERT INTO stats_test SELECT 'serialize-topbig--1:'||g.i FROM generate_series(1, 5000) g(i); +COMMIT; +SELECT count(*) FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'skip-empty-xacts', '1'); + count +------- + 5002 +(1 row) + +-- Check stats, wait for the stats collector to update. We can't test the +-- exact stats count as that can vary if any background transaction (say by +-- autovacuum) happens in parallel to the main transaction. +SELECT wait_for_decode_stats(false); + wait_for_decode_stats +----------------------- + +(1 row) + +SELECT slot_name, spill_txns > 0 AS spill_txns, spill_count > 0 AS spill_count FROM pg_stat_replication_slots; + slot_name | spill_txns | spill_count +-----------------+------------+------------- + regression_slot | t | t +(1 row) + +-- reset the slot stats, and wait for stats collector to reset +SELECT pg_stat_reset_replication_slot('regression_slot'); + pg_stat_reset_replication_slot +-------------------------------- + +(1 row) + +SELECT wait_for_decode_stats(true); + wait_for_decode_stats +----------------------- + +(1 row) + +SELECT slot_name, spill_txns, spill_count FROM pg_stat_replication_slots; + slot_name | spill_txns | spill_count +-----------------+------------+------------- + regression_slot | 0 | 0 +(1 row) + +-- decode and check stats again. 
+SELECT count(*) FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'skip-empty-xacts', '1'); + count +------- + 5002 +(1 row) + +SELECT wait_for_decode_stats(false); + wait_for_decode_stats +----------------------- + +(1 row) + +SELECT slot_name, spill_txns > 0 AS spill_txns, spill_count > 0 AS spill_count FROM pg_stat_replication_slots; + slot_name | spill_txns | spill_count +-----------------+------------+------------- + regression_slot | t | t +(1 row) + +DROP FUNCTION wait_for_decode_stats(bool); +DROP TABLE stats_test; +SELECT pg_drop_replication_slot('regression_slot'); + pg_drop_replication_slot +-------------------------- + +(1 row) + diff --git a/contrib/test_decoding/expected/stream.out b/contrib/test_decoding/expected/stream.out index d7e32f8185469..e1c3bc838d5e3 100644 --- a/contrib/test_decoding/expected/stream.out +++ b/contrib/test_decoding/expected/stream.out @@ -29,10 +29,7 @@ COMMIT; SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL,NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'stream-changes', '1'); data ---------------------------------------------------------- - opening a streamed block for transaction streaming message: transactional: 1 prefix: test, sz: 50 - closing a streamed block for transaction - aborting streamed (sub)transaction opening a streamed block for transaction streaming change for transaction streaming change for transaction @@ -56,7 +53,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL,NULL, 'incl streaming change for transaction closing a streamed block for transaction committing streamed transaction -(27 rows) +(24 rows) -- streaming test for toast changes ALTER TABLE stream_test ALTER COLUMN data set storage external; diff --git a/contrib/test_decoding/specs/concurrent_stream.spec b/contrib/test_decoding/specs/concurrent_stream.spec new file mode 100644 index 0000000000000..ad9fde9c28447 --- /dev/null +++ b/contrib/test_decoding/specs/concurrent_stream.spec @@ 
-0,0 +1,37 @@ +# Test decoding of in-progress transaction containing dml and a concurrent +# transaction with ddl operation. The transaction containing ddl operation +# should not get streamed as it doesn't have any changes. + +setup +{ + SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); + + -- consume DDL + SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS 'select array_agg(md5(g::text))::text from generate_series(1, 80000) g'; +} + +teardown +{ + DROP TABLE IF EXISTS stream_test; + DROP TABLE IF EXISTS stream_test1; + SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot'); +} + +session "s0" +setup { SET synchronous_commit=on; } +step "s0_begin" { BEGIN; } +step "s0_ddl" {CREATE TABLE stream_test1(data text);} + +# The transaction commit for s1_ddl will add the INTERNAL_SNAPSHOT change to +# the currently running s0_ddl and we want to test that s0_ddl should not get +# streamed when user asked to skip-empty-xacts. 
+session "s1" +setup { SET synchronous_commit=on; } +step "s1_ddl" { CREATE TABLE stream_test(data text); } +step "s1_begin" { BEGIN; } +step "s1_toast_insert" {INSERT INTO stream_test SELECT large_val();} +step "s1_commit" { COMMIT; } +step "s1_get_stream_changes" { SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL,NULL, 'include-xids', '0', 'skip-empty-xacts', '1', 'stream-changes', '1');} + +permutation "s0_begin" "s0_ddl" "s1_ddl" "s1_begin" "s1_toast_insert" "s1_commit" "s1_get_stream_changes" diff --git a/contrib/test_decoding/sql/stats.sql b/contrib/test_decoding/sql/stats.sql new file mode 100644 index 0000000000000..182df84030d0d --- /dev/null +++ b/contrib/test_decoding/sql/stats.sql @@ -0,0 +1,64 @@ +-- predictability +SET synchronous_commit = on; + +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + +CREATE TABLE stats_test(data text); + +-- function to wait for counters to advance +CREATE FUNCTION wait_for_decode_stats(check_reset bool) RETURNS void AS $$ +DECLARE + start_time timestamptz := clock_timestamp(); + updated bool; +BEGIN + -- we don't want to wait forever; loop will exit after 30 seconds + FOR i IN 1 .. 
300 LOOP + + -- check to see if all updates have been reset/updated + SELECT CASE WHEN check_reset THEN (spill_txns = 0) + ELSE (spill_txns > 0) + END + INTO updated + FROM pg_stat_replication_slots WHERE slot_name='regression_slot'; + + exit WHEN updated; + + -- wait a little + perform pg_sleep_for('100 milliseconds'); + + -- reset stats snapshot so we can test again + perform pg_stat_clear_snapshot(); + + END LOOP; + + -- report time waited in postmaster log (where it won't change test output) + RAISE LOG 'wait_for_decode_stats delayed % seconds', + extract(epoch from clock_timestamp() - start_time); +END +$$ LANGUAGE plpgsql; + +-- spilling the xact +BEGIN; +INSERT INTO stats_test SELECT 'serialize-topbig--1:'||g.i FROM generate_series(1, 5000) g(i); +COMMIT; +SELECT count(*) FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'skip-empty-xacts', '1'); + +-- Check stats, wait for the stats collector to update. We can't test the +-- exact stats count as that can vary if any background transaction (say by +-- autovacuum) happens in parallel to the main transaction. +SELECT wait_for_decode_stats(false); +SELECT slot_name, spill_txns > 0 AS spill_txns, spill_count > 0 AS spill_count FROM pg_stat_replication_slots; + +-- reset the slot stats, and wait for stats collector to reset +SELECT pg_stat_reset_replication_slot('regression_slot'); +SELECT wait_for_decode_stats(true); +SELECT slot_name, spill_txns, spill_count FROM pg_stat_replication_slots; + +-- decode and check stats again. 
+SELECT count(*) FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'skip-empty-xacts', '1'); +SELECT wait_for_decode_stats(false); +SELECT slot_name, spill_txns > 0 AS spill_txns, spill_count > 0 AS spill_count FROM pg_stat_replication_slots; + +DROP FUNCTION wait_for_decode_stats(bool); +DROP TABLE stats_test; +SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index 34745150e9ba4..8e33614f14424 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -64,6 +64,10 @@ static void pg_decode_message(LogicalDecodingContext *ctx, Size sz, const char *message); static void pg_decode_stream_start(LogicalDecodingContext *ctx, ReorderBufferTXN *txn); +static void pg_output_stream_start(LogicalDecodingContext *ctx, + TestDecodingData *data, + ReorderBufferTXN *txn, + bool last_write); static void pg_decode_stream_stop(LogicalDecodingContext *ctx, ReorderBufferTXN *txn); static void pg_decode_stream_abort(LogicalDecodingContext *ctx, @@ -583,46 +587,46 @@ pg_decode_message(LogicalDecodingContext *ctx, OutputPluginWrite(ctx, true); } -/* - * We never try to stream any empty xact so we don't need any special handling - * for skip_empty_xacts in streaming mode APIs. 
- */ static void pg_decode_stream_start(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; - OutputPluginPrepareWrite(ctx, true); + data->xact_wrote_changes = false; + if (data->skip_empty_xacts) + return; + pg_output_stream_start(ctx, data, txn, true); +} + +static void +pg_output_stream_start(LogicalDecodingContext *ctx, TestDecodingData *data, ReorderBufferTXN *txn, bool last_write) +{ + OutputPluginPrepareWrite(ctx, last_write); if (data->include_xids) appendStringInfo(ctx->out, "opening a streamed block for transaction TXN %u", txn->xid); else - appendStringInfo(ctx->out, "opening a streamed block for transaction"); - OutputPluginWrite(ctx, true); + appendStringInfoString(ctx->out, "opening a streamed block for transaction"); + OutputPluginWrite(ctx, last_write); } -/* - * We never try to stream any empty xact so we don't need any special handling - * for skip_empty_xacts in streaming mode APIs. - */ static void pg_decode_stream_stop(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "closing a streamed block for transaction TXN %u", txn->xid); else - appendStringInfo(ctx->out, "closing a streamed block for transaction"); + appendStringInfoString(ctx->out, "closing a streamed block for transaction"); OutputPluginWrite(ctx, true); } -/* - * We never try to stream any empty xact so we don't need any special handling - * for skip_empty_xacts in streaming mode APIs. 
- */ static void pg_decode_stream_abort(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, @@ -630,18 +634,17 @@ pg_decode_stream_abort(LogicalDecodingContext *ctx, { TestDecodingData *data = ctx->output_plugin_private; + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "aborting streamed (sub)transaction TXN %u", txn->xid); else - appendStringInfo(ctx->out, "aborting streamed (sub)transaction"); + appendStringInfoString(ctx->out, "aborting streamed (sub)transaction"); OutputPluginWrite(ctx, true); } -/* - * We never try to stream any empty xact so we don't need any special handling - * for skip_empty_xacts in streaming mode APIs. - */ static void pg_decode_stream_commit(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, @@ -649,12 +652,15 @@ pg_decode_stream_commit(LogicalDecodingContext *ctx, { TestDecodingData *data = ctx->output_plugin_private; + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "committing streamed transaction TXN %u", txn->xid); else - appendStringInfo(ctx->out, "committing streamed transaction"); + appendStringInfoString(ctx->out, "committing streamed transaction"); if (data->include_timestamp) appendStringInfo(ctx->out, " (at %s)", @@ -676,11 +682,18 @@ pg_decode_stream_change(LogicalDecodingContext *ctx, { TestDecodingData *data = ctx->output_plugin_private; + /* output stream start if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) + { + pg_output_stream_start(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "streaming change for TXN %u", txn->xid); else - appendStringInfo(ctx->out, "streaming change for transaction"); + appendStringInfoString(ctx->out, "streaming change for transaction"); 
OutputPluginWrite(ctx, true); } @@ -722,10 +735,16 @@ pg_decode_stream_truncate(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, { TestDecodingData *data = ctx->output_plugin_private; + if (data->skip_empty_xacts && !data->xact_wrote_changes) + { + pg_output_stream_start(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "streaming truncate for TXN %u", txn->xid); else - appendStringInfo(ctx->out, "streaming truncate for transaction"); + appendStringInfoString(ctx->out, "streaming truncate for transaction"); OutputPluginWrite(ctx, true); } diff --git a/doc/src/sgml/amcheck.sgml b/doc/src/sgml/amcheck.sgml index a9df2c1a9d224..25e4bb2bfec2f 100644 --- a/doc/src/sgml/amcheck.sgml +++ b/doc/src/sgml/amcheck.sgml @@ -9,12 +9,11 @@ The amcheck module provides functions that allow you to - verify the logical consistency of the structure of relations. If the - structure appears to be valid, no error is raised. + verify the logical consistency of the structure of relations. - The functions verify various invariants in the + The B-Tree checking functions verify various invariants in the structure of the representation of particular relations. The correctness of the access method functions behind index scans and other important operations relies on these invariants always @@ -24,7 +23,7 @@ collated lexical order). If that particular invariant somehow fails to hold, we can expect binary searches on the affected page to incorrectly guide index scans, resulting in wrong answers to SQL - queries. + queries. If the structure appears to be valid, no error is raised. Verification is performed using the same procedures as those used by @@ -35,7 +34,22 @@ functions. - amcheck functions may only be used by superusers. 
+ Unlike the B-Tree checking functions which report corruption by raising + errors, the heap checking function verify_heapam checks + a table and attempts to return a set of rows, one row per corruption + detected. Despite this, if facilities that + verify_heapam relies upon are themselves corrupted, the + function may be unable to continue and may instead raise an error. + + + Permission to execute amcheck functions may be granted + to non-superusers, but before granting such permissions careful consideration + should be given to data security and privacy concerns. Although the + corruption reports generated by these functions do not focus on the contents + of the corrupted data so much as on the structure of that data and the nature + of the corruptions found, an attacker who gains permission to execute these + functions, particularly if the attacker can also induce corruption, might be + able to infer something of the data itself from such messages. @@ -187,12 +201,221 @@ SET client_min_messages = DEBUG1; + + + + + verify_heapam(relation regclass, + on_error_stop boolean, + check_toast boolean, + skip cstring, + startblock bigint, + endblock bigint, + blkno OUT bigint, + offnum OUT integer, + attnum OUT integer, + msg OUT text) + returns record + + + + + Checks a table for structural corruption, where pages in the relation + contain data that is invalidly formatted, and for logical corruption, + where pages are structurally valid but inconsistent with the rest of the + database cluster. 
Example usage: + +test=# select * from verify_heapam('mytable', check_toast := true); + blkno | offnum | attnum | msg +-------+--------+--------+-------------------------------------------------------------------------------------------------- + 17 | 12 | | xmin 4294967295 precedes relation freeze threshold 17:1134217582 + 960 | 4 | | data begins at offset 152 beyond the tuple length 58 + 960 | 4 | | tuple data should begin at byte 24, but actually begins at byte 152 (3 attributes, no nulls) + 960 | 5 | | tuple data should begin at byte 24, but actually begins at byte 27 (3 attributes, no nulls) + 960 | 6 | | tuple data should begin at byte 24, but actually begins at byte 16 (3 attributes, no nulls) + 960 | 7 | | tuple data should begin at byte 24, but actually begins at byte 21 (3 attributes, no nulls) + 1147 | 2 | | number of attributes 2047 exceeds maximum expected for table 3 + 1147 | 10 | | tuple data should begin at byte 280, but actually begins at byte 24 (2047 attributes, has nulls) + 1147 | 15 | | number of attributes 67 exceeds maximum expected for table 3 + 1147 | 16 | 1 | attribute 1 with length 4294967295 ends at offset 416848000 beyond total tuple length 58 + 1147 | 18 | 2 | final toast chunk number 0 differs from expected value 6 + 1147 | 19 | 2 | toasted value for attribute 2 missing from toast table + 1147 | 21 | | tuple is marked as only locked, but also claims key columns were updated + 1147 | 22 | | multitransaction ID 1775655 is from before relation cutoff 2355572 +(14 rows) + + As this example shows, the Tuple ID (TID) of the corrupt tuple is given + in the (blkno, offnum) columns, and + for corruptions specific to a particular attribute in the tuple, the + attnum field shows which one. + + + Structural corruption can happen due to faulty storage hardware, or + relation files being overwritten or modified by unrelated software. + This kind of corruption can also be detected with + data page + checksums. 
+
+
+
+ Relation pages which are correctly formatted, internally consistent, and
+ correct relative to their own internal checksums may still contain
+ logical corruption. As such, this kind of corruption cannot be detected
+ with checksums. Examples include toasted
+ values in the main table which lack a corresponding entry in the toast
+ table, and tuples in the main table with a Transaction ID that is older
+ than the oldest valid Transaction ID in the database or cluster.
+
+
+ Multiple causes of logical corruption have been observed in production
+ systems, including bugs in the PostgreSQL
+ server software, faulty and ill-conceived backup and restore tools, and
+ user error.
+
+
+ Corrupt relations are most concerning in live production environments,
+ precisely the same environments where high risk activities are least
+ welcome. For this reason, verify_heapam has been
+ designed to diagnose corruption without undue risk. It cannot guard
+ against all causes of backend crashes, as even executing the calling
+ query could be unsafe on a badly corrupted system. Accesses to catalog tables are performed and could
+ be problematic if the catalogs themselves are corrupted.
+
+
+ The design principle adhered to in verify_heapam is
+ that, if the rest of the system and server hardware are correct, under
+ default options, verify_heapam will not crash the
+ server due merely to structural or logical corruption in the target
+ table.
+
+
+ The check_toast option attempts to reconcile the target
+ table against entries in its corresponding toast table. This option is
+ disabled by default and is known to be slow.
+ If the target relation's corresponding toast table or toast index is
+ corrupt, reconciling the target table against toast values could
+ conceivably crash the server, although in many cases this would
+ just produce an error.
+ + + The following optional arguments are recognized: + + + + on_error_stop + + + If true, corruption checking stops at the end of the first block on + which any corruptions are found. + + + Defaults to false. + + + + + check_toast + + + If true, toasted values are checked against the corresponding + TOAST table. + + + Defaults to false. + + + + + skip + + + If not none, corruption checking skips blocks that + are marked as all-visible or all-frozen, as given. + Valid options are all-visible, + all-frozen and none. + + + Defaults to none. + + + + + startblock + + + If specified, corruption checking begins at the specified block, + skipping all previous blocks. It is an error to specify a + startblock outside the range of blocks in the + target table. + + + By default, does not skip any blocks. + + + + + endblock + + + If specified, corruption checking ends at the specified block, + skipping all remaining blocks. It is an error to specify an + endblock outside the range of blocks in the target + table. + + + By default, does not skip any blocks. + + + + + + For each corruption detected, verify_heapam returns + a row with the following columns: + + + + blkno + + + The number of the block containing the corrupt page. + + + + + offnum + + + The OffsetNumber of the corrupt tuple. + + + + + attnum + + + The attribute number of the corrupt column in the tuple, if the + corruption is specific to a column and not the tuple as a whole. + + + + + msg + + + A human readable message describing the corruption in the page. + + + + + + + Optional <parameter>heapallindexed</parameter> Verification - When the heapallindexed argument to + When the heapallindexed argument to B-Tree verification functions is true, an additional phase of verification is performed against the table associated with the target index relation. 
This consists of a dummy diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml index b9331830f7d0a..42a8ed328d886 100644 --- a/doc/src/sgml/backup.sgml +++ b/doc/src/sgml/backup.sgml @@ -177,8 +177,8 @@ pg_dump -h host1 dbname | - After restoring a backup, it is wise to run on each + After restoring a backup, it is wise to run ANALYZE on each database so the query optimizer has useful statistics; see and for more information. @@ -1594,7 +1594,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' - If a + If a CREATE DATABASE command is executed while a base backup is being taken, and then the template database that the CREATE DATABASE copied is modified while the base backup is still in progress, it is @@ -1607,7 +1607,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' - + CREATE TABLESPACE commands are WAL-logged with the literal absolute path, and will therefore be replayed as tablespace creations with the same absolute path. This might be undesirable if the log is being diff --git a/doc/src/sgml/bgworker.sgml b/doc/src/sgml/bgworker.sgml index 6e1cf121de08b..7fd673ab54ee4 100644 --- a/doc/src/sgml/bgworker.sgml +++ b/doc/src/sgml/bgworker.sgml @@ -34,14 +34,18 @@ PostgreSQL is started by including the module name in shared_preload_libraries. A module wishing to run a background worker can register it by calling - RegisterBackgroundWorker(BackgroundWorker *worker) - from its _PG_init(). Background workers can also be started - after the system is up and running by calling the function - RegisterDynamicBackgroundWorker(BackgroundWorker - *worker, BackgroundWorkerHandle **handle). Unlike - RegisterBackgroundWorker, which can only be called from within - the postmaster, RegisterDynamicBackgroundWorker must be - called from a regular backend or another background worker. + RegisterBackgroundWorker(BackgroundWorker + *worker) + from its _PG_init() function. 
+ Background workers can also be started + after the system is up and running by calling + RegisterDynamicBackgroundWorker(BackgroundWorker + *worker, BackgroundWorkerHandle + **handle). Unlike + RegisterBackgroundWorker, which can only be called from + within the postmaster process, + RegisterDynamicBackgroundWorker must be called + from a regular backend or another background worker. diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml index 128072ded9be9..73a21b6add1b2 100644 --- a/doc/src/sgml/biblio.sgml +++ b/doc/src/sgml/biblio.sgml @@ -121,8 +121,8 @@ - Principles of Database and Knowledge - Base Systems + Principles of Database and Knowledge-Base Systems + Classical Database Systems Jeffrey D. diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml index 4e696d1d3ed02..62fc9cbea3dc7 100644 --- a/doc/src/sgml/bki.sgml +++ b/doc/src/sgml/bki.sgml @@ -752,8 +752,8 @@ $ perl rewrite_dat_with_prokind.pl pg_proc.dat next token that syntactically cannot belong to the preceding command starts a new one. (Usually you would put a new command on a new line, for clarity.) Tokens can be certain key words, special - characters (parentheses, commas, etc.), numbers, or double-quoted - strings. Everything is case sensitive. + characters (parentheses, commas, etc.), identifiers, numbers, or + single-quoted strings. Everything is case sensitive. @@ -876,7 +876,9 @@ $ perl rewrite_dat_with_prokind.pl pg_proc.dat NULL values can be specified using the special key word _null_. Values that do not look like - identifiers or digit strings must be double quoted. + identifiers or digit strings must be single-quoted. + (To include a single quote in a value, write it twice. + Escape-string-style backslash escapes are allowed in the string, too.) 
@@ -1046,7 +1048,7 @@ $ perl rewrite_dat_with_prokind.pl pg_proc.dat create test_table 420 (oid = oid, cola = int4, colb = text) open test_table -insert ( 421 1 "value1" ) +insert ( 421 1 'value 1' ) insert ( 422 2 _null_ ) close test_table diff --git a/doc/src/sgml/btree.sgml b/doc/src/sgml/btree.sgml index 435b7cb24da94..bb395e6a85c15 100644 --- a/doc/src/sgml/btree.sgml +++ b/doc/src/sgml/btree.sgml @@ -263,7 +263,7 @@ - inrange + in_range in_range support functions @@ -642,7 +642,7 @@ options(relopts local_relopts *) returns Deduplication works by periodically merging groups of duplicate - tuples together, forming a single posting list tuple for each + tuples together, forming a single posting list tuple for each group. The column key value(s) only appear once in this representation. This is followed by a sorted array of TIDs that point to rows in the table. This diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index 508bea3bc6442..5bd54cb21832e 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -639,8 +639,8 @@ - New aggregate functions are registered with the + New aggregate functions are registered with the CREATE AGGREGATE command. See for more information about writing aggregate functions and the meaning of the transition functions, etc. @@ -1161,7 +1161,7 @@ attstattarget controls the level of detail of statistics accumulated for this column by - . + ANALYZE. A zero value indicates that no statistics should be collected. A negative value says to use the system default statistics target. The exact meaning of positive values is data type-dependent. @@ -1525,7 +1525,7 @@ Role can log in. That is, this role can be given as the initial - session authorization identifier + session authorization identifier. @@ -1966,9 +1966,9 @@ SCRAM-SHA-256$<iteration count>:&l Size of the on-disk representation of this table in pages (of size BLCKSZ). This is only an estimate used by the - planner. 
It is updated by , - , and a few DDL commands such as - . + planner. It is updated by VACUUM, + ANALYZE, and a few DDL commands such as + CREATE INDEX. @@ -1978,9 +1978,9 @@ SCRAM-SHA-256$<iteration count>:&l Number of live rows in the table. This is only an estimate used by - the planner. It is updated by , - , and a few DDL commands such as - . + the planner. It is updated by VACUUM, + ANALYZE, and a few DDL commands such as + CREATE INDEX. If the table has never yet been vacuumed or analyzed, reltuples contains -1 indicating that the row count is @@ -1995,9 +1995,9 @@ SCRAM-SHA-256$<iteration count>:&l Number of pages that are marked all-visible in the table's visibility map. This is only an estimate used by the - planner. It is updated by , - , and a few DDL commands such as - . + planner. It is updated by VACUUM, + ANALYZE, and a few DDL commands such as + CREATE INDEX. @@ -2241,8 +2241,8 @@ SCRAM-SHA-256$<iteration count>:&l lazily: they are guaranteed to be true if that's the correct state, but may not be reset to false immediately when the condition is no longer true. For example, relhasindex is set by - , but it is never cleared by - . Instead, clears + CREATE INDEX, but it is never cleared by + DROP INDEX. Instead, VACUUM clears relhasindex if it finds the table has no indexes. This arrangement avoids race conditions and improves concurrency. @@ -2848,8 +2848,8 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_database stores information about - the available databases. Databases are created with the command. + the available databases. Databases are created with the CREATE DATABASE command. Consult for details about the meaning of some of the parameters. @@ -3425,7 +3425,7 @@ SCRAM-SHA-256$<iteration count>:&l the referenced object (see pg_extension). The dependent object can be dropped only via - on the referenced object. + DROP EXTENSION on the referenced object. 
Functionally this dependency type acts the same as an INTERNAL dependency, but it's kept separate for clarity and to simplify pg_dump. @@ -3492,7 +3492,7 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_description stores optional descriptions (comments) for each database object. Descriptions can be manipulated - with the command and viewed with + with the COMMENT command and viewed with psql's \d commands. Descriptions of many built-in system objects are provided in the initial contents of pg_description. @@ -4285,7 +4285,7 @@ SCRAM-SHA-256$<iteration count>:&l If true, the index is currently valid for queries. False means the index is possibly incomplete: it must still be modified by - / operations, but it cannot safely + INSERT/UPDATE operations, but it cannot safely be used for queries. If it is unique, the uniqueness property is not guaranteed true either. @@ -4309,7 +4309,7 @@ SCRAM-SHA-256$<iteration count>:&l If true, the index is currently ready for inserts. False means the - index must be ignored by / + index must be ignored by INSERT/UPDATE operations. @@ -4504,11 +4504,11 @@ SCRAM-SHA-256$<iteration count>:&l Objects can have initial privileges either by having those privileges set when the system is initialized (by initdb) or when the - object is created during a and the - extension script sets initial privileges using the + object is created during a CREATE EXTENSION and the + extension script sets initial privileges using the GRANT system. Note that the system will automatically handle recording of the privileges during the extension script and that extension authors need - only use the and + only use the GRANT and REVOKE statements in their script to have the privileges recorded. 
The privtype column indicates if the initial privilege was set by initdb or during a @@ -5159,8 +5159,8 @@ SCRAM-SHA-256$<iteration count>:&l oprkind char - b = infix (both), l = prefix - (left), r = postfix (right) + b = infix operator (both), + or l = prefix operator (left) @@ -5188,7 +5188,7 @@ SCRAM-SHA-256$<iteration count>:&l (references pg_type.oid) - Type of the left operand + Type of the left operand (0 if none) @@ -5266,7 +5266,7 @@ SCRAM-SHA-256$<iteration count>:&l - Unused column contain zeroes. For example, oprleft + Unused columns contain zeroes. For example, oprleft is zero for a prefix operator. @@ -5875,8 +5875,9 @@ SCRAM-SHA-256$<iteration count>:&l An array with the data types of the function arguments. This includes only input arguments (including INOUT and - VARIADIC arguments), and thus represents - the call signature of the function. + VARIADIC arguments), as well as + OUT parameters of procedures, and thus represents + the call signature of the function or procedure. @@ -6481,7 +6482,7 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_seclabel stores security labels on database objects. Security labels can be manipulated - with the command. For an easier + with the SECURITY LABEL command. For an easier way to view security labels, see . @@ -6855,7 +6856,7 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_shdescription stores optional descriptions (comments) for shared database objects. Descriptions can be - manipulated with the command and viewed with + manipulated with the COMMENT command and viewed with psql's \d commands. @@ -6931,7 +6932,7 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_shseclabel stores security labels on shared database objects. Security labels can be manipulated - with the command. For an easier + with the SECURITY LABEL command. For an easier way to view security labels, see . 
@@ -7015,7 +7016,7 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_statistic stores statistical data about the contents of the database. Entries are - created by + created by ANALYZE and subsequently used by the query planner. Note that all the statistical data is inherently approximate, even assuming that it is up-to-date. @@ -7223,7 +7224,7 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_statistic_ext holds definitions of extended planner statistics. Each row in this catalog corresponds to a statistics object - created with . + created with CREATE STATISTICS. @@ -7296,9 +7297,10 @@ SCRAM-SHA-256$<iteration count>:&l stxstattarget controls the level of detail of statistics accumulated for this statistics object by - . + ANALYZE. A zero value indicates that no statistics should be collected. - A negative value says to use the system default statistics target. + A negative value says to use the maximum of the statistics targets of + the referenced columns, if set, or the system default statistics target. Positive values of stxstattarget determine the target number of most common values to collect. @@ -7336,9 +7338,9 @@ SCRAM-SHA-256$<iteration count>:&l The pg_statistic_ext entry is filled in - completely during , but the actual + completely during CREATE STATISTICS, but the actual statistical values are not computed then. - Subsequent commands compute the desired values + Subsequent ANALYZE commands compute the desired values and populate an entry in the pg_statistic_ext_data catalog. @@ -7357,7 +7359,7 @@ SCRAM-SHA-256$<iteration count>:&l holds data for extended planner statistics defined in pg_statistic_ext. Each row in this catalog corresponds to a statistics object - created with . + created with CREATE STATISTICS. @@ -7590,7 +7592,7 @@ SCRAM-SHA-256$<iteration count>:&l This catalog only contains tables known to the subscription after running - either or + either CREATE SUBSCRIPTION or ALTER SUBSCRIPTION ... REFRESH PUBLICATION. 
@@ -8568,9 +8570,9 @@ SCRAM-SHA-256$<iteration count>:&l The catalog pg_type stores information about data types. Base types and enum types (scalar types) are created with - , and + CREATE TYPE, and domains with - . + CREATE DOMAIN. A composite type is automatically created for each table in the database, to represent the row structure of the table. It is also possible to create composite types with CREATE TYPE AS. @@ -9790,7 +9792,7 @@ SCRAM-SHA-256$<iteration count>:&l - via the + via the DECLARE statement in SQL @@ -10916,7 +10918,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx pg_prepared_statements contains one row for each prepared statement. Rows are added to the view when a new prepared statement is created and removed when a prepared statement - is released (for example, via the command). + is released (for example, via the DEALLOCATE command).
@@ -10950,7 +10952,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The query string submitted by the client to create this prepared statement. For prepared statements created via SQL, - this is the statement submitted by + this is the PREPARE statement submitted by the client. For prepared statements created via the frontend/backend protocol, this is the text of the prepared statement itself. @@ -10984,7 +10986,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx true if the prepared statement was created - via the SQL command; + via the PREPARE SQL command; false if the statement was prepared via the frontend/backend protocol @@ -11966,10 +11968,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_settings provides access to run-time parameters of the server. It is essentially an alternative - interface to the - and commands. + interface to the SHOW + and SET commands. It also provides access to some facts about each parameter that are - not directly available from SHOW, such as minimum and + not directly available from SHOW, such as minimum and maximum values. @@ -12115,7 +12117,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx reset_valtext - Value that would reset the parameter to + Value that RESET would reset the parameter to in the current session @@ -12248,7 +12250,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx These settings can be set from postgresql.conf, - or within a session via the command; but only superusers + or within a session via the SET command; but only superusers can change them via SET. Changes in postgresql.conf will affect existing sessions only if no session-local value has been established with SET. @@ -12261,7 +12263,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx These settings can be set from postgresql.conf, - or within a session via the command. Any user is + or within a session via the SET command. 
Any user is allowed to change their session-local value. Changes in postgresql.conf will affect existing sessions only if no session-local value has been established with SET. @@ -12277,9 +12279,9 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The pg_settings view cannot be inserted into or - deleted from, but it can be updated. An applied + deleted from, but it can be updated. An UPDATE applied to a row of pg_settings is equivalent to executing - the command on that named + the SET command on that named parameter. The change only affects the value used by the current session. If an UPDATE is issued within a transaction that is later aborted, the effects of the UPDATE command @@ -12621,7 +12623,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx If greater than zero, the estimated number of distinct values in the column. If less than zero, the negative of the number of distinct values divided by the number of rows. (The negated form is used when - believes that the number of distinct values is + ANALYZE believes that the number of distinct values is likely to increase as the table grows; the positive form is used when the column seems to have a fixed number of possible values.) For example, -1 indicates a unique column in which the number of distinct @@ -12845,7 +12847,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx than zero, the estimated number of distinct values in the combination. If less than zero, the negative of the number of distinct values divided by the number of rows. - (The negated form is used when believes that + (The negated form is used when ANALYZE believes that the number of distinct values is likely to increase as the table grows; the positive form is used when the column seems to have a fixed number of possible values.) 
For example, -1 indicates a unique combination of diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml index 4b4563c5b9d80..2745b44417654 100644 --- a/doc/src/sgml/charset.sgml +++ b/doc/src/sgml/charset.sgml @@ -791,11 +791,11 @@ CREATE COLLATION german (provider = libc, locale = 'de_DE'); - CREATE COLLATION digitslast (provider = icu, locale = 'en-u-kr-latn-digit'); - CREATE COLLATION digitslast (provider = icu, locale = 'en@colReorder=latn-digit'); + CREATE COLLATION latinlast (provider = icu, locale = 'en-u-kr-grek-latn'); + CREATE COLLATION latinlast (provider = icu, locale = 'en@colReorder=grek-latn'); - Sort digits after Latin letters. (The default is digits before letters.) + Sort Greek letters before Latin ones. (The default is Latin before Greek.) @@ -811,9 +811,9 @@ CREATE COLLATION german (provider = libc, locale = 'de_DE'); - - CREATE COLLATION special (provider = icu, locale = 'en-u-kf-upper-kr-latn-digit'); - CREATE COLLATION special (provider = icu, locale = 'en@colCaseFirst=upper;colReorder=latn-digit'); + + CREATE COLLATION special (provider = icu, locale = 'en-u-kf-upper-kr-grek-latn'); + CREATE COLLATION special (provider = icu, locale = 'en@colCaseFirst=upper;colReorder=grek-latn'); Combines both of the above options. diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index d62d1a061c9c1..bad3c3469c951 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -2044,13 +2044,10 @@ host ... radius radiusservers="server1,server2" radiussecrets="""secret one"","" - In a pg_hba.conf record specifying certificate - authentication, the authentication option clientcert is - assumed to be verify-ca or verify-full, - and it cannot be turned off since a client certificate is necessary for this - method. What the cert method adds to the basic - clientcert certificate validity test is a check that the - cn attribute matches the database user name. 
+ It is redundant to use the clientcert option with + cert authentication because cert + authentication is effectively trust authentication + with clientcert=verify-full. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index c4ba49ffaf58a..f043433e3185f 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -189,7 +189,7 @@ shared_buffers = 128MB postgresql.auto.confpostgresql.auto.conf, which has the same format as postgresql.conf but is intended to be edited automatically, not manually. This file holds - settings provided through the command. + settings provided through the ALTER SYSTEM command. This file is read whenever postgresql.conf is, and its settings take effect in the same way. Settings in postgresql.auto.conf override those @@ -221,7 +221,7 @@ shared_buffers = 128MB PostgreSQL provides three SQL commands to establish configuration defaults. - The already-mentioned command + The already-mentioned ALTER SYSTEM command provides a SQL-accessible means of changing global defaults; it is functionally equivalent to editing postgresql.conf. In addition, there are two commands that allow setting of defaults @@ -231,14 +231,14 @@ shared_buffers = 128MB - The command allows global + The ALTER DATABASE command allows global settings to be overridden on a per-database basis. - The command allows both global and + The ALTER ROLE command allows both global and per-database settings to be overridden with user-specific values. @@ -262,7 +262,7 @@ shared_buffers = 128MB - The command allows inspection of the + The SHOW command allows inspection of the current value of all parameters. The corresponding function is current_setting(setting_name text). @@ -270,7 +270,7 @@ shared_buffers = 128MB - The command allows modification of the + The SET command allows modification of the current value of those parameters that can be set locally to a session; it has no effect on other sessions. 
The corresponding function is @@ -296,7 +296,7 @@ shared_buffers = 128MB - Using on this view, specifically + Using UPDATE on this view, specifically updating the setting column, is the equivalent of issuing SET commands. For example, the equivalent of @@ -1261,10 +1261,11 @@ include_dir 'conf.d' +3DES - The OpenSSL default order for HIGH is problematic - because it orders 3DES higher than AES128. This is wrong because - 3DES offers less security than AES128, and it is also much - slower. +3DES reorders it after all other + The OpenSSL default order for + HIGH is problematic because it orders 3DES + higher than AES128. This is wrong because 3DES offers less + security than AES128, and it is also much slower. + +3DES reorders it after all other HIGH and MEDIUM ciphers. @@ -1284,8 +1285,8 @@ include_dir 'conf.d' - Available cipher suite details will vary across OpenSSL versions. Use - the command + Available cipher suite details will vary across + OpenSSL versions. Use the command openssl ciphers -v 'HIGH:MEDIUM:+3DES:!aNULL' to see actual details for the currently installed OpenSSL version. Note that this list is filtered at run time based on the @@ -1337,7 +1338,8 @@ include_dir 'conf.d' - OpenSSL names for the most common curves are: + OpenSSL names for the most common curves + are: prime256v1 (NIST P-256), secp384r1 (NIST P-384), secp521r1 (NIST P-521). @@ -2365,7 +2367,7 @@ include_dir 'conf.d' When changing this value, consider also adjusting , - , and + , and . @@ -2413,7 +2415,7 @@ include_dir 'conf.d' - + max_parallel_maintenance_workers (integer) max_parallel_maintenance_workers configuration parameter @@ -2462,7 +2464,7 @@ include_dir 'conf.d' Sets the maximum number of workers that the system can support for parallel operations. The default value is 8. When increasing or decreasing this value, consider also adjusting - and + and . 
Also, note that a setting for this value which is higher than will have no effect, @@ -2701,14 +2703,26 @@ include_dir 'conf.d' - Specifies whether transaction commit will wait for WAL records - to be written to disk before the command returns a success - indication to the client. Valid values are on, - remote_apply, remote_write, local, - and off. The default, and safe, setting - is on. When off, there can be a delay between - when success is reported to the client and when the transaction is - really guaranteed to be safe against a server crash. (The maximum + Specifies how much WAL processing must complete before + the database server returns a success + indication to the client. Valid values are + remote_apply, on + (the default), remote_write, + local, and off. + + + + If synchronous_standby_names is empty, + the only meaningful settings are on and + off; remote_apply, + remote_write and local + all provide the same local synchronization level + as on. The local behavior of all + non-off modes is to wait for local flush of WAL + to disk. In off mode, there is no waiting, + so there can be a delay between when success is reported to the + client and when the transaction is later guaranteed to be safe + against a server crash. (The maximum delay is three times .) Unlike , setting this parameter to off does not create any risk of database inconsistency: an operating @@ -2720,38 +2734,40 @@ include_dir 'conf.d' exact certainty about the durability of a transaction. For more discussion see . + + + If is non-empty, + synchronous_commit also controls whether + transaction commits will wait for their WAL records to be + processed on the standby server(s). + + - If is non-empty, this - parameter also controls whether or not transaction commits will wait - for their WAL records to be replicated to the standby server(s). 
- When set to on, commits will wait until replies + When set to remote_apply, commits will wait + until replies from the current synchronous standby(s) indicate they + have received the commit record of the transaction and applied + it, so that it has become visible to queries on the standby(s), + and also written to durable storage on the standbys. This will + cause much larger commit delays than previous settings since + it waits for WAL replay. When set to on, + commits wait until replies from the current synchronous standby(s) indicate they have received - the commit record of the transaction and flushed it to disk. This + the commit record of the transaction and flushed it to durable storage. This ensures the transaction will not be lost unless both the primary and all synchronous standbys suffer corruption of their database storage. - When set to remote_apply, commits will wait until replies - from the current synchronous standby(s) indicate they have received the - commit record of the transaction and applied it, so that it has become - visible to queries on the standby(s). When set to remote_write, commits will wait until replies from the current synchronous standby(s) indicate they have - received the commit record of the transaction and written it out to - their operating system. This setting is sufficient to - ensure data preservation even if a standby instance of - PostgreSQL were to crash, but not if the standby - suffers an operating-system-level crash, since the data has not + received the commit record of the transaction and written it to + their file systems. This setting ensures data preservation if a standby instance of + PostgreSQL crashes, but not if the standby + suffers an operating-system-level crash because the data has not necessarily reached durable storage on the standby. - Finally, the setting local causes commits to wait for - local flush to disk, but not for replication. 
This is not usually + The setting local causes commits to wait for + local flush to disk, but not for replication. This is usually not desirable when synchronous replication is in use, but is provided for completeness. - - If synchronous_standby_names is empty, the settings - on, remote_apply, remote_write - and local all provide the same synchronization level: - transaction commits only wait for local flush to disk. - + This parameter can be changed at any time; the behavior for any one transaction is determined by the setting in effect when it @@ -2761,6 +2777,76 @@ include_dir 'conf.d' asynchronously when the default is the opposite, issue SET LOCAL synchronous_commit TO OFF within the transaction. + + + summarizes the + capabilities of the synchronous_commit settings. + + +
+ synchronous_commit Modes + + + + + + + + + synchronous_commit setting + local durable commit + standby durable commit after PG crash + standby durable commit after OS crash + standby query consistency + + + + + + + remote_apply + + + + + + + + on + + + + + + + + remote_write + + + + + + + + local + + + + + + + + off + + + + + + + + +
+ @@ -3811,7 +3897,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows servers or streaming base backup clients (i.e., the maximum number of simultaneously running WAL sender processes). The default is 10. The value 0 means - replication is disabled. Abrupt streaming client disconnection might + replication is disabled. Abrupt disconnection of a streaming client might leave an orphaned connection slot behind until a timeout is reached, so this parameter should be set slightly higher than the maximum number of expected clients so disconnected clients can immediately @@ -3900,9 +3986,9 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows slots are allowed to retain in the pg_wal directory at checkpoint time. If max_slot_wal_keep_size is -1 (the default), - replication slots retain unlimited amount of WAL files. If - restart_lsn of a replication slot gets behind more than that megabytes - from the current LSN, the standby using the slot may no longer be able + replication slots may retain an unlimited amount of WAL files. Otherwise, if + restart_lsn of a replication slot falls behind the current LSN by more + than the given size, the standby using the slot may no longer be able to continue replication due to removal of required WAL files. You can see the WAL availability of replication slots in pg_replication_slots. @@ -4606,7 +4692,7 @@ ANY num_sync ( ), - running manually, increasing + running ANALYZE manually, increasing the value of the configuration parameter, and increasing the amount of statistics collected for @@ -6845,9 +6931,9 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' - If greater than zero, each bind parameter value reported in - non-error statement-logging messages is trimmed to this many bytes. - Zero disables logging bind parameters with statements. + If greater than zero, each bind parameter value logged with a + non-error statement-logging message is trimmed to this many bytes. 
+ Zero disables logging of bind parameters for non-error statement logs. -1 (the default) allows bind parameters to be logged in full. If this value is specified without units, it is taken as bytes. @@ -8093,7 +8179,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; set it to replica when they are applying replicated changes. The effect of that will be that triggers and rules (that have not been altered from their default configuration) will not fire - on the replica. See the clauses + on the replica. See the ALTER TABLE clauses ENABLE TRIGGER and ENABLE RULE for more information. @@ -8222,10 +8308,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; a regular VACUUM in that it visits every page that might contain unfrozen XIDs or MXIDs, not just those that might contain dead tuples. The default is 150 million transactions. Although users can - set this value anywhere from zero to two billions, VACUUM + set this value anywhere from zero to two billion, VACUUM will silently limit the effective value to 95% of , so that a - periodical manual VACUUM has a chance to run before an + periodic manual VACUUM has a chance to run before an anti-wraparound autovacuum is launched for the table. For more information see . @@ -8269,10 +8355,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; a regular VACUUM in that it visits every page that might contain unfrozen XIDs or MXIDs, not just those that might contain dead tuples. The default is 150 million multixacts. - Although users can set this value anywhere from zero to two billions, + Although users can set this value anywhere from zero to two billion, VACUUM will silently limit the effective value to 95% of , so that a - periodical manual VACUUM has a chance to run before an + periodic manual VACUUM has a chance to run before an anti-wraparound is launched for the table. For more information see .
@@ -8779,7 +8865,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; This variable specifies one or more shared libraries that are to be preloaded at connection start. It contains a comma-separated list of library names, where each name - is interpreted as for the command. + is interpreted as for the LOAD command. Whitespace between entries is ignored; surround a library name with double quotes if you need to include whitespace or commas in the name. The parameter value only takes effect at the start of the connection. @@ -8830,7 +8916,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; This variable specifies one or more shared libraries that are to be preloaded at connection start. It contains a comma-separated list of library names, where each name - is interpreted as for the command. + is interpreted as for the LOAD command. Whitespace between entries is ignored; surround a library name with double quotes if you need to include whitespace or commas in the name. The parameter value only takes effect at the start of the connection. @@ -8872,7 +8958,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; This variable specifies one or more shared libraries to be preloaded at server start. It contains a comma-separated list of library names, where each name - is interpreted as for the command. + is interpreted as for the LOAD command. Whitespace between entries is ignored; surround a library name with double quotes if you need to include whitespace or commas in the name. This parameter can only be set at server start. If a specified @@ -10082,8 +10168,8 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - If set, do not trace locks for tables below this OID. (use to avoid - output on system tables) + If set, do not trace locks for tables below this OID (used to avoid + output on system tables). 
This parameter is only available if the LOCK_DEBUG diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml index 261a559e81c37..4e833d79ef9be 100644 --- a/doc/src/sgml/contrib.sgml +++ b/doc/src/sgml/contrib.sgml @@ -116,6 +116,7 @@ CREATE EXTENSION module_name; &isn; &lo; <ree; + &oldsnapshot; &pageinspect; &passwordcheck; &pgbuffercache; @@ -125,6 +126,7 @@ CREATE EXTENSION module_name; &pgrowlocks; &pgstatstatements; &pgstattuple; + &pgsurgery; &pgtrgm; &pgvisibility; &postgres-fdw; diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 49ea0003aad8e..c2951854b5ce3 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -2197,7 +2197,7 @@ TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54+02' - + Special Values @@ -2285,12 +2285,26 @@ TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54+02' type: CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, LOCALTIME, - LOCALTIMESTAMP. The latter four accept an - optional subsecond precision specification. (See LOCALTIMESTAMP. (See .) Note that these are SQL functions and are not recognized in data input strings. + + + While the input strings now, + today, tomorrow, + and yesterday are fine to use in interactive SQL + commands, they can have surprising behavior when the command is + saved to be executed later, for example in prepared statements, + views, and function definitions. The string can be converted to a + specific time value that continues to be used long after it becomes + stale. Use one of the SQL functions instead in such contexts. + For example, CURRENT_DATE + 1 is safer than + 'tomorrow'::date. + + +
diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml index eba7fcfb989f3..bcf623117c4bd 100644 --- a/doc/src/sgml/dblink.sgml +++ b/doc/src/sgml/dblink.sgml @@ -166,7 +166,7 @@ SELECT dblink_connect('myconn', 'fdtest'); OK (1 row) -SELECT * FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]); +SELECT * FROM dblink('myconn', 'SELECT * FROM foo') AS t(a int, b text, c text[]); a | b | c ----+---+--------------- 0 | a | {a0,b0,c0} @@ -615,7 +615,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text The SQL command that you wish to execute in the remote database, for example - insert into foo values(0,'a','{"a0","b0","c0"}'). + insert into foo values(0, 'a', '{"a0","b0","c0"}'). @@ -652,7 +652,7 @@ SELECT dblink_connect('dbname=dblink_test_standby'); OK (1 row) -SELECT dblink_exec('insert into foo values(21,''z'',''{"a0","b0","c0"}'');'); +SELECT dblink_exec('insert into foo values(21, ''z'', ''{"a0","b0","c0"}'');'); dblink_exec ----------------- INSERT 943366 1 @@ -664,7 +664,7 @@ SELECT dblink_connect('myconn', 'dbname=regression'); OK (1 row) -SELECT dblink_exec('myconn', 'insert into foo values(21,''z'',''{"a0","b0","c0"}'');'); +SELECT dblink_exec('myconn', 'insert into foo values(21, ''z'', ''{"a0","b0","c0"}'');'); dblink_exec ------------------ INSERT 6432584 1 diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index 6004dd1def9a0..c4897d68c9b9e 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -1675,12 +1675,12 @@ REVOKE ALL ON accounts FROM PUBLIC; SELECT - Allows from + Allows SELECT from any column, or specific column(s), of a table, view, materialized view, or other table-like object. - Also allows use of TO. + Also allows use of COPY TO. This privilege is also needed to reference existing column values in - or . + UPDATE or DELETE. For sequences, this privilege also allows use of the currval function. For large objects, this privilege allows the object to be read. 
@@ -1692,11 +1692,11 @@ REVOKE ALL ON accounts FROM PUBLIC; INSERT - Allows of a new row into a table, view, + Allows INSERT of a new row into a table, view, etc. Can be granted on specific column(s), in which case only those columns may be assigned to in the INSERT command (other columns will therefore receive default values). - Also allows use of FROM. + Also allows use of COPY FROM. @@ -1705,7 +1705,7 @@ REVOKE ALL ON accounts FROM PUBLIC; UPDATE - Allows of any + Allows UPDATE of any column, or specific column(s), of a table, view, etc. (In practice, any nontrivial UPDATE command will require SELECT privilege as well, since it must @@ -1727,7 +1727,7 @@ REVOKE ALL ON accounts FROM PUBLIC; DELETE - Allows of a row from a table, view, etc. + Allows DELETE of a row from a table, view, etc. (In practice, any nontrivial DELETE command will require SELECT privilege as well, since it must reference table columns to determine which rows to delete.) @@ -1739,7 +1739,7 @@ REVOKE ALL ON accounts FROM PUBLIC; TRUNCATE - Allows on a table, view, etc. + Allows TRUNCATE on a table, view, etc. @@ -3370,11 +3370,11 @@ VALUES ('Albany', NULL, NULL, 'NY'); Table inheritance is typically established when the child table is created, using the INHERITS clause of the - + CREATE TABLE statement. Alternatively, a table which is already defined in a compatible way can have a new parent relationship added, using the INHERIT - variant of . + variant of ALTER TABLE. To do this the new child table must already include columns with the same names and types as the columns of the parent. It must also include check constraints with the same names and check expressions as those of the @@ -3406,7 +3406,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); - will + ALTER TABLE will propagate any changes in column data definitions and check constraints down the inheritance hierarchy. 
Again, dropping columns that are depended on by other tables is only possible when using @@ -3992,7 +3992,7 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 Before running the ATTACH PARTITION command, it is recommended to create a CHECK constraint on the table to be attached matching the desired partition constraint. That way, - the system will be able to skip the scan to validate the implicit + the system will be able to skip the scan which is otherwise needed to validate the implicit partition constraint. Without the CHECK constraint, the table will be scanned to validate the partition constraint while holding an ACCESS EXCLUSIVE lock on that partition @@ -4056,8 +4056,8 @@ ALTER INDEX measurement_city_id_logdate_key - Unique constraints on partitioned tables must include all the - partition key columns. This limitation exists because + Unique constraints (and hence primary keys) on partitioned tables must + include all the partition key columns. This limitation exists because PostgreSQL can only enforce uniqueness in each partition individually. diff --git a/doc/src/sgml/dml.sgml b/doc/src/sgml/dml.sgml index 3844e34a7dcce..971e6a36b5159 100644 --- a/doc/src/sgml/dml.sgml +++ b/doc/src/sgml/dml.sgml @@ -245,7 +245,7 @@ UPDATE mytable SET a = 5, b = 3, c = 1 WHERE a > 0; You use the command to remove rows; the syntax is very similar to the - UPDATE command. For instance, to remove all + command. For instance, to remove all rows from the products table that have a price of 10, use: DELETE FROM products WHERE price = 10; diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 7266e229a47d8..14dcbdb4e32a7 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -31,7 +31,7 @@ specially marked sections. To build the program, the source code (*.pgc) is first passed through the embedded SQL preprocessor, which converts it to an ordinary C program (*.c), and afterwards it can be processed by a C - compiler. 
(For details about the compiling and linking see ). + compiler. (For details about the compiling and linking see .) Converted ECPG applications call functions in the libpq library through the embedded SQL library (ecpglib), and communicate with the PostgreSQL server using the normal frontend-backend protocol. @@ -63,11 +63,22 @@ EXEC SQL ...; These statements syntactically take the place of a C statement. Depending on the particular statement, they can appear at the - global level or within a function. Embedded + global level or within a function. + + + + Embedded SQL statements follow the case-sensitivity rules of normal SQL code, and not those of C. Also they allow nested - C-style comments that are part of the SQL standard. The C part of the + C-style comments as per the SQL standard. The C part of the program, however, follows the C standard of not accepting nested comments. + Embedded SQL statements likewise use SQL rules, not + C rules, for parsing quoted strings and identifiers. + (See and + respectively. Note that + ECPG assumes that standard_conforming_strings + is on.) + Of course, the C part of the program follows C quoting rules. @@ -479,10 +490,9 @@ EXEC SQL COMMIT; - For more details about declaration of the cursor, - see , and - see for FETCH command - details. + For more details about declaring a cursor, see ; for more details about fetching rows from a + cursor, see . @@ -7056,7 +7066,7 @@ EXECUTE IMMEDIATE string string - A literal C string or a host variable containing the SQL + A literal string or a host variable containing the SQL statement to be executed. @@ -7064,6 +7074,30 @@ EXECUTE IMMEDIATE string + + Notes + + + In typical usage, the string is a host + variable reference to a string containing a dynamically-constructed + SQL statement. The case of a literal string is not very useful; + you might as well just write the SQL statement directly, without + the extra typing of EXECUTE IMMEDIATE. 
+ + + + If you do use a literal string, keep in mind that any double quotes + you might wish to include in the SQL statement must be written as + octal escapes (\042) not the usual C + idiom \". This is because the string is inside + an EXEC SQL section, so the ECPG lexer parses it + according to SQL rules not C rules. Any embedded backslashes will + later be handled according to C rules; but \" + causes an immediate syntax error because it is seen as ending the + literal. + + + Examples @@ -7378,7 +7412,7 @@ EXEC SQL OPEN :curname1; -PREPARE name FROM string +PREPARE prepared_name FROM string @@ -7411,15 +7445,40 @@ PREPARE name FROM string - A literal C string or a host variable containing a preparable - statement, one of the SELECT, INSERT, UPDATE, or - DELETE. + A literal string or a host variable containing a preparable + SQL statement, one of SELECT, INSERT, UPDATE, or DELETE. + Use question marks (?) for parameter values + to be supplied at execution. + + Notes + + + In typical usage, the string is a host + variable reference to a string containing a dynamically-constructed + SQL statement. The case of a literal string is not very useful; + you might as well just write a direct SQL PREPARE + statement. + + + + If you do use a literal string, keep in mind that any double quotes + you might wish to include in the SQL statement must be written as + octal escapes (\042) not the usual C + idiom \". This is because the string is inside + an EXEC SQL section, so the ECPG lexer parses it + according to SQL rules not C rules. Any embedded backslashes will + later be handled according to C rules; but \" + causes an immediate syntax error because it is seen as ending the + literal. + + + Examples diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index e486006224cc5..1c37026bb0508 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -504,7 +504,7 @@ RETURNS anycompatible AS ... of the extension itself. 
If the extension includes C code, there will typically also be a shared library file into which the C code has been built. Once you have these files, a simple - command loads the objects into + CREATE EXTENSION command loads the objects into your database. @@ -513,7 +513,7 @@ RETURNS anycompatible AS ... SQL script to load a bunch of loose objects into your database, is that PostgreSQL will then understand that the objects of the extension go together. You can - drop all the objects with a single + drop all the objects with a single DROP EXTENSION command (no need to maintain a separate uninstall script). Even more useful, pg_dump knows that it should not dump the individual member objects of the extension — it will @@ -572,7 +572,7 @@ RETURNS anycompatible AS ... The kinds of SQL objects that can be members of an extension are shown in - the description of . Notably, objects + the description of ALTER EXTENSION. Notably, objects that are database-cluster-wide, such as databases, roles, and tablespaces, cannot be extension members since an extension is only known within one database. (Although an extension script is not prohibited from creating @@ -605,7 +605,7 @@ RETURNS anycompatible AS ... - The command relies on a control + The CREATE EXTENSION command relies on a control file for each extension, which must be named the same as the extension with a suffix of .control, and must be placed in the installation's SHAREDIR/extension directory. There @@ -1373,7 +1373,7 @@ include $(PGXS) Once the files are installed, use the - command to load the objects into + CREATE EXTENSION command to load the objects into any particular database. 
diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml index 4627adc18fcb1..bf590aba5d9da 100644 --- a/doc/src/sgml/external-projects.sgml +++ b/doc/src/sgml/external-projects.sgml @@ -199,7 +199,7 @@ PL/Lua Lua - + diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml index 72fa1272120d8..9c9293414c581 100644 --- a/doc/src/sgml/fdwhandler.sgml +++ b/doc/src/sgml/fdwhandler.sgml @@ -861,11 +861,15 @@ PlanDirectModify(PlannerInfo *root, To execute the direct modification on the remote server, this function must rewrite the target subplan with a ForeignScan plan node that executes the direct modification on the remote server. The - operation field of the ForeignScan must - be set to the CmdType enumeration appropriately; that is, + operation and resultRelation fields + of the ForeignScan must be set appropriately. + operation must be set to the CmdType + enumeration corresponding to the statement kind (that is, CMD_UPDATE for UPDATE, CMD_INSERT for INSERT, and - CMD_DELETE for DELETE. + CMD_DELETE for DELETE), and the + resultRelation argument must be copied to the + resultRelation field. @@ -925,9 +929,8 @@ IterateDirectModify(ForeignScanState *node); needed for the RETURNING calculation, returning it in a tuple table slot (the node's ScanTupleSlot should be used for this purpose). The data that was actually inserted, updated - or deleted must be stored in the - es_result_relation_info->ri_projectReturning->pi_exprContext->ecxt_scantuple - of the node's EState. + or deleted must be stored in + node->resultRelInfo->ri_projectReturning->pi_exprContext->ecxt_scantuple. Return NULL if no more rows are available. Note that this is called in a short-lived memory context that will be reset between invocations. 
Create a memory context in diff --git a/doc/src/sgml/file-fdw.sgml b/doc/src/sgml/file-fdw.sgml index 29d79832a33cd..8831f5911f129 100644 --- a/doc/src/sgml/file-fdw.sgml +++ b/doc/src/sgml/file-fdw.sgml @@ -265,9 +265,10 @@ CREATE FOREIGN TABLE pglog ( query_pos integer, location text, application_name text, - backend_type text + backend_type text, + leader_pid integer ) SERVER pglog -OPTIONS ( filename log/pglog.csv', format 'csv' ); +OPTIONS ( filename 'log/pglog.csv', format 'csv' ); diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml index 64b5da0070c5c..38e8aa0bbf90f 100644 --- a/doc/src/sgml/filelist.sgml +++ b/doc/src/sgml/filelist.sgml @@ -36,6 +36,7 @@ + @@ -129,6 +130,7 @@ + @@ -139,6 +141,7 @@ + diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index e2e618791ee01..c99499e52bdba 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1048,36 +1048,6 @@ repeat('Pg', 4) PgPgPgPg - - - bigint ! - numeric - - - Factorial - (deprecated, use factorial() instead) - - - 5 ! - 120 - - - - - - !! bigint - numeric - - - Factorial as a prefix operator - (deprecated, use factorial() instead) - - - !! 5 - 120 - - - @ numeric_type @@ -7708,6 +7678,15 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); + + + In to_timestamp and to_date, + negative years are treated as signifying BC. If you write both a + negative year and an explicit BC field, you get AD + again. An input of year zero is treated as 1 BC. 
+ + + In to_timestamp and to_date, @@ -8969,6 +8948,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Create date from year, month and day fields + (negative years signify BC) make_date(2013, 7, 15) @@ -8980,13 +8960,13 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); make_interval - make_interval ( year int - , month int - , week int - , day int - , hour int - , min int - , sec double precision + make_interval ( years int + , months int + , weeks int + , days int + , hours int + , mins int + , secs double precision ) interval @@ -9034,6 +9014,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Create timestamp from year, month, day, hour, minute and seconds fields + (negative years signify BC) make_timestamp(2013, 7, 15, 8, 15, 23.5) @@ -9057,12 +9038,18 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Create timestamp with time zone from year, month, day, hour, minute - and seconds fields; if timezone is not - specified, the current time zone is used + and seconds fields (negative years signify BC). + If timezone is not + specified, the current time zone is used; the examples assume the + session time zone is Europe/London make_timestamptz(2013, 7, 15, 8, 15, 23.5) 2013-07-15 08:15:23.5+01 + + + make_timestamptz(2013, 7, 15, 8, 15, 23.5, 'America/New_York') + 2013-07-15 13:15:23.5+01 @@ -10057,20 +10044,22 @@ now() SELECT CURRENT_TIMESTAMP; SELECT now(); -SELECT TIMESTAMP 'now'; -- incorrect for use with DEFAULT +SELECT TIMESTAMP 'now'; -- but see tip below - You do not want to use the third form when specifying a DEFAULT - clause while creating a table. The system will convert now + Do not use the third form when specifying a value to be evaluated later, + for example in a DEFAULT clause for a table column. + The system will convert now to a timestamp as soon as the constant is parsed, so that when the default value is needed, the time of the table creation would be used! 
The first two forms will not be evaluated until the default value is used, because they are function calls. Thus they will give the desired behavior of defaulting to the time of row insertion. + (See also .) @@ -15045,7 +15034,7 @@ table2-mapping per to_json or to_jsonb. - json_build_array(1,2,'foo',4,5) + json_build_array(1, 2, 'foo', 4, 5) [1, 2, "foo", 4, 5] @@ -15072,7 +15061,7 @@ table2-mapping per to_json or to_jsonb. - json_build_object('foo',1,2,row(3,'bar')) + json_build_object('foo', 1, 2, row(3,'bar')) {"foo" : 1, "2" : {"f1":3,"f2":"bar"}} @@ -15104,7 +15093,7 @@ table2-mapping json_object('{a, 1, b, "def", c, 3.5}') {"a" : "1", "b" : "def", "c" : "3.5"} - json_object('{{a, 1},{b, "def"},{c, 3.5}}') + json_object('{{a, 1}, {b, "def"}, {c, 3.5}}') {"a" : "1", "b" : "def", "c" : "3.5"} @@ -15124,7 +15113,7 @@ table2-mapping the one-argument form. - json_object('{a, b}', '{1,2}') + json_object('{a,b}', '{1,2}') {"a": "1", "b": "2"} @@ -15515,7 +15504,7 @@ table2-mapping create type twoints as (a int, b int); - select * from json_populate_recordset(null::twoints, '[{"a":1,"b":2},{"a":3,"b":4}]') + select * from json_populate_recordset(null::twoints, '[{"a":1,"b":2}, {"a":3,"b":4}]') a | b @@ -15590,7 +15579,7 @@ table2-mapping for json[b]_populate_record. - select * from json_to_recordset('[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]') as x(a int, b text) + select * from json_to_recordset('[{"a":1,"b":"foo"}, {"a":"2","c":"bar"}]') as x(a int, b text) a | b @@ -15628,11 +15617,11 @@ table2-mapping or at the end of the array if it is positive. - jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0,f1}','[2,3,4]', false) + jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0,f1}', '[2,3,4]', false) [{"f1": [2, 3, 4], "f2": null}, 2, null, 3] - jsonb_set('[{"f1":1,"f2":null},2]', '{0,f3}','[2,3,4]') + jsonb_set('[{"f1":1,"f2":null},2]', '{0,f3}', '[2,3,4]') [{"f1": 1, "f2": null, "f3": [2, 3, 4]}, 2] @@ -15656,11 +15645,11 @@ table2-mapping 'use_json_null'. 
- jsonb_set_lax('[{"f1":1,"f2":null},2,null,3]', '{0,f1}',null) + jsonb_set_lax('[{"f1":1,"f2":null},2,null,3]', '{0,f1}', null) [{"f1":null,"f2":null},2,null,3] - jsonb_set_lax('[{"f1":99,"f2":null},2]', '{0,f3}',null, true, 'return_target') + jsonb_set_lax('[{"f1":99,"f2":null},2]', '{0,f3}', null, true, 'return_target') [{"f1": 99, "f2": null}, 2] @@ -15724,7 +15713,7 @@ table2-mapping untouched. - json_strip_nulls('[{"f1":1,"f2":null},2,null,3]') + json_strip_nulls('[{"f1":1, "f2":null}, 2, null, 3]') [{"f1":1},2,null,3] @@ -15748,7 +15737,7 @@ table2-mapping as the @? and @@ operators do. - jsonb_path_exists('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}') + jsonb_path_exists('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') t @@ -15770,7 +15759,7 @@ table2-mapping for jsonb_path_exists. - jsonb_path_match('{"a":[1,2,3,4,5]}', 'exists($.a[*] ? (@ >= $min && @ <= $max))', '{"min":2,"max":4}') + jsonb_path_match('{"a":[1,2,3,4,5]}', 'exists($.a[*] ? (@ >= $min && @ <= $max))', '{"min":2, "max":4}') t @@ -15791,7 +15780,7 @@ table2-mapping for jsonb_path_exists. - select * from jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}') + select * from jsonb_path_query('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') jsonb_path_query @@ -15819,7 +15808,7 @@ table2-mapping for jsonb_path_exists. - jsonb_path_query_array('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}') + jsonb_path_query_array('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2, "max":4}') [2, 3, 4] @@ -15841,7 +15830,7 @@ table2-mapping for jsonb_path_exists. - jsonb_path_query_first('{"a":[1,2,3,4,5]}', '$.a[*] ? (@ >= $min && @ <= $max)', '{"min":2,"max":4}') + jsonb_path_query_first('{"a":[1,2,3,4,5]}', '$.a[*] ? 
(@ >= $min && @ <= $max)', '{"min":2, "max":4}') 2 @@ -15913,7 +15902,7 @@ table2-mapping Converts the given JSON value to pretty-printed, indented text. - jsonb_pretty('[{"f1":1,"f2":null},2]') + jsonb_pretty('[{"f1":1,"f2":null}, 2]') [ @@ -22203,7 +22192,9 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); setof record ( word text, catcode "char", - catdesc text ) + barelabel boolean, + catdesc text, + baredesc text ) Returns a set of records describing the SQL keywords recognized by the @@ -22213,8 +22204,15 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); keyword, C for a keyword that can be a column name, T for a keyword that can be a type or function name, or R for a fully reserved keyword. + The barelabel column + contains true if the keyword can be used as + a bare column label in SELECT lists, + or false if it can only be used + after AS. The catdesc column contains a - possibly-localized string describing the category. + possibly-localized string describing the keyword's category. + The baredesc column contains a + possibly-localized string describing the keyword's column label status. 
@@ -22420,7 +22418,7 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); pg_options_to_table - pg_options_to_table ( reloptions text[] ) + pg_options_to_table ( options_array text[] ) setof record ( option_name text, option_value text ) @@ -26601,7 +26599,7 @@ BEGIN obj.object_name, obj.object_identity; END LOOP; -END +END; $$; CREATE EVENT TRIGGER test_event_trigger_for_drops ON sql_drop diff --git a/doc/src/sgml/generate-errcodes-table.pl b/doc/src/sgml/generate-errcodes-table.pl index 9a1886059b2bf..66a3ee00298a1 100644 --- a/doc/src/sgml/generate-errcodes-table.pl +++ b/doc/src/sgml/generate-errcodes-table.pl @@ -3,8 +3,8 @@ # Generate the errcodes-table.sgml file from errcodes.txt # Copyright (c) 2000-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; print "\n"; diff --git a/doc/src/sgml/generate-keywords-table.pl b/doc/src/sgml/generate-keywords-table.pl index 824b324ef78a4..6332d65aadc78 100644 --- a/doc/src/sgml/generate-keywords-table.pl +++ b/doc/src/sgml/generate-keywords-table.pl @@ -1,6 +1,7 @@ #!/usr/bin/perl # -# Generate the keywords table file +# Generate the keywords table for the documentation's SQL Key Words appendix +# # Copyright (c) 2019-2020, PostgreSQL Global Development Group use strict; @@ -11,8 +12,9 @@ my $srcdir = $ARGV[0]; my %keywords; +my %as_keywords; -# read SQL keywords +# read SQL-spec keywords foreach my $ver (@sql_versions) { @@ -39,9 +41,10 @@ while (<$fh>) { - if (/^PG_KEYWORD\("(\w+)", \w+, (\w+)_KEYWORD\)/) + if (/^PG_KEYWORD\("(\w+)", \w+, (\w+)_KEYWORD\, (\w+)\)/) { $keywords{ uc $1 }{'pg'}{ lc $2 } = 1; + $as_keywords{ uc $1 } = 1 if $3 eq 'AS_LABEL'; } } @@ -107,6 +110,10 @@ END { print "reserved"; } + if ($as_keywords{$word}) + { + print ", requires AS"; + } print "\n"; foreach my $ver (@sql_versions) diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml index 5c8d4d52757cf..67754f52f6499 100644 --- a/doc/src/sgml/gin.sgml +++ b/doc/src/sgml/gin.sgml @@ -612,7 +612,7 @@ 
gin_pending_list_limit can be overridden for individual - GIN indexes by changing storage parameters, and which allows each + GIN indexes by changing storage parameters, which allows each GIN index to have its own cleanup threshold. For example, it's possible to increase the threshold only for the GIN index which can be updated heavily, and decrease it otherwise. diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml index f9226e7a35cbb..1bf5f09659136 100644 --- a/doc/src/sgml/gist.sgml +++ b/doc/src/sgml/gist.sgml @@ -259,6 +259,8 @@ CREATE INDEX ON my_table USING GIST (my_inet_column inet_ops); compress method is omitted. The optional tenth method options is needed if the operator class provides the user-specified parameters. + The sortsupport method is also optional and is used to + speed up building a GiST index. @@ -973,7 +975,7 @@ static char *str_param_default = "default"; /* * Sample validator: checks that string is not longer than 8 bytes. */ -static void +static void validate_my_string_relopt(const char *value) { if (strlen(value) > 8) @@ -985,7 +987,7 @@ validate_my_string_relopt(const char *value) /* * Sample filler: switches characters to lower case. */ -static Size +static Size fill_my_string_relopt(const char *value, void *ptr) { char *tmp = str_tolower(value, strlen(value), DEFAULT_COLLATION_OID); @@ -1065,6 +1067,74 @@ my_compress(PG_FUNCTION_ARGS) + + + sortsupport + + + Returns a comparator function to sort data in a way that preserves + locality. It is used by CREATE INDEX and + REINDEX commands. The quality of the created index + depends on how well the sort order determined by the comparator function + preserves locality of the inputs. + + + The sortsupport method is optional. If it is not + provided, CREATE INDEX builds the index by inserting + each tuple to the tree using the penalty and + picksplit functions, which is much slower. 
+ + + + The SQL declaration of the function must look like + this: + + +CREATE OR REPLACE FUNCTION my_sortsupport(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT; + + + The argument is a pointer to a SortSupport + struct. At a minimum, the function must fill in its comparator field. + The comparator takes three arguments: two Datums to compare, and + a pointer to the SortSupport struct. The + Datums are the two indexed values in the format that they are stored + in the index; that is, in the format returned by the + compress method. The full API is defined in + src/include/utils/sortsupport.h. + + + + The matching code in the C module could then follow this skeleton: + + +PG_FUNCTION_INFO_V1(my_sortsupport); + +static int +my_fastcmp(Datum x, Datum y, SortSupport ssup) +{ + /* establish order between x and y by computing some sorting value z */ + + int z1 = ComputeSpatialCode(x); + int z2 = ComputeSpatialCode(y); + + return z1 == z2 ? 0 : z1 > z2 ? 1 : -1; +} + +Datum +my_sortsupport(PG_FUNCTION_ARGS) +{ + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + + ssup->comparator = my_fastcmp; + PG_RETURN_VOID(); +} + + + + @@ -1087,23 +1157,38 @@ my_compress(PG_FUNCTION_ARGS) Implementation - GiST Buffering Build + GiST Index Build Methods + + + The simplest way to build a GiST index is just to insert all the entries, + one by one. This tends to be slow for large indexes, because if the + index tuples are scattered across the index and the index is large enough + to not fit in cache, a lot of random I/O will be + needed. PostgreSQL supports two alternative + methods for initial build of a GiST index: sorted + and buffered modes. + + + + The sorted method is only available if each of the opclasses used by the + index provides a sortsupport function, as described + in . If they do, this method is + usually the best, so it is used by default. 
+ + - Building large GiST indexes by simply inserting all the tuples tends to be - slow, because if the index tuples are scattered across the index and the - index is large enough to not fit in cache, the insertions need to perform - a lot of random I/O. Beginning in version 9.2, PostgreSQL supports a more - efficient method to build GiST indexes based on buffering, which can - dramatically reduce the number of random I/Os needed for non-ordered data - sets. For well-ordered data sets the benefit is smaller or non-existent, - because only a small number of pages receive new tuples at a time, and - those pages fit in cache even if the index as whole does not. + The buffered method works by not inserting tuples directly into the index + right away. It can dramatically reduce the amount of random I/O needed + for non-ordered data sets. For well-ordered data sets the benefit is + smaller or non-existent, because only a small number of pages receive new + tuples at a time, and those pages fit in cache even if the index as a + whole does not. - However, buffering index build needs to call the penalty - function more often, which consumes some extra CPU resources. Also, the - buffers used in the buffering build need temporary disk space, up to + The buffered method needs to call the penalty + function more often than the simple method does, which consumes some + extra CPU resources. Also, the buffers need temporary disk space, up to the size of the resulting index. Buffering can also influence the quality of the resulting index, in both positive and negative directions. That influence depends on various factors, like the distribution of the input @@ -1111,12 +1196,13 @@ my_compress(PG_FUNCTION_ARGS) - By default, a GiST index build switches to the buffering method when the - index size reaches . It can - be manually turned on or off by the buffering parameter - to the CREATE INDEX command. 
The default behavior is good for most cases, - but turning buffering off might speed up the build somewhat if the input - data is ordered. + If sorting is not possible, then by default a GiST index build switches + to the buffering method when the index size reaches + . Buffering can be manually + forced or prevented by the buffering parameter to the + CREATE INDEX command. The default behavior is good for most cases, but + turning buffering off might speed up the build somewhat if the input data + is ordered. diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml index beb309e668e2d..339ed38d42c81 100644 --- a/doc/src/sgml/high-availability.sgml +++ b/doc/src/sgml/high-availability.sgml @@ -39,9 +39,9 @@ Some solutions deal with synchronization by allowing only one server to modify the data. Servers that can modify data are called read/write, master or primary servers. - Servers that track changes in the master are called standby + Servers that track changes in the primary are called standby or secondary servers. A standby server that cannot be connected - to until it is promoted to a master server is called a warm + to until it is promoted to a primary server is called a warm standby server, and one that can accept connections and serves read-only queries is called a hot standby server. @@ -165,10 +165,10 @@ protocol to make nodes agree on a serializable transactional order. Logical replication allows a database server to send a stream of data modifications to another server. PostgreSQL logical replication constructs a stream of logical data modifications - from the WAL. Logical replication allows the data changes from - individual tables to be replicated. Logical replication doesn't require - a particular server to be designated as a primary or a replica but allows - data to flow in multiple directions. For more information on logical + from the WAL. Logical replication allows replication of data changes on + a per-table basis. 
In addition, a server that is publishing its own + changes can also subscribe to changes from another server, allowing data + to flow in multiple directions. For more information on logical replication, see . Through the logical decoding interface (), third-party extensions can also provide similar functionality. @@ -177,22 +177,24 @@ protocol to make nodes agree on a serializable transactional order. - Trigger-Based Master-Standby Replication + Trigger-Based Primary-Standby Replication - A master-standby replication setup sends all data modification - queries to the master server. The master server asynchronously - sends data changes to the standby server. The standby can answer - read-only queries while the master server is running. The - standby server is ideal for data warehouse queries. + A trigger-based replication setup typically funnels data modification + queries to a designated primary server. Operating on a per-table basis, + the primary server sends data changes (typically) asynchronously to the + standby servers. Standby servers can answer queries while the primary is + running, and may allow some local data changes or write activity. This + form of replication is often used for offloading large analytical or data + warehouse queries. - Slony-I is an example of this type of replication, with per-table - granularity, and support for multiple standby servers. Because it - updates the standby server asynchronously (in batches), there is - possible data loss during fail over. + Slony-I is an example of this type of + replication, with per-table granularity, and support for multiple standby + servers. Because it updates the standby server asynchronously (in + batches), there is possible data loss during fail over. @@ -215,14 +217,10 @@ protocol to make nodes agree on a serializable transactional order. random(), CURRENT_TIMESTAMP, and sequences can have different values on different servers. 
This is because each server operates independently, and because - SQL queries are broadcast (and not actual modified rows). If + SQL queries are broadcast rather than actual data changes. If this is unacceptable, either the middleware or the application - must query such values from a single server and then use those - values in write queries. Another option is to use this replication - option with a traditional primary-standby setup, i.e., data modification - queries are sent only to the primary and are propagated to the - standby servers via primary-standby replication, not by the replication - middleware. Care must also be taken that all + must determine such values from a single source and then use those + values in write queries. Care must also be taken that all transactions either commit or abort on all servers, perhaps using two-phase commit ( and ). @@ -351,7 +349,7 @@ protocol to make nodes agree on a serializable transactional order. - Allows multiple master servers + Allows multiple primary servers @@ -1395,7 +1393,8 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If archive_mode is set to on, the archiver is not enabled during recovery or standby mode. If the standby server is promoted, it will start archiving after the promotion, but - will not archive any WAL it did not generate itself. To get a complete + will not archive any WAL or timeline history files that + it did not generate itself. To get a complete series of WAL files in the archive, you must ensure that all WAL is archived, before it reaches the standby. 
This is inherently true with file-based log shipping, as the standby can only restore files that @@ -1503,7 +1502,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' Note that in this mode, the server will apply WAL one file at a time, so if you use the standby server for queries (see Hot Standby), there is a delay between an action in the primary and when the - action becomes visible in the standby, corresponding the time it takes + action becomes visible in the standby, corresponding to the time it takes to fill up the WAL file. archive_timeout can be used to make that delay shorter. Also note that you can't combine streaming replication with this method. diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml index 8a1caa3576131..14a36ade00a0b 100644 --- a/doc/src/sgml/hstore.sgml +++ b/doc/src/sgml/hstore.sgml @@ -631,7 +631,7 @@ b Does hstore contain key? - exist('a=>1','a') + exist('a=>1', 'a') t @@ -647,7 +647,7 @@ b for key? - defined('a=>NULL','a') + defined('a=>NULL', 'a') f @@ -662,7 +662,7 @@ b Deletes pair with matching key. - delete('a=>1,b=>2','b') + delete('a=>1,b=>2', 'b') "a"=>"1" @@ -676,7 +676,7 @@ b Deletes pairs with matching keys. - delete('a=>1,b=>2,c=>3',ARRAY['a','b']) + delete('a=>1,b=>2,c=>3', ARRAY['a','b']) "c"=>"3" @@ -690,7 +690,7 @@ b Deletes pairs matching those in the second argument. - delete('a=>1,b=>2','a=>4,b=>2'::hstore) + delete('a=>1,b=>2', 'a=>4,b=>2'::hstore) "a"=>"1" diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml index 390c49eb6abac..649020b7daa22 100644 --- a/doc/src/sgml/indexam.sgml +++ b/doc/src/sgml/indexam.sgml @@ -612,7 +612,7 @@ amgettuple (IndexScanDesc scan, will pass the caller's snapshot test. On success, amgettuple must also set scan->xs_recheck to true or false. False means it is certain that the index entry matches the scan keys. 
- true means this is not certain, and the conditions represented by the + True means this is not certain, and the conditions represented by the scan keys must be rechecked against the heap tuple after fetching it. This provision supports lossy index operators. Note that rechecking will extend only to the scan conditions; a partial diff --git a/doc/src/sgml/install-binaries.sgml b/doc/src/sgml/install-binaries.sgml new file mode 100644 index 0000000000000..001c3c7be01fb --- /dev/null +++ b/doc/src/sgml/install-binaries.sgml @@ -0,0 +1,24 @@ + + + Installation from Binaries + + + installation + binaries + + + + PostgreSQL is available in the form of binary + packages for most common operating systems today. When available, this is + the recommended way to install PostgreSQL for users of the system. Building + from source (see ) is only recommended for + people developing PostgreSQL or extensions. + + + + For an updated list of platforms providing binary packages, please visit + the download section on the PostgreSQL website at + and follow the + instructions for the specific platform. + + diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index 11f5957aca5b2..844ef2cbd29af 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -11,7 +11,8 @@ It is recommended that most users download the binary distribution for Windows, available as a graphical installer package - from the PostgreSQL website. Building from source + from the PostgreSQL website at + . Building from source is only intended for people developing PostgreSQL or extensions. diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 552303e211421..3ac588dfb5c91 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -21,7 +21,7 @@ documentation. See standalone-profile.xsl for details. PostgreSQL using the source code distribution. 
If you are installing a pre-packaged distribution, such as an RPM or Debian package, ignore this chapter - and read the packager's instructions instead. + and see instead. @@ -239,7 +239,7 @@ su - postgres class="osname">Linux, NetBSD, Solaris), for other systems you can download an add-on package from . + url="https://www.gnu.org/software/gettext/">. If you are using the Gettext implementation in the GNU C library then you will additionally need the GNU Gettext package for some @@ -2293,7 +2293,7 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address - OpenSSL is not supported. + OpenSSL is not supported. diff --git a/doc/src/sgml/intarray.sgml b/doc/src/sgml/intarray.sgml index c8db87e97df90..af44c7b2142d1 100644 --- a/doc/src/sgml/intarray.sgml +++ b/doc/src/sgml/intarray.sgml @@ -453,7 +453,7 @@ -- a message can be in one or more sections CREATE TABLE message (mid INT PRIMARY KEY, sections INT[], ...); --- create specialized index with sigature length of 32 bytes +-- create specialized index with signature length of 32 bytes CREATE INDEX message_rdtree_idx ON message USING GIST (sections gist__int_ops(siglen=32)); -- select messages in section 1 OR 2 - OVERLAP operator diff --git a/doc/src/sgml/isn.sgml b/doc/src/sgml/isn.sgml index e55ed073120c0..709bc8345c7e1 100644 --- a/doc/src/sgml/isn.sgml +++ b/doc/src/sgml/isn.sgml @@ -14,7 +14,7 @@ hard-coded list of prefixes; this list of prefixes is also used to hyphenate numbers on output. Since new prefixes are assigned from time to time, the list of prefixes may be out of date. It is hoped that a future version of - this module will obtained the prefix list from one or more tables that + this module will obtain the prefix list from one or more tables that can be easily updated by users as needed; however, at present, the list can only be updated by modifying the source code and recompiling. 
Alternatively, prefix validation and hyphenation support may be diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml index 57dcd6ae5c7b4..a7bf30c504681 100644 --- a/doc/src/sgml/keywords.sgml +++ b/doc/src/sgml/keywords.sgml @@ -32,11 +32,11 @@ - In the PostgreSQL parser life is a bit + In the PostgreSQL parser, life is a bit more complicated. There are several different classes of tokens ranging from those that can never be used as an identifier to those - that have absolutely no special status in the parser as compared to - an ordinary identifier. (The latter is usually the case for + that have absolutely no special status in the parser, but are considered + ordinary identifiers. (The latter is usually the case for functions specified by SQL.) Even reserved key words are not completely reserved in PostgreSQL, but can be used as column labels (for example, SELECT 55 AS @@ -57,14 +57,24 @@ reserved are those tokens that are not allowed as column or table names. Some reserved key words are allowable as names for functions or data types; this is also shown in the - table. If not so marked, a reserved key word is only allowed as an - AS column label name. + table. If not so marked, a reserved key word is only allowed as a + column label. + A blank entry in this column means that the word is treated as an + ordinary identifier by PostgreSQL. + + + + Furthermore, while most key words can be used as bare + column labels without writing AS before them (as + described in ), there are a few + that require a leading AS to avoid ambiguity. These + are marked in the table as requires AS. As a general rule, if you get spurious parser errors for commands - that contain any of the listed key words as an identifier you should - try to quote the identifier to see if the problem goes away. + that use any of the listed key words as an identifier, you should + try quoting the identifier to see if the problem goes away. 
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index a397073526f4d..de60281fcb496 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -91,21 +91,6 @@ - - - On Windows, there is a way to improve performance if a single - database connection is repeatedly started and shutdown. Internally, - libpq calls WSAStartup() and WSACleanup() for connection startup - and shutdown, respectively. WSAStartup() increments an internal - Windows library reference count which is decremented by WSACleanup(). - When the reference count is just one, calling WSACleanup() frees - all resources and all DLLs are unloaded. This is an expensive - operation. To avoid this, an application can manually call - WSAStartup() so resources will not be freed when the last database - connection is closed. - - - PQconnectdbParamsPQconnectdbParams @@ -812,7 +797,8 @@ int callback_fn(char *buf, int size, PGconn *conn); its path will be in conn->sslkey when the callback is invoked. This will be empty if the default key path is being used. For keys that are engine specifiers, it is up to engine implementations - whether they use the OpenSSL password callback or define their own handling. + whether they use the OpenSSL password + callback or define their own handling. @@ -1226,7 +1212,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname connect_timeout - Maximum wait for connection, in seconds (write as a decimal integer, + Maximum time to wait while connecting, in seconds (write as a decimal integer, e.g., 10). Zero, negative, or not specified means wait indefinitely. The minimum allowed timeout is 2 seconds, therefore a value of 1 is interpreted as 2. @@ -1672,13 +1658,15 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname Specifying this parameter with any non-empty value suppresses the Enter PEM pass phrase: - prompt that OpenSSL will emit by default when an encrypted client - certificate key is provided to libpq. 
+ prompt that OpenSSL will emit by default + when an encrypted client certificate key is provided to + libpq. - If the key is not encrypted this parameter is ignored. The parameter has no - effect on keys specified by OpenSSL engines unless the engine uses the - OpenSSL password callback mechanism for prompts. + If the key is not encrypted this parameter is ignored. The parameter + has no effect on keys specified by OpenSSL + engines unless the engine uses the OpenSSL + password callback mechanism for prompts. There is no environment variable equivalent to this option, and no @@ -2471,8 +2459,9 @@ void *PQsslStruct(const PGconn *conn, const char *struct_name); The struct(s) available depend on the SSL implementation in use. - For OpenSSL, there is one struct, available under the name "OpenSSL", - and it returns a pointer to the OpenSSL SSL struct. + For OpenSSL, there is one struct, + available under the name "OpenSSL", and it returns a pointer to the + OpenSSL SSL struct. To use this function, code along the following lines could be used: @@ -2516,8 +2505,9 @@ void *PQgetssl(const PGconn *conn); This function is equivalent to PQsslStruct(conn, "OpenSSL"). It should not be used in new applications, because the returned struct is - specific to OpenSSL and will not be available if another SSL - implementation is used. To check if a connection uses SSL, call + specific to OpenSSL and will not be + available if another SSL implementation is used. + To check if a connection uses SSL, call instead, and for more details about the connection, use . @@ -7665,15 +7655,17 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) The key may be - stored in cleartext or encrypted with a passphrase using any algorithm supported - by OpenSSL, like AES-128. If the key is stored encrypted, then the passphrase - may be provided in the connection - option. 
If an encrypted key is supplied and the sslpassword - option is absent or blank, a password will be prompted for interactively by - OpenSSL with a Enter PEM pass phrase: - prompt if a TTY is available. Applications can override the client certificate - prompt and the handling of the sslpassword parameter by supplying - their own key password callback; see + stored in cleartext or encrypted with a passphrase using any algorithm + supported by OpenSSL, like AES-128. If the key + is stored encrypted, then the passphrase may be provided in the + connection option. If an + encrypted key is supplied and the sslpassword option + is absent or blank, a password will be prompted for interactively by + OpenSSL with a + Enter PEM pass phrase: prompt if a TTY is available. + Applications can override the client certificate prompt and the handling + of the sslpassword parameter by supplying their own + key password callback; see . @@ -7936,7 +7928,7 @@ void PQinitOpenSSL(int do_ssl, int do_crypto); When do_ssl is non-zero, libpq - will initialize the OpenSSL library before first + will initialize the OpenSSL library before first opening a database connection. When do_crypto is non-zero, the libcrypto library will be initialized. By default (if is not called), both libraries @@ -7945,7 +7937,7 @@ void PQinitOpenSSL(int do_ssl, int do_crypto); - If your application uses and initializes either OpenSSL + If your application uses and initializes either OpenSSL or its underlying libcrypto library, you must call this function with zeroes for the appropriate parameter(s) before first opening a database connection. Also be sure that you @@ -7967,7 +7959,7 @@ void PQinitSSL(int do_ssl); This function is equivalent to PQinitOpenSSL(do_ssl, do_ssl). It is sufficient for applications that initialize both or neither - of OpenSSL and libcrypto. + of OpenSSL and libcrypto. 
diff --git a/doc/src/sgml/lobj.sgml b/doc/src/sgml/lobj.sgml index cf4653fe0f94f..6329cf0796beb 100644 --- a/doc/src/sgml/lobj.sgml +++ b/doc/src/sgml/lobj.sgml @@ -901,8 +901,6 @@ exportFile(PGconn *conn, Oid lobjId, char *filename) lo_close(conn, lobj_fd); close(fd); - - return; } static void diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml index c35415801f6f0..a560ad69b44bb 100644 --- a/doc/src/sgml/logical-replication.sgml +++ b/doc/src/sgml/logical-replication.sgml @@ -147,13 +147,13 @@ - A publication is created using the + A publication is created using the CREATE PUBLICATION command and may later be altered or dropped using corresponding commands. The individual tables can be added and removed dynamically using - . Both the ADD + ALTER PUBLICATION. Both the ADD TABLE and DROP TABLE operations are transactional; so the table will start or stop replicating at the correct snapshot once the transaction has committed. @@ -207,10 +207,10 @@ - The subscription is added using and + The subscription is added using CREATE SUBSCRIPTION and can be stopped/resumed at any time using the - command and removed using - . + ALTER SUBSCRIPTION command and removed using + DROP SUBSCRIPTION. @@ -403,7 +403,7 @@ Replication is only supported by tables, including partitioned tables. - Attempts to replicate other types of relations such as views, materialized + Attempts to replicate other types of relations, such as views, materialized views, or foreign tables, will result in an error. @@ -418,7 +418,7 @@ tables.) Publications can also specify that changes are to be replicated using the identity and schema of the partitioned root table instead of that of the individual leaf partitions in which the changes actually - originate (see ). + originate (see CREATE PUBLICATION). 
diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index 8d4fdf6700649..813a037facec3 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -223,7 +223,7 @@ $ pg_recvlogical -d postgres --slot=test --drop-slot A logical slot will emit each change just once in normal operation. The current position of each slot is persisted only at checkpoint, so in the case of a crash the slot may return to an earlier LSN, which will - then cause recent changes to be resent when the server restarts. + then cause recent changes to be sent again when the server restarts. Logical decoding clients are responsible for avoiding ill effects from handling the same message more than once. Clients may wish to record the last LSN they saw when decoding and skip over any repeated data or diff --git a/doc/src/sgml/ltree.sgml b/doc/src/sgml/ltree.sgml index 36aa2b5fad86b..06a983c075be4 100644 --- a/doc/src/sgml/ltree.sgml +++ b/doc/src/sgml/ltree.sgml @@ -460,7 +460,7 @@ Europe & Russia*@ & !Transportation position end-1 (counting from 0). - subltree('Top.Child1.Child2',1,2) + subltree('Top.Child1.Child2', 1, 2) Child1 @@ -480,7 +480,7 @@ Europe & Russia*@ & !Transportation the end of the path. - subpath('Top.Child1.Child2',0,2) + subpath('Top.Child1.Child2', 0, 2) Top.Child1 @@ -497,7 +497,7 @@ Europe & Russia*@ & !Transportation from the end of the path. - subpath('Top.Child1.Child2',1) + subpath('Top.Child1.Child2', 1) Child1.Child2 @@ -528,7 +528,7 @@ Europe & Russia*@ & !Transportation a, or -1 if not found. - index('0.1.2.3.5.4.5.6.8.5.6.8','5.6') + index('0.1.2.3.5.4.5.6.8.5.6.8', '5.6') 6 @@ -546,7 +546,7 @@ Europe & Russia*@ & !Transportation start -offset labels from the end of the path. - index('0.1.2.3.5.4.5.6.8.5.6.8','5.6',-4) + index('0.1.2.3.5.4.5.6.8.5.6.8', '5.6', -4) 9 @@ -584,7 +584,7 @@ Europe & Russia*@ & !Transportation (up to 8 arguments are supported). 
- lca('1.2.3','1.2.3.4.5.6') + lca('1.2.3', '1.2.3.4.5.6') 1.2 diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index de0794adeb90b..4d8ad754f8577 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -87,7 +87,7 @@ PostgreSQL's - command has to + VACUUM command has to process each table on a regular basis for several reasons: @@ -227,9 +227,9 @@ massive update or delete activity. If you have such a table and you need to reclaim the excess disk space it occupies, you will need to use VACUUM FULL, or alternatively - + CLUSTER or one of the table-rewriting variants of - . + ALTER TABLE. These commands rewrite an entire new copy of the table and build new indexes for it. All these options require exclusive lock. Note that they also temporarily use extra disk space approximately equal to the size @@ -242,7 +242,7 @@ If you have a table whose entire contents are deleted on a periodic basis, consider doing it with - rather + TRUNCATE rather than using DELETE followed by VACUUM. TRUNCATE removes the entire content of the table immediately, without requiring a @@ -269,7 +269,7 @@ The PostgreSQL query planner relies on statistical information about the contents of tables in order to generate good plans for queries. These statistics are gathered by - the command, + the ANALYZE command, which can be invoked by itself or as an optional step in VACUUM. It is important to have reasonably accurate statistics, otherwise poor choices of plans might diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 673a0e73e4534..f5cf163c8c68f 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -27,7 +27,7 @@ ps, top, iostat, and vmstat. Also, once one has identified a poorly-performing query, further investigation might be needed using - PostgreSQL's command. + PostgreSQL's EXPLAIN command. discusses EXPLAIN and other methods for understanding the behavior of an individual query. 
@@ -314,6 +314,15 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser + + pg_stat_replication_slotspg_stat_replication_slots + One row per replication slot, showing statistics about + replication slot usage. + See + pg_stat_replication_slots for details. + + + pg_stat_wal_receiverpg_stat_wal_receiver Only one row, showing statistics about the WAL receiver from @@ -424,6 +433,14 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser + + pg_stat_walpg_stat_wal + One row only, showing statistics about WAL activity. See + + pg_stat_wal for details. + + + pg_stat_databasepg_stat_database One row per database, showing database-wide statistics. See @@ -2544,6 +2561,88 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i + + <structname>pg_stat_replication_slots</structname> + + + pg_stat_replication_slots + + + + The pg_stat_replication_slots view will contain + one row per logical replication slot, showing statistics about its usage. + + + + <structname>pg_stat_replication_slots</structname> View + + + + + Column Type + + + Description + + + + + + + + slot_name text + + + A unique, cluster-wide identifier for the replication slot + + + + + + spill_txns bigint + + + Number of transactions spilled to disk after the memory used by + logical decoding exceeds logical_decoding_work_mem. The + counter gets incremented both for toplevel transactions and + subtransactions. + + + + + + spill_count bigint + + + Number of times transactions were spilled to disk. Transactions + may get spilled repeatedly, and this counter gets incremented on every + such invocation. + + + + + + spill_bytes bigint + + + Amount of decoded transaction data spilled to disk. + + + + + + stats_reset timestamp with time zone + + + Time at which these statistics were last reset + + + + +
+ +
+ <structname>pg_stat_wal_receiver</structname> @@ -3280,6 +3379,56 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i + + <structname>pg_stat_wal</structname> + + + pg_stat_wal + + + + The pg_stat_wal view will always have a + single row, containing data about WAL activity of the cluster. + + + + <structname>pg_stat_wal</structname> View + + + + + Column Type + + + Description + + + + + + + + wal_buffers_full bigint + + + Number of times WAL data was written to the disk because WAL buffers got full + + + + + + stats_reset timestamp with time zone + + + Time at which these statistics were last reset + + + + +
+ +
+ <structname>pg_stat_database</structname> @@ -4668,8 +4817,9 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i argument. The argument can be bgwriter to reset all the counters shown in the pg_stat_bgwriter - view, or archiver to reset all the counters shown in - the pg_stat_archiver view. + view, archiver to reset all the counters shown in + the pg_stat_archiver view or wal + to reset all the counters shown in the pg_stat_wal view.
This function is restricted to superusers by default, but other users @@ -4743,6 +4893,27 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i can be granted EXECUTE to run the function. + + + + + pg_stat_reset_replication_slot + + pg_stat_reset_replication_slot ( text ) + void + + + Resets statistics to zero for a single replication slot, or for all + replication slots in the cluster. The argument can be either the name + of the slot to reset the stats or NULL. If the argument is NULL, all + counters shown in the pg_stat_replication_slots + view for all replication slots are reset. + + + This function is restricted to superusers by default, but other users + can be granted EXECUTE to run the function. + + @@ -5219,8 +5390,8 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, Note that when ANALYZE is run on a partitioned table, - all of its partitions are also recursively analyzed as also mentioned in - . In that case, ANALYZE + all of its partitions are also recursively analyzed. + In that case, ANALYZE progress is reported first for the parent table, whereby its inheritance statistics are collected, followed by that for each partition. @@ -6089,8 +6260,8 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, waiting for checkpoint to finish The WAL sender process is currently performing - pg_start_backup to set up for - taking a base backup, and waiting for backup start + pg_start_backup to prepare to + take a base backup, and waiting for the start-of-backup checkpoint to finish. diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index 6920913a260f9..71dd4033372d5 100644 --- a/doc/src/sgml/mvcc.sgml +++ b/doc/src/sgml/mvcc.sgml @@ -950,10 +950,9 @@ ERROR: could not serialize access due to read/write dependencies among transact Acquired by VACUUM (without ), ANALYZE, CREATE INDEX CONCURRENTLY, REINDEX CONCURRENTLY, - CREATE STATISTICS, and certain ALTER - INDEX and ALTER TABLE variants (for full - details see and ). 
+ CREATE STATISTICS, and certain ALTER + INDEX and ALTER TABLE variants (for full + details see the documentation of these commands).
@@ -995,7 +994,7 @@ ERROR: could not serialize access due to read/write dependencies among transact Acquired by CREATE TRIGGER and some forms of - ALTER TABLE (see ). + ALTER TABLE. @@ -1246,7 +1245,7 @@ ERROR: could not serialize access due to read/write dependencies among transact The FOR UPDATE lock mode is also acquired by any DELETE on a row, and also by an - UPDATE that modifies the values on certain columns. Currently, + UPDATE that modifies the values of certain columns. Currently, the set of columns considered for the UPDATE case are those that have a unique index on them that can be used in a foreign key (so partial indexes and expressional indexes are not considered), but this may change @@ -1723,8 +1722,8 @@ SELECT pg_advisory_lock(q.id) FROM Caveats - Some DDL commands, currently only and the - table-rewriting forms of , are not + Some DDL commands, currently only TRUNCATE and the + table-rewriting forms of ALTER TABLE, are not MVCC-safe. This means that after the truncation or rewrite commits, the table will appear empty to concurrent transactions, if they are using a snapshot taken before the DDL command committed. This will only be an diff --git a/doc/src/sgml/oldsnapshot.sgml b/doc/src/sgml/oldsnapshot.sgml new file mode 100644 index 0000000000000..a665ae72e789c --- /dev/null +++ b/doc/src/sgml/oldsnapshot.sgml @@ -0,0 +1,33 @@ + + + + old_snapshot + + + old_snapshot + + + + The old_snapshot module allows inspection + of the server state that is used to implement + . + + + + Functions + + + + pg_old_snapshot_time_mapping(array_offset OUT int4, end_timestamp OUT timestamptz, newest_xmin OUT xid) returns setof record + + + Returns all of the entries in the server's timestamp to XID mapping. + Each entry represents the newest xmin of any snapshot taken in the + corresponding minute. 
+ + + + + + + diff --git a/doc/src/sgml/parallel.sgml b/doc/src/sgml/parallel.sgml index e31bd9d3cebd7..c81abff48d373 100644 --- a/doc/src/sgml/parallel.sgml +++ b/doc/src/sgml/parallel.sgml @@ -471,7 +471,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%'; - The following operations are always parallel restricted. + The following operations are always parallel restricted: diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml index 1cd9f5092db17..117a1f7ff92a6 100644 --- a/doc/src/sgml/perform.sgml +++ b/doc/src/sgml/perform.sgml @@ -31,7 +31,7 @@ plan to match the query structure and the properties of the data is absolutely critical for good performance, so the system includes a complex planner that tries to choose good plans. - You can use the command + You can use the EXPLAIN command to see what query plan the planner creates for any query. Plan-reading is an art that requires some experience to master, but this section attempts to cover the basics. @@ -1144,7 +1144,7 @@ WHERE tablename = 'road'; Statistics objects are created using the - command. + CREATE STATISTICS command. Creation of such an object merely creates a catalog entry expressing interest in the statistics. Actual data collection is performed by ANALYZE (either a manual command, or background @@ -1612,7 +1612,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Use <command>COPY</command> - Use to load + Use COPY to load all the rows in one command, instead of using a series of INSERT commands. The COPY command is optimized for loading large numbers of rows; it is less @@ -1623,8 +1623,8 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; - If you cannot use COPY, it might help to use to create a + If you cannot use COPY, it might help to use PREPARE to create a prepared INSERT statement, and then use EXECUTE as many times as required. 
This avoids some of the overhead of repeatedly parsing and planning @@ -1763,7 +1763,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse; Whenever you have significantly altered the distribution of data - within a table, running is strongly recommended. This + within a table, running ANALYZE is strongly recommended. This includes bulk loading large amounts of data into the table. Running ANALYZE (or VACUUM ANALYZE) ensures that the planner has up-to-date statistics about the diff --git a/doc/src/sgml/pgcrypto.sgml b/doc/src/sgml/pgcrypto.sgml index 6fd645aa70ad2..8748c64e2da6d 100644 --- a/doc/src/sgml/pgcrypto.sgml +++ b/doc/src/sgml/pgcrypto.sgml @@ -45,8 +45,8 @@ digest(data bytea, type text) returns bytea sha224, sha256, sha384 and sha512. If pgcrypto was built with - OpenSSL, more algorithms are available, as detailed in - . + OpenSSL, more algorithms are available, as + detailed in . @@ -1162,9 +1162,10 @@ gen_random_uuid() returns uuid - When compiled with OpenSSL, there will be more algorithms available. - Also public-key encryption functions will be faster as OpenSSL - has more optimized BIGNUM functions. + When compiled with OpenSSL, there will be + more algorithms available. Also public-key encryption functions will + be faster as OpenSSL has more optimized + BIGNUM functions. @@ -1239,7 +1240,8 @@ gen_random_uuid() returns uuid - Any digest algorithm OpenSSL supports is automatically picked up. + Any digest algorithm OpenSSL supports + is automatically picked up. This is not possible with ciphers, which need to be supported explicitly. diff --git a/doc/src/sgml/pgsurgery.sgml b/doc/src/sgml/pgsurgery.sgml new file mode 100644 index 0000000000000..134be9bebde08 --- /dev/null +++ b/doc/src/sgml/pgsurgery.sgml @@ -0,0 +1,107 @@ + + + + pg_surgery + + + pg_surgery + + + + The pg_surgery module provides various functions to + perform surgery on a damaged relation. 
These functions are unsafe by design + and using them may corrupt (or further corrupt) your database. For example, + these functions can easily be used to make a table inconsistent with its + own indexes, to cause UNIQUE or + FOREIGN KEY constraint violations, or even to make + tuples visible which, when read, will cause a database server crash. + They should be used with great caution and only as a last resort. + + + + Functions + + + + + heap_force_kill(regclass, tid[]) returns void + + + + + heap_force_kill marks used line + pointers as dead without examining the tuples. The + intended use of this function is to forcibly remove tuples that are not + otherwise accessible. For example: + +test=> select * from t1 where ctid = '(0, 1)'; +ERROR: could not access status of transaction 4007513275 +DETAIL: Could not open file "pg_xact/0EED": No such file or directory. + +test=# select heap_force_kill('t1'::regclass, ARRAY['(0, 1)']::tid[]); + heap_force_kill +----------------- + +(1 row) + +test=# select * from t1 where ctid = '(0, 1)'; +(0 rows) + + + + + + + + + heap_force_freeze(regclass, tid[]) returns void + + + + + heap_force_freeze marks tuples as frozen without + examining the tuple data. The intended use of this function is to + make accessible tuples which are inaccessible due to corrupted + visibility information, or which prevent the table from being + successfully vacuumed due to corrupted visibility information. 
+ For example: + +test=> vacuum t1; +ERROR: found xmin 507 from before relfrozenxid 515 +CONTEXT: while scanning block 0 of relation "public.t1" + +test=# select ctid from t1 where xmin = 507; + ctid +------- + (0,3) +(1 row) + +test=# select heap_force_freeze('t1'::regclass, ARRAY['(0, 3)']::tid[]); + heap_force_freeze +------------------- + +(1 row) + +test=# select ctid from t1 where xmin = 2; + ctid +------- + (0,3) +(1 row) + + + + + + + + + + + Authors + + + Ashutosh Sharma ashu.coek88@gmail.com + + + + diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index 815912666dd08..94a3b36458dfe 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -478,6 +478,14 @@ $$ LANGUAGE plpgsql; included it, but it would be redundant. + + To call a function with OUT parameters, omit the + output parameter in the function call: + +SELECT sales_tax(100.00); + + + Output parameters are most useful when returning multiple values. A trivial example is: @@ -489,6 +497,11 @@ BEGIN prod := x * y; END; $$ LANGUAGE plpgsql; + +SELECT * FROM sum_n_product(2, 4); + sum | prod +-----+------ + 6 | 8 As discussed in , this @@ -497,6 +510,31 @@ $$ LANGUAGE plpgsql; RETURNS record. + + This also works with procedures, for example: + + +CREATE PROCEDURE sum_n_product(x int, y int, OUT sum int, OUT prod int) AS $$ +BEGIN + sum := x + y; + prod := x * y; +END; +$$ LANGUAGE plpgsql; + + + In a call to a procedure, all the parameters must be specified. For + output parameters, NULL may be specified. + +CALL sum_n_product(2, 4, NULL, NULL); + sum | prod +-----+------ + 6 | 8 + + Output parameters in procedures become more interesting in nested calls, + where they can be assigned to variables. See for details. 
+ + Another way to declare a PL/pgSQL function is with RETURNS TABLE, for example: @@ -1143,7 +1181,7 @@ BEGIN SELECT users.userid INTO STRICT userid FROM users WHERE users.username = get_userid.username; RETURN userid; -END +END; $$ LANGUAGE plpgsql; On failure, this function might produce an error message such as @@ -1299,7 +1337,7 @@ EXECUTE format('SELECT count(*) FROM %I ' The PL/pgSQL EXECUTE statement is not related to the - SQL + EXECUTE SQL statement supported by the PostgreSQL server. The server's EXECUTE statement cannot be used directly within @@ -1816,7 +1854,7 @@ BEGIN RETURN NEXT r; -- return current row of SELECT END LOOP; RETURN; -END +END; $BODY$ LANGUAGE plpgsql; @@ -1844,7 +1882,7 @@ BEGIN END IF; RETURN; - END + END; $BODY$ LANGUAGE plpgsql; @@ -1918,7 +1956,7 @@ DECLARE myvar int := 5; BEGIN CALL triple(myvar); RAISE NOTICE 'myvar = %', myvar; -- prints 15 -END +END; $$; @@ -3521,7 +3559,7 @@ BEGIN ROLLBACK; END IF; END LOOP; -END +END; $$; CALL transaction_test1(); @@ -5175,7 +5213,7 @@ DECLARE f1 int; BEGIN RETURN f1; -END +END; $$ LANGUAGE plpgsql; WARNING: variable "f1" shadows a previously defined variable LINE 3: f1 int; diff --git a/doc/src/sgml/postgres-fdw.sgml b/doc/src/sgml/postgres-fdw.sgml index 4efaf35d3c4ec..e6fd2143c1056 100644 --- a/doc/src/sgml/postgres-fdw.sgml +++ b/doc/src/sgml/postgres-fdw.sgml @@ -456,14 +456,14 @@ OPTIONS (ADD password_required 'false'); Note that constraints other than NOT NULL will never be imported from the remote tables. Although PostgreSQL - does support CHECK constraints on foreign tables, there is no + does support check constraints on foreign tables, there is no provision for importing them automatically, because of the risk that a constraint expression could evaluate differently on the local and remote - servers. Any such inconsistency in the behavior of a CHECK + servers. Any such inconsistency in the behavior of a check constraint could lead to hard-to-detect errors in query optimization. 
- So if you wish to import CHECK constraints, you must do so + So if you wish to import check constraints, you must do so manually, and you should verify the semantics of each one carefully. - For more detail about the treatment of CHECK constraints on + For more detail about the treatment of check constraints on foreign tables, see . @@ -705,7 +705,7 @@ CREATE FOREIGN TABLE foreign_table ( Column names must match as well, unless you attach column_name options to the individual columns to show how they are named in the remote table. - In many cases, use of is + In many cases, use of IMPORT FOREIGN SCHEMA is preferable to constructing foreign table definitions manually. diff --git a/doc/src/sgml/postgres.sgml b/doc/src/sgml/postgres.sgml index c41ce9499be42..730d5fdc34837 100644 --- a/doc/src/sgml/postgres.sgml +++ b/doc/src/sgml/postgres.sgml @@ -154,6 +154,7 @@ break is not needed in a wider output rendering. + &installbin; &installation; &installw; &runtime; diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index c3cb7b4255fdc..3a64db6f55099 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -2059,8 +2059,9 @@ The commands accepted in replication mode are: the switch position is the end of the WAL that was streamed, but there are corner cases where the server can send some WAL from the old timeline that it has not itself replayed before promoting. Finally, the - server sends CommandComplete message, and is ready to accept a new - command. + server sends two CommandComplete messages (one that ends the CopyData + and the other ends the START_REPLICATION itself), and + is ready to accept a new command. @@ -2382,7 +2383,8 @@ The commands accepted in replication mode are: The messages inside the CopyBothResponse messages are of the same format - documented for START_REPLICATION ... PHYSICAL. + documented for START_REPLICATION ... PHYSICAL, including + two CommandComplete messages. 
@@ -2587,7 +2589,7 @@ The commands accepted in replication mode are: and sent along with the backup. The manifest is a list of every file present in the backup with the exception of any WAL files that may be included. It also stores the size, last modification time, and - an optional checksum for each file. + optionally a checksum for each file. A value of force-encode forces all filenames to be hex-encoded; otherwise, this type of encoding is performed only for files whose names are non-UTF8 octet sequences. @@ -2603,7 +2605,7 @@ The commands accepted in replication mode are: MANIFEST_CHECKSUMS checksum_algorithm - Specifies the algorithm that should be applied to each file included + Specifies the checksum algorithm that should be applied to each file included in the backup manifest. Currently, the available algorithms are NONE, CRC32C, SHA224, SHA256, @@ -2837,7 +2839,7 @@ The commands accepted in replication mode are: Every sent transaction contains zero or more DML messages (Insert, Update, Delete). In case of a cascaded setup it can also contain Origin - messages. The origin message indicated that the transaction originated on + messages. The origin message indicates that the transaction originated on different replication node. Since a replication node in the scope of logical replication protocol can be pretty much anything, the only identifier is the origin name. It's downstream's responsibility to handle this as @@ -3236,7 +3238,7 @@ AuthenticationGSS (B) -AuthenticationSSPI (B) +AuthenticationGSSContinue (B) @@ -3254,7 +3256,7 @@ AuthenticationSSPI (B) - Int32(8) + Int32 @@ -3264,11 +3266,21 @@ AuthenticationSSPI (B) - Int32(9) + Int32(8) - Specifies that SSPI authentication is required. + Specifies that this message contains GSSAPI or SSPI data. + + + + + + Byten + + + + GSSAPI or SSPI authentication data. 
@@ -3281,7 +3293,7 @@ AuthenticationSSPI (B) -AuthenticationGSSContinue (B) +AuthenticationSSPI (B) @@ -3298,32 +3310,22 @@ AuthenticationGSSContinue (B) - - Int32 - - - - Length of message contents in bytes, including self. - - - - Int32(8) - Specifies that this message contains GSSAPI or SSPI data. + Length of message contents in bytes, including self. - Byten + Int32(9) - GSSAPI or SSPI authentication data. + Specifies that SSPI authentication is required. diff --git a/doc/src/sgml/queries.sgml b/doc/src/sgml/queries.sgml index 67ca71e56490b..f06afe2c3fb1d 100644 --- a/doc/src/sgml/queries.sgml +++ b/doc/src/sgml/queries.sgml @@ -24,7 +24,7 @@ The process of retrieving or the command to retrieve data from a database is called a query. In SQL the - command is + SELECT command is used to specify queries. The general syntax of the SELECT command is @@ -762,7 +762,8 @@ SELECT * FROM vw_getfoo; In some cases it is useful to define table functions that can return different column sets depending on how they are invoked. To support this, the table function can be declared as returning - the pseudo-type record. When such a function is used in + the pseudo-type record with no OUT + parameters. When such a function is used in a query, the expected row structure must be specified in the query itself, so that the system can know how to parse and plan the query. This syntax looks like: @@ -803,6 +804,33 @@ SELECT * that the parser knows, for example, what * should expand to. + + + This example uses ROWS FROM: + +SELECT * +FROM ROWS FROM + ( + json_to_recordset('[{"a":40,"b":"foo"},{"a":"100","b":"bar"}]') + AS (a INTEGER, b TEXT), + generate_series(1, 3) + ) AS x (p, q, s) +ORDER BY p; + + p | q | s +-----+-----+--- + 40 | foo | 1 + 100 | bar | 2 + | | 3 + + It joins two functions into a single FROM + target. json_to_recordset() is instructed + to return two columns, the first integer + and the second text. The result of + generate_series() is used directly. 
+ The ORDER BY clause sorts the column values + as integers. + @@ -1496,21 +1524,25 @@ SELECT a AS value, b + c AS sum FROM ... - The AS keyword is optional, but only if the new column - name does not match any - PostgreSQL keyword (see ). To avoid an accidental match to - a keyword, you can double-quote the column name. For example, - VALUE is a keyword, so this does not work: + The AS key word is usually optional, but in some + cases where the desired column name matches a + PostgreSQL key word, you must write + AS or double-quote the column name in order to + avoid ambiguity. + ( shows which key words + require AS to be used as a column label.) + For example, FROM is one such key word, so this + does not work: -SELECT a value, b + c AS sum FROM ... +SELECT a from, b + c AS sum FROM ... - but this does: + but either of these do: -SELECT a "value", b + c AS sum FROM ... +SELECT a AS from, b + c AS sum FROM ... +SELECT a "from", b + c AS sum FROM ... - For protection against possible - future keyword additions, it is recommended that you always either + For greatest safety against possible + future key word additions, it is recommended that you always either write AS or double-quote the output column name. @@ -1979,6 +2011,10 @@ GROUP BY region, product; but we'd have needed two levels of nested sub-SELECTs. It's a bit easier to follow this way. + + + + Recursive Queries @@ -2082,6 +2118,120 @@ GROUP BY sub_part + + Search Order + + + When computing a tree traversal using a recursive query, you might want to + order the results in either depth-first or breadth-first order. This can + be done by computing an ordering column alongside the other data columns + and using that to sort the results at the end. Note that this does not + actually control in which order the query evaluation visits the rows; that + is as always in SQL implementation-dependent. This approach merely + provides a convenient way to order the results afterwards. 
+ + + + To create a depth-first order, we compute for each result row an array of + rows that we have visited so far. For example, consider the following + query that searches a table tree using a + link field: + + +WITH RECURSIVE search_tree(id, link, data) AS ( + SELECT t.id, t.link, t.data + FROM tree t + UNION ALL + SELECT t.id, t.link, t.data + FROM tree t, search_tree st + WHERE t.id = st.link +) +SELECT * FROM search_tree; + + + To add depth-first ordering information, you can write this: + + +WITH RECURSIVE search_tree(id, link, data, path) AS ( + SELECT t.id, t.link, t.data, ARRAY[t.id] + FROM tree t + UNION ALL + SELECT t.id, t.link, t.data, path || t.id + FROM tree t, search_tree st + WHERE t.id = st.link +) +SELECT * FROM search_tree ORDER BY path; + + + + + In the general case where more than one field needs to be used to identify + a row, use an array of rows. For example, if we needed to track fields + f1 and f2: + + +WITH RECURSIVE search_tree(id, link, data, path) AS ( + SELECT t.id, t.link, t.data, ARRAY[ROW(t.f1, t.f2)] + FROM tree t + UNION ALL + SELECT t.id, t.link, t.data, path || ROW(t.f1, t.f2) + FROM tree t, search_tree st + WHERE t.id = st.link +) +SELECT * FROM search_tree ORDER BY path; + + + + + + The queries shown in this and the following section involving + ROW constructors in the target list only support + UNION ALL (not plain UNION) in the + current implementation. + + + + + + Omit the ROW() syntax in the common case where only one + field needs to be tracked. This allows a simple array rather than a + composite-type array to be used, gaining efficiency. 
+ + + + + To create a breadth-first order, you can add a column that tracks the depth + of the search, for example: + + +WITH RECURSIVE search_tree(id, link, data, depth) AS ( + SELECT t.id, t.link, t.data, 0 + FROM tree t + UNION ALL + SELECT t.id, t.link, t.data, depth + 1 + FROM tree t, search_tree st + WHERE t.id = st.link +) +SELECT * FROM search_tree ORDER BY depth; + + + To get a stable sort, add data columns as secondary sorting columns. + + + + + The recursive query evaluation algorithm produces its output in + breadth-first search order. However, this is an implementation detail and + it is perhaps unsound to rely on it. The order of the rows within each + level is certainly undefined, so some explicit ordering might be desired + in any case. + + + + + + Cycle Detection + When working with recursive queries it is important to be sure that the recursive part of the query will eventually return no tuples, @@ -2091,13 +2241,13 @@ GROUP BY sub_part cycle does not involve output rows that are completely duplicate: it may be necessary to check just one or a few fields to see if the same point has been reached before. The standard method for handling such situations is - to compute an array of the already-visited values. For example, consider + to compute an array of the already-visited values. For example, consider again the following query that searches a table graph using a link field: WITH RECURSIVE search_graph(id, link, data, depth) AS ( - SELECT g.id, g.link, g.data, 1 + SELECT g.id, g.link, g.data, 0 FROM graph g UNION ALL SELECT g.id, g.link, g.data, sg.depth + 1 @@ -2112,20 +2262,20 @@ SELECT * FROM search_graph; UNION ALL to UNION would not eliminate the looping. Instead we need to recognize whether we have reached the same row again while following a particular path of links. 
We add two columns - path and cycle to the loop-prone query: + is_cycle and path to the loop-prone query: -WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS ( - SELECT g.id, g.link, g.data, 1, - ARRAY[g.id], - false +WITH RECURSIVE search_graph(id, link, data, depth, is_cycle, path) AS ( + SELECT g.id, g.link, g.data, 0, + false, + ARRAY[g.id] FROM graph g UNION ALL SELECT g.id, g.link, g.data, sg.depth + 1, - path || g.id, - g.id = ANY(path) + g.id = ANY(path), + path || g.id FROM graph g, search_graph sg - WHERE g.id = sg.link AND NOT cycle + WHERE g.id = sg.link AND NOT is_cycle ) SELECT * FROM search_graph; @@ -2140,17 +2290,17 @@ SELECT * FROM search_graph; compare fields f1 and f2: -WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS ( - SELECT g.id, g.link, g.data, 1, - ARRAY[ROW(g.f1, g.f2)], - false +WITH RECURSIVE search_graph(id, link, data, depth, is_cycle, path) AS ( + SELECT g.id, g.link, g.data, 0, + false, + ARRAY[ROW(g.f1, g.f2)] FROM graph g UNION ALL SELECT g.id, g.link, g.data, sg.depth + 1, - path || ROW(g.f1, g.f2), - ROW(g.f1, g.f2) = ANY(path) + ROW(g.f1, g.f2) = ANY(path), + path || ROW(g.f1, g.f2) FROM graph g, search_graph sg - WHERE g.id = sg.link AND NOT cycle + WHERE g.id = sg.link AND NOT is_cycle ) SELECT * FROM search_graph; @@ -2166,10 +2316,8 @@ SELECT * FROM search_graph; - The recursive query evaluation algorithm produces its output in - breadth-first search order. You can display the results in depth-first - search order by making the outer query ORDER BY a - path column constructed in this way. + The cycle path column is computed in the same way as the depth-first + ordering column show in the previous section. 
@@ -2185,7 +2333,7 @@ WITH RECURSIVE t(n) AS ( UNION ALL SELECT n+1 FROM t ) -SELECT n FROM t LIMIT 100; +SELECT n FROM t LIMIT 100; This works because PostgreSQL's implementation @@ -2197,6 +2345,11 @@ SELECT n FROM t LIMIT 100; outer query will usually try to fetch all of the WITH query's output anyway. + + + + + Common Table Expression Materialization A useful property of WITH queries is that they are diff --git a/doc/src/sgml/ref/abort.sgml b/doc/src/sgml/ref/abort.sgml index 0372913365167..16b5602487d7d 100644 --- a/doc/src/sgml/ref/abort.sgml +++ b/doc/src/sgml/ref/abort.sgml @@ -33,7 +33,7 @@ ABORT [ WORK | TRANSACTION ] [ AND [ NO ] CHAIN ] all the updates made by the transaction to be discarded. This command is identical in behavior to the standard SQL command - , + ROLLBACK, and is present only for historical reasons. @@ -57,8 +57,8 @@ ABORT [ WORK | TRANSACTION ] [ AND [ NO ] CHAIN ] If AND CHAIN is specified, a new transaction is - immediately started with the same transaction characteristics (see ) as the just finished one. Otherwise, + immediately started with the same transaction characteristics (see SET TRANSACTION) as the just finished one. Otherwise, no new transaction is started. @@ -70,7 +70,7 @@ ABORT [ WORK | TRANSACTION ] [ AND [ NO ] CHAIN ] Notes - Use to + Use COMMIT to successfully terminate a transaction. 
diff --git a/doc/src/sgml/ref/alter_aggregate.sgml b/doc/src/sgml/ref/alter_aggregate.sgml index 2ad3e0440bf87..aee10a5ca2e05 100644 --- a/doc/src/sgml/ref/alter_aggregate.sgml +++ b/doc/src/sgml/ref/alter_aggregate.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation ALTER AGGREGATE name ( aggregate_signature ) RENAME TO new_name ALTER AGGREGATE name ( aggregate_signature ) - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER AGGREGATE name ( aggregate_signature ) SET SCHEMA new_schema where aggregate_signature is: @@ -142,7 +142,7 @@ ALTER AGGREGATE name ( aggregate_signatu The recommended syntax for referencing an ordered-set aggregate is to write ORDER BY between the direct and aggregated argument specifications, in the same style as in - . However, it will also work to + CREATE AGGREGATE. However, it will also work to omit ORDER BY and just run the direct and aggregated argument specifications into a single list. 
In this abbreviated form, if VARIADIC "any" was used in both the direct and diff --git a/doc/src/sgml/ref/alter_collation.sgml b/doc/src/sgml/ref/alter_collation.sgml index bee6f0dd3ca1d..af9ff2867b722 100644 --- a/doc/src/sgml/ref/alter_collation.sgml +++ b/doc/src/sgml/ref/alter_collation.sgml @@ -24,7 +24,7 @@ PostgreSQL documentation ALTER COLLATION name REFRESH VERSION ALTER COLLATION name RENAME TO new_name -ALTER COLLATION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER COLLATION name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER COLLATION name SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_conversion.sgml b/doc/src/sgml/ref/alter_conversion.sgml index c42bd8b3e4043..a128f20f3e8a8 100644 --- a/doc/src/sgml/ref/alter_conversion.sgml +++ b/doc/src/sgml/ref/alter_conversion.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation ALTER CONVERSION name RENAME TO new_name -ALTER CONVERSION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER CONVERSION name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER CONVERSION name SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_database.sgml b/doc/src/sgml/ref/alter_database.sgml index 7db878cf532ce..81e37536a3f66 100644 --- a/doc/src/sgml/ref/alter_database.sgml +++ b/doc/src/sgml/ref/alter_database.sgml @@ -31,7 +31,7 @@ ALTER DATABASE name [ [ WITH ] name RENAME TO new_name -ALTER DATABASE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER DATABASE name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER DATABASE name SET TABLESPACE new_tablespace diff --git a/doc/src/sgml/ref/alter_domain.sgml b/doc/src/sgml/ref/alter_domain.sgml index 8201cbb65fcd4..2db53725139ca 100644 --- a/doc/src/sgml/ref/alter_domain.sgml +++ b/doc/src/sgml/ref/alter_domain.sgml @@ -36,7 +36,7 @@ ALTER DOMAIN name ALTER DOMAIN name VALIDATE CONSTRAINT constraint_name ALTER DOMAIN name - OWNER TO { new_owner | 
CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER DOMAIN name RENAME TO new_name ALTER DOMAIN name @@ -80,7 +80,7 @@ ALTER DOMAIN name This form adds a new constraint to a domain using the same syntax as - . + CREATE DOMAIN. When a new constraint is added to a domain, all columns using that domain will be checked against the newly added constraint. These checks can be suppressed by adding the new constraint using the diff --git a/doc/src/sgml/ref/alter_event_trigger.sgml b/doc/src/sgml/ref/alter_event_trigger.sgml index 61919f7845dbc..ef5253bf37eb3 100644 --- a/doc/src/sgml/ref/alter_event_trigger.sgml +++ b/doc/src/sgml/ref/alter_event_trigger.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation ALTER EVENT TRIGGER name DISABLE ALTER EVENT TRIGGER name ENABLE [ REPLICA | ALWAYS ] -ALTER EVENT TRIGGER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER EVENT TRIGGER name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER EVENT TRIGGER name RENAME TO new_name diff --git a/doc/src/sgml/ref/alter_extension.sgml b/doc/src/sgml/ref/alter_extension.sgml index a2d405d6cdfb5..38fd60128b783 100644 --- a/doc/src/sgml/ref/alter_extension.sgml +++ b/doc/src/sgml/ref/alter_extension.sgml @@ -212,11 +212,12 @@ ALTER EXTENSION name DROP IN, OUT, INOUT, or VARIADIC. If omitted, the default is IN. - Note that ALTER EXTENSION does not actually pay - any attention to OUT arguments, since only the input - arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + Note that ALTER EXTENSION does not actually pay any + attention to OUT arguments for functions and + aggregates (but not procedures), since only the input arguments are + needed to determine the function's identity. So it is sufficient to + list the IN, INOUT, and + VARIADIC arguments for functions and aggregates. 
@@ -251,7 +252,7 @@ ALTER EXTENSION name DROP The data type(s) of the operator's arguments (optionally schema-qualified). Write NONE for the missing argument - of a prefix or postfix operator. + of a prefix operator. diff --git a/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml b/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml index 14f3d616e71c9..54f34c2c01516 100644 --- a/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml +++ b/doc/src/sgml/ref/alter_foreign_data_wrapper.sgml @@ -25,7 +25,7 @@ ALTER FOREIGN DATA WRAPPER name [ HANDLER handler_function | NO HANDLER ] [ VALIDATOR validator_function | NO VALIDATOR ] [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) ] -ALTER FOREIGN DATA WRAPPER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER FOREIGN DATA WRAPPER name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER FOREIGN DATA WRAPPER name RENAME TO new_name diff --git a/doc/src/sgml/ref/alter_foreign_table.sgml b/doc/src/sgml/ref/alter_foreign_table.sgml index 0f11897c99770..7ca03f3ac9f11 100644 --- a/doc/src/sgml/ref/alter_foreign_table.sgml +++ b/doc/src/sgml/ref/alter_foreign_table.sgml @@ -53,7 +53,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] nameparent_table NO INHERIT parent_table - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ]) @@ -71,7 +71,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form adds a new column to the foreign table, using the same syntax as - . + CREATE FOREIGN TABLE. Unlike the case when adding a column to a regular table, nothing happens to the underlying storage: this action simply declares that some new column is now accessible through the foreign table. @@ -133,8 +133,8 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form sets the per-column statistics-gathering target for subsequent - operations. - See the similar form of + ANALYZE operations. 
+ See the similar form of ALTER TABLE for more details. @@ -146,7 +146,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form sets or resets per-attribute options. - See the similar form of + See the similar form of ALTER TABLE for more details. @@ -159,7 +159,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form sets the storage mode for a column. - See the similar form of + See the similar form of ALTER TABLE for more details. Note that the storage mode has no effect unless the table's foreign-data wrapper chooses to pay attention to it. @@ -172,7 +172,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form adds a new constraint to a foreign table, using the same - syntax as . + syntax as CREATE FOREIGN TABLE. Currently only CHECK constraints are supported. @@ -181,7 +181,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name.) + in CREATE FOREIGN TABLE.) If the constraint is marked NOT VALID, then it isn't assumed to hold, but is only recorded for possible future use. @@ -216,7 +216,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name These forms configure the firing of trigger(s) belonging to the foreign - table. See the similar form of for more + table. See the similar form of ALTER TABLE for more details. @@ -239,7 +239,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name This form adds the target foreign table as a new child of the specified parent table. - See the similar form of + See the similar form of ALTER TABLE for more details. @@ -503,7 +503,7 @@ ALTER FOREIGN TABLE [ IF EXISTS ] name - Refer to for a further description of valid + Refer to CREATE FOREIGN TABLE for a further description of valid parameters. diff --git a/doc/src/sgml/ref/alter_function.sgml b/doc/src/sgml/ref/alter_function.sgml index 70b1f24bc003a..54e61e7d78858 100644 --- a/doc/src/sgml/ref/alter_function.sgml +++ b/doc/src/sgml/ref/alter_function.sgml @@ -26,7 +26,7 @@ ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] 
] ) ] RENAME TO new_name ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] SET SCHEMA new_schema ALTER FUNCTION name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] diff --git a/doc/src/sgml/ref/alter_group.sgml b/doc/src/sgml/ref/alter_group.sgml index 39cc2b88cfa20..fa4a8df912497 100644 --- a/doc/src/sgml/ref/alter_group.sgml +++ b/doc/src/sgml/ref/alter_group.sgml @@ -27,6 +27,7 @@ ALTER GROUP role_specification DROP where role_specification can be: role_name + | CURRENT_ROLE | CURRENT_USER | SESSION_USER @@ -50,14 +51,14 @@ ALTER GROUP group_name RENAME TO group for this purpose.) These variants are effectively equivalent to granting or revoking membership in the role named as the group; so the preferred way to do this is to use - or - . + GRANT or + REVOKE. The third variant changes the name of the group. This is exactly equivalent to renaming the role with - . + ALTER ROLE. diff --git a/doc/src/sgml/ref/alter_index.sgml b/doc/src/sgml/ref/alter_index.sgml index a5e3b06ee4932..793119d2fc1a5 100644 --- a/doc/src/sgml/ref/alter_index.sgml +++ b/doc/src/sgml/ref/alter_index.sgml @@ -81,7 +81,7 @@ ALTER INDEX ALL IN TABLESPACE name this command, use ALTER DATABASE or explicit ALTER INDEX invocations instead if desired. See also - . + CREATE TABLESPACE. @@ -118,11 +118,11 @@ ALTER INDEX ALL IN TABLESPACE name This form changes one or more index-method-specific storage parameters for the index. See - + CREATE INDEX for details on the available parameters. Note that the index contents will not be modified immediately by this command; depending on the parameter you might need to rebuild the index with - + REINDEX to get the desired effects. 
@@ -144,7 +144,7 @@ ALTER INDEX ALL IN TABLESPACE name This form sets the per-column statistics-gathering target for - subsequent operations, though can + subsequent ANALYZE operations, though can be used only on index columns that are defined as an expression. Since expressions lack a unique name, we refer to them using the ordinal number of the index column. @@ -252,7 +252,7 @@ ALTER INDEX ALL IN TABLESPACE name These operations are also possible using - . + ALTER TABLE. ALTER INDEX is in fact just an alias for the forms of ALTER TABLE that apply to indexes. diff --git a/doc/src/sgml/ref/alter_language.sgml b/doc/src/sgml/ref/alter_language.sgml index eac63dec1322b..0b61c18aee369 100644 --- a/doc/src/sgml/ref/alter_language.sgml +++ b/doc/src/sgml/ref/alter_language.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation ALTER [ PROCEDURAL ] LANGUAGE name RENAME TO new_name -ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER [ PROCEDURAL ] LANGUAGE name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } diff --git a/doc/src/sgml/ref/alter_large_object.sgml b/doc/src/sgml/ref/alter_large_object.sgml index 356f8a8eabf4d..17ea1491ba379 100644 --- a/doc/src/sgml/ref/alter_large_object.sgml +++ b/doc/src/sgml/ref/alter_large_object.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -ALTER LARGE OBJECT large_object_oid OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER LARGE OBJECT large_object_oid OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } diff --git a/doc/src/sgml/ref/alter_materialized_view.sgml b/doc/src/sgml/ref/alter_materialized_view.sgml index 7321183dd0db8..bf379db77e38d 100644 --- a/doc/src/sgml/ref/alter_materialized_view.sgml +++ b/doc/src/sgml/ref/alter_materialized_view.sgml @@ -44,7 +44,7 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE namestorage_parameter [= value] [, ... ] ) RESET ( storage_parameter [, ... 
] ) - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } @@ -72,7 +72,8 @@ ALTER MATERIALIZED VIEW ALL IN TABLESPACE nameALTER MATERIALIZED VIEW are a subset of those available for ALTER TABLE, and have the same meaning when used for - materialized views. See the descriptions for + materialized views. See the descriptions for + ALTER TABLE for details. diff --git a/doc/src/sgml/ref/alter_opclass.sgml b/doc/src/sgml/ref/alter_opclass.sgml index 59a64caa4fadc..b1db459b113c3 100644 --- a/doc/src/sgml/ref/alter_opclass.sgml +++ b/doc/src/sgml/ref/alter_opclass.sgml @@ -25,7 +25,7 @@ ALTER OPERATOR CLASS name USING index_method - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER OPERATOR CLASS name USING index_method SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_operator.sgml b/doc/src/sgml/ref/alter_operator.sgml index b3bfa9ccbe97e..ad90c137f1491 100644 --- a/doc/src/sgml/ref/alter_operator.sgml +++ b/doc/src/sgml/ref/alter_operator.sgml @@ -21,13 +21,13 @@ PostgreSQL documentation -ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER OPERATOR name ( { left_type | NONE } , right_type ) + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } -ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) +ALTER OPERATOR name ( { left_type | NONE } , right_type ) SET SCHEMA new_schema -ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) +ALTER OPERATOR name ( { left_type | NONE } , right_type ) SET ( { RESTRICT = { res_proc | NONE } | JOIN = { join_proc | NONE } } [, ... ] ) @@ -79,8 +79,7 @@ ALTER OPERATOR name ( { left_typeright_type - The data type of the operator's right operand; write - NONE if the operator has no right operand. + The data type of the operator's right operand. 
diff --git a/doc/src/sgml/ref/alter_opfamily.sgml b/doc/src/sgml/ref/alter_opfamily.sgml index 4ac1cca95a3f2..b3b5d61a852e6 100644 --- a/doc/src/sgml/ref/alter_opfamily.sgml +++ b/doc/src/sgml/ref/alter_opfamily.sgml @@ -37,7 +37,7 @@ ALTER OPERATOR FAMILY name USING index_method - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER OPERATOR FAMILY name USING index_method SET SCHEMA new_schema @@ -141,7 +141,7 @@ ALTER OPERATOR FAMILY name USING name ON table_name RENAME TO new_name ALTER POLICY name ON table_name - [ TO { role_name | PUBLIC | CURRENT_USER | SESSION_USER } [, ...] ] + [ TO { role_name | PUBLIC | CURRENT_ROLE | CURRENT_USER | SESSION_USER } [, ...] ] [ USING ( using_expression ) ] [ WITH CHECK ( check_expression ) ] diff --git a/doc/src/sgml/ref/alter_procedure.sgml b/doc/src/sgml/ref/alter_procedure.sgml index dae80076d953d..5c176fb5d8760 100644 --- a/doc/src/sgml/ref/alter_procedure.sgml +++ b/doc/src/sgml/ref/alter_procedure.sgml @@ -26,7 +26,7 @@ ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] RENAME TO new_name ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] SET SCHEMA new_schema ALTER PROCEDURE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] @@ -81,8 +81,9 @@ ALTER PROCEDURE name [ ( [ [ name ADD TABLE [ ALTER PUBLICATION name SET TABLE [ ONLY ] table_name [ * ] [, ...] ALTER PUBLICATION name DROP TABLE [ ONLY ] table_name [ * ] [, ...] ALTER PUBLICATION name SET ( publication_parameter [= value] [, ... 
] ) -ALTER PUBLICATION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER PUBLICATION name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER PUBLICATION name RENAME TO new_name diff --git a/doc/src/sgml/ref/alter_role.sgml b/doc/src/sgml/ref/alter_role.sgml index dbf258ef50d03..aef30521bcc57 100644 --- a/doc/src/sgml/ref/alter_role.sgml +++ b/doc/src/sgml/ref/alter_role.sgml @@ -46,6 +46,7 @@ ALTER ROLE { role_specification | A where role_specification can be: role_name + | CURRENT_ROLE | CURRENT_USER | SESSION_USER @@ -62,11 +63,11 @@ ALTER ROLE { role_specification | A The first variant of this command listed in the synopsis can change many of the role attributes that can be specified in - . + CREATE ROLE. (All the possible attributes are covered, except that there are no options for adding or removing memberships; use - and - for that.) + GRANT and + REVOKE for that.) Attributes not mentioned in the command retain their previous settings. Database superusers can change any of these settings for any role. Roles having CREATEROLE privilege can change any of these @@ -102,8 +103,8 @@ ALTER ROLE { role_specification | A default, overriding whatever setting is present in postgresql.conf or has been received from the postgres command line. This only happens at login time; executing - or - does not cause new + SET ROLE or + SET SESSION AUTHORIZATION does not cause new configuration values to be set. Settings set for all databases are overridden by database-specific settings attached to a role. Settings for specific databases or specific roles override @@ -134,6 +135,7 @@ ALTER ROLE { role_specification | A + CURRENT_ROLE CURRENT_USER @@ -174,7 +176,7 @@ ALTER ROLE { role_specification | A These clauses alter attributes originally set by - . For more information, see the + CREATE ROLE. For more information, see the CREATE ROLE reference page. 
@@ -218,8 +220,8 @@ ALTER ROLE { role_specification | A Role-specific variable settings take effect only at login; - and - + SET ROLE and + SET SESSION AUTHORIZATION do not process role-specific variable settings. @@ -237,14 +239,14 @@ ALTER ROLE { role_specification | A Notes - Use - to add new roles, and to remove a role. + Use CREATE ROLE + to add new roles, and DROP ROLE to remove a role. ALTER ROLE cannot change a role's memberships. - Use and - + Use GRANT and + REVOKE to do that. diff --git a/doc/src/sgml/ref/alter_routine.sgml b/doc/src/sgml/ref/alter_routine.sgml index d1699691e10f2..36acaff3198d6 100644 --- a/doc/src/sgml/ref/alter_routine.sgml +++ b/doc/src/sgml/ref/alter_routine.sgml @@ -26,7 +26,7 @@ ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] RENAME TO new_name ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] ] ) ] SET SCHEMA new_schema ALTER ROUTINE name [ ( [ [ argmode ] [ argname ] argtype [, ...] 
] ) ] diff --git a/doc/src/sgml/ref/alter_schema.sgml b/doc/src/sgml/ref/alter_schema.sgml index 2937214026ec9..04624c5a5eb0f 100644 --- a/doc/src/sgml/ref/alter_schema.sgml +++ b/doc/src/sgml/ref/alter_schema.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation ALTER SCHEMA name RENAME TO new_name -ALTER SCHEMA name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SCHEMA name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } diff --git a/doc/src/sgml/ref/alter_sequence.sgml b/doc/src/sgml/ref/alter_sequence.sgml index bfd20af6d3d5a..3cd9ece49f223 100644 --- a/doc/src/sgml/ref/alter_sequence.sgml +++ b/doc/src/sgml/ref/alter_sequence.sgml @@ -31,7 +31,7 @@ ALTER SEQUENCE [ IF EXISTS ] name [ RESTART [ [ WITH ] restart ] ] [ CACHE cache ] [ [ NO ] CYCLE ] [ OWNED BY { table_name.column_name | NONE } ] -ALTER SEQUENCE [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SEQUENCE [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER SEQUENCE [ IF EXISTS ] name RENAME TO new_name ALTER SEQUENCE [ IF EXISTS ] name SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_server.sgml b/doc/src/sgml/ref/alter_server.sgml index 17e55b093e93e..186f38b5f82e6 100644 --- a/doc/src/sgml/ref/alter_server.sgml +++ b/doc/src/sgml/ref/alter_server.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation ALTER SERVER name [ VERSION 'new_version' ] [ OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... 
] ) ] -ALTER SERVER name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SERVER name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER SERVER name RENAME TO new_name diff --git a/doc/src/sgml/ref/alter_statistics.sgml b/doc/src/sgml/ref/alter_statistics.sgml index be4c3f1f0576e..ce6cdf2bb1ecc 100644 --- a/doc/src/sgml/ref/alter_statistics.sgml +++ b/doc/src/sgml/ref/alter_statistics.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation -ALTER STATISTICS name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER STATISTICS name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER STATISTICS name RENAME TO new_name ALTER STATISTICS name SET SCHEMA new_schema ALTER STATISTICS name SET STATISTICS new_target @@ -99,9 +99,10 @@ ALTER STATISTICS name SET STATISTIC The statistic-gathering target for this statistics object for subsequent - operations. + ANALYZE operations. The target can be set in the range 0 to 10000; alternatively, set it - to -1 to revert to using the system default statistics + to -1 to revert to using the maximum of the statistics target of the + referenced columns, if set, or the system default statistics target (). For more information on the use of statistics by the PostgreSQL query planner, refer to diff --git a/doc/src/sgml/ref/alter_subscription.sgml b/doc/src/sgml/ref/alter_subscription.sgml index a1666b370be9d..db5e59f707c6a 100644 --- a/doc/src/sgml/ref/alter_subscription.sgml +++ b/doc/src/sgml/ref/alter_subscription.sgml @@ -27,7 +27,7 @@ ALTER SUBSCRIPTION name REFRESH PUB ALTER SUBSCRIPTION name ENABLE ALTER SUBSCRIPTION name DISABLE ALTER SUBSCRIPTION name SET ( subscription_parameter [= value] [, ... 
] ) -ALTER SUBSCRIPTION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER SUBSCRIPTION name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER SUBSCRIPTION name RENAME TO new_name diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml index c1576cc69628e..c25ef5abd6ade 100644 --- a/doc/src/sgml/ref/alter_table.sgml +++ b/doc/src/sgml/ref/alter_table.sgml @@ -82,7 +82,7 @@ ALTER TABLE [ IF EXISTS ] name NO INHERIT parent_table OF type_name NOT OF - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } REPLICA IDENTITY { DEFAULT | USING INDEX index_name | FULL | NOTHING } and partition_bound_spec is: @@ -152,7 +152,7 @@ WITH ( MODULUS numeric_literal, REM This form adds a new column to the table, using the same syntax as - . If IF NOT EXISTS + CREATE TABLE. If IF NOT EXISTS is specified and a column already exists with this name, no error is thrown. @@ -268,7 +268,7 @@ WITH ( MODULUS numeric_literal, REM These forms change whether a column is an identity column or change the generation attribute of an existing identity column. - See for details. + See CREATE TABLE for details. Like SET DEFAULT, these forms only affect the behavior of subsequent INSERT and UPDATE commands; they do not cause rows @@ -290,7 +290,7 @@ WITH ( MODULUS numeric_literal, REM These forms alter the sequence that underlies an existing identity column. sequence_option is an option - supported by such + supported by ALTER SEQUENCE such as INCREMENT BY. @@ -302,7 +302,7 @@ WITH ( MODULUS numeric_literal, REM This form sets the per-column statistics-gathering target for subsequent - operations. + ANALYZE operations. The target can be set in the range 0 to 10000; alternatively, set it to -1 to revert to using the system default statistics target (). 
@@ -326,7 +326,7 @@ WITH ( MODULUS numeric_literal, REM defined per-attribute options are n_distinct and n_distinct_inherited, which override the number-of-distinct-values estimates made by subsequent - + ANALYZE operations. n_distinct affects the statistics for the table itself, while n_distinct_inherited affects the statistics gathered for the table plus its inheritance children. When set to a @@ -388,7 +388,7 @@ WITH ( MODULUS numeric_literal, REM This form adds a new constraint to a table using the same constraint - syntax as , plus the option NOT + syntax as CREATE TABLE, plus the option NOT VALID, which is currently only allowed for foreign key and CHECK constraints. @@ -422,7 +422,7 @@ WITH ( MODULUS numeric_literal, REM Additional restrictions apply when unique or primary key constraints - are added to partitioned tables; see . + are added to partitioned tables; see CREATE TABLE. Also, foreign key constraints on partitioned tables may not be declared NOT VALID at present. @@ -598,7 +598,7 @@ WITH ( MODULUS numeric_literal, REM even if row level security is disabled. In this case, the policies will not be applied and the policies will be ignored. See also - . + CREATE POLICY. @@ -613,7 +613,7 @@ WITH ( MODULUS numeric_literal, REM disabled (the default) then row level security will not be applied when the user is the table owner. See also - . + CREATE POLICY. @@ -623,7 +623,7 @@ WITH ( MODULUS numeric_literal, REM This form selects the default index for future - + CLUSTER operations. It does not actually re-cluster the table. @@ -637,7 +637,7 @@ WITH ( MODULUS numeric_literal, REM This form removes the most recently used - + CLUSTER index specification from the table. This affects future cluster operations that don't specify an index. 
@@ -669,7 +669,7 @@ WITH ( MODULUS numeric_literal, REM When applied to a partitioned table, nothing is moved, but any partitions created afterwards with CREATE TABLE PARTITION OF will use that tablespace, - unless the TABLESPACE clause is used to override it. + unless overridden by a TABLESPACE clause. @@ -685,7 +685,7 @@ WITH ( MODULUS numeric_literal, REM information_schema relations are not considered part of the system catalogs and will be moved. See also - . + CREATE TABLESPACE. @@ -707,12 +707,12 @@ WITH ( MODULUS numeric_literal, REM This form changes one or more storage parameters for the table. See in the - documentation + CREATE TABLE documentation for details on the available parameters. Note that the table contents will not be modified immediately by this command; depending on the parameter you might need to rewrite the table to get the desired effects. - That can be done with VACUUM - FULL, or one of the forms + That can be done with VACUUM + FULL, CLUSTER or one of the forms of ALTER TABLE that forces a table rewrite. For planner related parameters, changes will take effect from the next time the table is locked so currently executing queries will not be @@ -878,7 +878,7 @@ WITH ( MODULUS numeric_literal, REM A partition using FOR VALUES uses same syntax for partition_bound_spec as - . The partition bound specification + CREATE TABLE. The partition bound specification must correspond to the partitioning strategy and partition key of the target table. The table to be attached must have all the same columns as the target table and no more; moreover, the column types must also @@ -889,7 +889,7 @@ WITH ( MODULUS numeric_literal, REM from the parent table will be created in the partition, if they don't already exist. If any of the CHECK constraints of the table being - attached is marked NO INHERIT, the command will fail; + attached are marked NO INHERIT, the command will fail; such constraints must be recreated without the NO INHERIT clause. 
diff --git a/doc/src/sgml/ref/alter_tablespace.sgml b/doc/src/sgml/ref/alter_tablespace.sgml index 356fb9f93f32d..6de80746d5646 100644 --- a/doc/src/sgml/ref/alter_tablespace.sgml +++ b/doc/src/sgml/ref/alter_tablespace.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation ALTER TABLESPACE name RENAME TO new_name -ALTER TABLESPACE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER TABLESPACE name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER TABLESPACE name SET ( tablespace_option = value [, ... ] ) ALTER TABLESPACE name RESET ( tablespace_option [, ... ] ) diff --git a/doc/src/sgml/ref/alter_trigger.sgml b/doc/src/sgml/ref/alter_trigger.sgml index 6d4784c82f195..43a7da4f0bcf3 100644 --- a/doc/src/sgml/ref/alter_trigger.sgml +++ b/doc/src/sgml/ref/alter_trigger.sgml @@ -93,7 +93,7 @@ ALTER TRIGGER name ON The ability to temporarily enable or disable a trigger is provided by - , not by + ALTER TABLE, not by ALTER TRIGGER, because ALTER TRIGGER has no convenient way to express the option of enabling or disabling all of a table's triggers at once. diff --git a/doc/src/sgml/ref/alter_tsconfig.sgml b/doc/src/sgml/ref/alter_tsconfig.sgml index ebe0b94b27e50..8fafcd3bbd828 100644 --- a/doc/src/sgml/ref/alter_tsconfig.sgml +++ b/doc/src/sgml/ref/alter_tsconfig.sgml @@ -32,7 +32,7 @@ ALTER TEXT SEARCH CONFIGURATION name ALTER TEXT SEARCH CONFIGURATION name DROP MAPPING [ IF EXISTS ] FOR token_type [, ... 
] ALTER TEXT SEARCH CONFIGURATION name RENAME TO new_name -ALTER TEXT SEARCH CONFIGURATION name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER TEXT SEARCH CONFIGURATION name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER TEXT SEARCH CONFIGURATION name SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_tsdictionary.sgml b/doc/src/sgml/ref/alter_tsdictionary.sgml index b29865e11e929..d1923ef1609fa 100644 --- a/doc/src/sgml/ref/alter_tsdictionary.sgml +++ b/doc/src/sgml/ref/alter_tsdictionary.sgml @@ -25,7 +25,7 @@ ALTER TEXT SEARCH DICTIONARY name ( option [ = value ] [, ... ] ) ALTER TEXT SEARCH DICTIONARY name RENAME TO new_name -ALTER TEXT SEARCH DICTIONARY name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER TEXT SEARCH DICTIONARY name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER TEXT SEARCH DICTIONARY name SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_type.sgml b/doc/src/sgml/ref/alter_type.sgml index f015fcd2689b2..64bf266373d45 100644 --- a/doc/src/sgml/ref/alter_type.sgml +++ b/doc/src/sgml/ref/alter_type.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation -ALTER TYPE name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER TYPE name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER TYPE name RENAME TO new_name ALTER TYPE name SET SCHEMA new_schema ALTER TYPE name RENAME ATTRIBUTE attribute_name TO new_attribute_name [ CASCADE | RESTRICT ] @@ -90,7 +90,7 @@ ALTER TYPE name SET ( This form adds a new attribute to a composite type, using the same syntax as - . + CREATE TYPE. 
diff --git a/doc/src/sgml/ref/alter_user.sgml b/doc/src/sgml/ref/alter_user.sgml index 6769c8ecc4b7d..0ee89f54c5ce0 100644 --- a/doc/src/sgml/ref/alter_user.sgml +++ b/doc/src/sgml/ref/alter_user.sgml @@ -46,6 +46,7 @@ ALTER USER { role_specification | A where role_specification can be: role_name + | CURRENT_ROLE | CURRENT_USER | SESSION_USER @@ -56,7 +57,7 @@ ALTER USER { role_specification | A ALTER USER is now an alias for - . + ALTER ROLE. diff --git a/doc/src/sgml/ref/alter_user_mapping.sgml b/doc/src/sgml/ref/alter_user_mapping.sgml index 7a9b5a188af44..ee5aee9bc9e51 100644 --- a/doc/src/sgml/ref/alter_user_mapping.sgml +++ b/doc/src/sgml/ref/alter_user_mapping.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -ALTER USER MAPPING FOR { user_name | USER | CURRENT_USER | SESSION_USER | PUBLIC } +ALTER USER MAPPING FOR { user_name | USER | CURRENT_ROLE | CURRENT_USER | SESSION_USER | PUBLIC } SERVER server_name OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ] ) @@ -51,7 +51,7 @@ ALTER USER MAPPING FOR { user_name user_name - User name of the mapping. CURRENT_USER + User name of the mapping. CURRENT_ROLE, CURRENT_USER, and USER match the name of the current user. PUBLIC is used to match all present and future user names in the system. 
diff --git a/doc/src/sgml/ref/alter_view.sgml b/doc/src/sgml/ref/alter_view.sgml index e8d9e11e0f6f2..98c312c5bf6b7 100644 --- a/doc/src/sgml/ref/alter_view.sgml +++ b/doc/src/sgml/ref/alter_view.sgml @@ -23,7 +23,7 @@ PostgreSQL documentation ALTER VIEW [ IF EXISTS ] name ALTER [ COLUMN ] column_name SET DEFAULT expression ALTER VIEW [ IF EXISTS ] name ALTER [ COLUMN ] column_name DROP DEFAULT -ALTER VIEW [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_USER | SESSION_USER } +ALTER VIEW [ IF EXISTS ] name OWNER TO { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ALTER VIEW [ IF EXISTS ] name RENAME [ COLUMN ] column_name TO new_column_name ALTER VIEW [ IF EXISTS ] name RENAME TO new_name ALTER VIEW [ IF EXISTS ] name SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/analyze.sgml b/doc/src/sgml/ref/analyze.sgml index 5ac3ba8321937..7d816c87c6030 100644 --- a/doc/src/sgml/ref/analyze.sgml +++ b/doc/src/sgml/ref/analyze.sgml @@ -174,7 +174,7 @@ ANALYZE [ VERBOSE ] [ table_and_columns + strategy for read-mostly databases is to run VACUUM and ANALYZE once a day during a low-usage time of day. (This will not be sufficient if there is heavy update activity.) @@ -205,7 +205,7 @@ ANALYZE [ VERBOSE ] [ table_and_columnsANALYZE is run, even if the actual table contents did not change. This might result in small changes in the planner's estimated costs shown by - . + EXPLAIN. In rare situations, this non-determinism will cause the planner's choices of query plans to change after ANALYZE is run. To avoid this, raise the amount of statistics collected by @@ -216,8 +216,8 @@ ANALYZE [ VERBOSE ] [ table_and_columns configuration variable, or on a column-by-column basis by setting the per-column statistics - target with ALTER TABLE ... ALTER COLUMN ... SET - STATISTICS (see ). + target with ALTER TABLE ... ALTER COLUMN ... SET + STATISTICS. 
The target value sets the maximum number of entries in the most-common-value list and the maximum number of bins in the histogram. The default target value @@ -246,8 +246,7 @@ ANALYZE [ VERBOSE ] [ table_and_columnsALTER TABLE ... ALTER COLUMN ... SET (n_distinct = ...) - (see ). + ALTER TABLE ... ALTER COLUMN ... SET (n_distinct = ...). diff --git a/doc/src/sgml/ref/begin.sgml b/doc/src/sgml/ref/begin.sgml index c23bbfb4e711c..016b021487411 100644 --- a/doc/src/sgml/ref/begin.sgml +++ b/doc/src/sgml/ref/begin.sgml @@ -37,9 +37,9 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_mode BEGIN initiates a transaction block, that is, all statements after a BEGIN command will be - executed in a single transaction until an explicit or is given. + executed in a single transaction until an explicit COMMIT or ROLLBACK is given. By default (without BEGIN), PostgreSQL executes transactions in autocommit mode, that is, each @@ -60,7 +60,7 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_mode If the isolation level, read/write mode, or deferrable mode is specified, the new transaction has those characteristics, as if - + SET TRANSACTION was executed. @@ -90,13 +90,13 @@ BEGIN [ WORK | TRANSACTION ] [ transaction_modeNotes - has the same functionality + START TRANSACTION has the same functionality as BEGIN. - Use or - + Use COMMIT or + ROLLBACK to terminate a transaction block. @@ -131,7 +131,7 @@ BEGIN; BEGIN is a PostgreSQL language extension. It is equivalent to the SQL-standard command - , whose reference page + START TRANSACTION, whose reference page contains additional compatibility information. diff --git a/doc/src/sgml/ref/close.sgml b/doc/src/sgml/ref/close.sgml index e464df1965d9f..32d20edd6aa42 100644 --- a/doc/src/sgml/ref/close.sgml +++ b/doc/src/sgml/ref/close.sgml @@ -84,7 +84,7 @@ CLOSE { name | ALL } PostgreSQL does not have an explicit OPEN cursor statement; a cursor is considered open when it is declared. Use the - + DECLARE statement to declare a cursor. 
diff --git a/doc/src/sgml/ref/cluster.sgml b/doc/src/sgml/ref/cluster.sgml index 4da60d8d56abe..b9450e7366ae3 100644 --- a/doc/src/sgml/ref/cluster.sgml +++ b/doc/src/sgml/ref/cluster.sgml @@ -57,7 +57,7 @@ CLUSTER [VERBOSE] CLUSTER table_name reclusters the table using the same index as before. You can also use the CLUSTER or SET WITHOUT CLUSTER - forms of to set the index to be used for + forms of ALTER TABLE to set the index to be used for future cluster operations, or to clear any previous setting. @@ -159,7 +159,7 @@ CLUSTER [VERBOSE] Because the planner records statistics about the ordering of - tables, it is advisable to run + tables, it is advisable to run ANALYZE on the newly clustered table. Otherwise, the planner might make poor choices of query plans. diff --git a/doc/src/sgml/ref/clusterdb.sgml b/doc/src/sgml/ref/clusterdb.sgml index 177856ca74d25..c838b22c44053 100644 --- a/doc/src/sgml/ref/clusterdb.sgml +++ b/doc/src/sgml/ref/clusterdb.sgml @@ -90,12 +90,15 @@ PostgreSQL documentation - Specifies the name of the database to be clustered. - If this is not specified and (or - ) is not used, the database name is read + Specifies the name of the database to be clustered, + when / is not used. + If this is not specified, the database name is read from the environment variable PGDATABASE. If that is not set, the user name specified for the connection is - used. + used. The dbname can be a connection string. If so, + connection string parameters will override any conflicting command + line options. @@ -246,10 +249,16 @@ PostgreSQL documentation - Specifies the name of the database to connect to discover what other - databases should be clustered. If not specified, the - postgres database will be used, - and if that does not exist, template1 will be used. + Specifies the name of the database to connect to to discover which + databases should be clustered, + when / is used. 
+ If not specified, the postgres database will be used, + or if that does not exist, template1 will be used. + This can be a connection + string. If so, connection string parameters will override any + conflicting command line options. Also, connection string parameters + other than the database name itself will be re-used when connecting + to other databases. diff --git a/doc/src/sgml/ref/comment.sgml b/doc/src/sgml/ref/comment.sgml index fd7492a25567e..eda91b4e240f5 100644 --- a/doc/src/sgml/ref/comment.sgml +++ b/doc/src/sgml/ref/comment.sgml @@ -178,11 +178,12 @@ COMMENT ON argument: IN, OUT, INOUT, or VARIADIC. If omitted, the default is IN. - Note that COMMENT does not actually pay - any attention to OUT arguments, since only the input - arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + Note that COMMENT does not actually pay any attention + to OUT arguments for functions and aggregates (but + not procedures), since only the input arguments are needed to determine + the function's identity. So it is sufficient to list the + IN, INOUT, and + VARIADIC arguments for functions and aggregates. @@ -224,7 +225,7 @@ COMMENT ON The data type(s) of the operator's arguments (optionally schema-qualified). Write NONE for the missing argument - of a prefix or postfix operator. + of a prefix operator. diff --git a/doc/src/sgml/ref/copy.sgml b/doc/src/sgml/ref/copy.sgml index 18189abc6c90b..369342b74d5b5 100644 --- a/doc/src/sgml/ref/copy.sgml +++ b/doc/src/sgml/ref/copy.sgml @@ -112,9 +112,11 @@ COPY { table_name [ ( query - A , , - , or - command whose results are to be + A SELECT, + VALUES, + INSERT, + UPDATE, or + DELETE command whose results are to be copied. Note that parentheses are required around the query. 
diff --git a/doc/src/sgml/ref/create_aggregate.sgml b/doc/src/sgml/ref/create_aggregate.sgml index a315fff8bd3f6..222e0aa5c9d08 100644 --- a/doc/src/sgml/ref/create_aggregate.sgml +++ b/doc/src/sgml/ref/create_aggregate.sgml @@ -629,7 +629,7 @@ SELECT col FROM tab ORDER BY col USING sortop LIMIT 1; The meanings of PARALLEL SAFE, PARALLEL RESTRICTED, and PARALLEL UNSAFE are the same as - in . An aggregate will not be + in CREATE FUNCTION. An aggregate will not be considered for parallelization if it is marked PARALLEL UNSAFE (which is the default!) or PARALLEL RESTRICTED. Note that the parallel-safety markings of the aggregate's support diff --git a/doc/src/sgml/ref/create_cast.sgml b/doc/src/sgml/ref/create_cast.sgml index 2b4d4d557328d..bad75bc1dce5b 100644 --- a/doc/src/sgml/ref/create_cast.sgml +++ b/doc/src/sgml/ref/create_cast.sgml @@ -304,7 +304,7 @@ SELECT CAST ( 2 AS numeric ) + 4.0; Notes - Use to remove user-defined casts. + Use DROP CAST to remove user-defined casts. diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml index 420576c5e83eb..41cb4068ec2fc 100644 --- a/doc/src/sgml/ref/create_database.sgml +++ b/doc/src/sgml/ref/create_database.sgml @@ -226,7 +226,7 @@ CREATE DATABASE name - Use to remove a database. + Use DROP DATABASE to remove a database. @@ -235,9 +235,9 @@ CREATE DATABASE name - Database-level configuration parameters (set via ) and database-level permissions (set via - ) are not copied from the template database. + Database-level configuration parameters (set via ALTER DATABASE) and database-level permissions (set via + GRANT) are not copied from the template database. 
diff --git a/doc/src/sgml/ref/create_foreign_table.sgml b/doc/src/sgml/ref/create_foreign_table.sgml index 3ee0f2d635d30..f9477efe58d72 100644 --- a/doc/src/sgml/ref/create_foreign_table.sgml +++ b/doc/src/sgml/ref/create_foreign_table.sgml @@ -159,7 +159,7 @@ CHECK ( expression ) [ NO INHERIT ] tables from which the new foreign table automatically inherits all columns. Parent tables can be plain tables or foreign tables. See the similar form of - for more details. + CREATE TABLE for more details. @@ -171,7 +171,7 @@ CHECK ( expression ) [ NO INHERIT ] This form can be used to create the foreign table as partition of the given parent table with specified partition bound values. See the similar form of - for more details. + CREATE TABLE for more details. Note that it is currently not allowed to create the foreign table as a partition of the parent table if there are UNIQUE indexes on the parent table. (See also diff --git a/doc/src/sgml/ref/create_function.sgml b/doc/src/sgml/ref/create_function.sgml index 97285b7578416..3c1eaea651cbf 100644 --- a/doc/src/sgml/ref/create_function.sgml +++ b/doc/src/sgml/ref/create_function.sgml @@ -557,7 +557,7 @@ CREATE [ OR REPLACE ] FUNCTION the SQL function. The string obj_file is the name of the shared library file containing the compiled C function, and is interpreted - as for the command. The string + as for the LOAD command. The string link_symbol is the function's link symbol, that is, the name of the function in the C language source code. 
If the link symbol is omitted, it is assumed to diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml index 33aa64e81d58f..749db2845e8c6 100644 --- a/doc/src/sgml/ref/create_index.sgml +++ b/doc/src/sgml/ref/create_index.sgml @@ -463,11 +463,15 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] - Determines whether the buffering build technique described in + Determines whether the buffered build technique described in is used to build the index. With - OFF it is disabled, with ON it is enabled, and - with AUTO it is initially disabled, but turned on - on-the-fly once the index size reaches . The default is AUTO. + OFF buffering is disabled, with ON + it is enabled, and with AUTO it is initially disabled, + but is turned on on-the-fly once the index size reaches + . The default + is AUTO. + Note that if sorted build is possible, it will be used instead of + buffered build unless buffering=ON is specified. @@ -771,7 +775,7 @@ Indexes: least a 32MB share of the total maintenance_work_mem budget. There must also be a remaining 32MB share for the leader process. - Increasing + Increasing may allow more workers to be used, which will reduce the time needed for index creation, so long as the index build is not already I/O bound. Of course, there should also be sufficient @@ -779,8 +783,8 @@ Indexes: - Setting a value for parallel_workers via directly controls how many parallel + Setting a value for parallel_workers via ALTER TABLE directly controls how many parallel worker processes will be requested by a CREATE INDEX against the table. This bypasses the cost model completely, and prevents maintenance_work_mem @@ -808,7 +812,7 @@ Indexes: - Use + Use DROP INDEX to remove an index. 
diff --git a/doc/src/sgml/ref/create_language.sgml b/doc/src/sgml/ref/create_language.sgml index 10d1533d6d8c9..102efe5a6c7f7 100644 --- a/doc/src/sgml/ref/create_language.sgml +++ b/doc/src/sgml/ref/create_language.sgml @@ -137,7 +137,7 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE inline_handler is the name of a previously registered function that will be called to execute an anonymous code block - ( command) + (DO command) in this language. If no inline_handler function is specified, the language does not support anonymous code @@ -183,7 +183,7 @@ CREATE [ OR REPLACE ] [ TRUSTED ] [ PROCEDURAL ] LANGUAGE to drop procedural languages. + Use DROP LANGUAGE to drop procedural languages. diff --git a/doc/src/sgml/ref/create_materialized_view.sgml b/doc/src/sgml/ref/create_materialized_view.sgml index de9f17655c632..5ba851b687a42 100644 --- a/doc/src/sgml/ref/create_materialized_view.sgml +++ b/doc/src/sgml/ref/create_materialized_view.sgml @@ -132,8 +132,8 @@ CREATE MATERIALIZED VIEW [ IF NOT EXISTS ] table_name query - A , TABLE, - or command. This query will run within a + A SELECT, TABLE, + or VALUES command. This query will run within a security-restricted operation; in particular, calls to functions that themselves create temporary tables will fail. diff --git a/doc/src/sgml/ref/create_opclass.sgml b/doc/src/sgml/ref/create_opclass.sgml index f42fb6494c6b3..2d75a1c0b0d6a 100644 --- a/doc/src/sgml/ref/create_opclass.sgml +++ b/doc/src/sgml/ref/create_opclass.sgml @@ -161,7 +161,7 @@ CREATE OPERATOR CLASS name [ DEFAUL In an OPERATOR clause, the operand data type(s) of the operator, or NONE to - signify a left-unary or right-unary operator. The operand data + signify a prefix operator. The operand data types can be omitted in the normal case where they are the same as the operator class's data type. 
diff --git a/doc/src/sgml/ref/create_operator.sgml b/doc/src/sgml/ref/create_operator.sgml index 66c34e0072f0d..e27512ff39193 100644 --- a/doc/src/sgml/ref/create_operator.sgml +++ b/doc/src/sgml/ref/create_operator.sgml @@ -86,20 +86,9 @@ CREATE OPERATOR name ( - At least one of LEFTARG and RIGHTARG must be defined. For - binary operators, both must be defined. For right unary - operators, only LEFTARG should be defined, while for left - unary operators only RIGHTARG should be defined. - - - - - Right unary, also called postfix, operators are deprecated and will be - removed in PostgreSQL version 14. - - - - + For binary operators, both LEFTARG and + RIGHTARG must be defined. For prefix operators only + RIGHTARG should be defined. The function_name function must have been previously defined using CREATE FUNCTION and must be defined to accept the correct number @@ -160,7 +149,7 @@ CREATE OPERATOR name ( The data type of the operator's left operand, if any. - This option would be omitted for a left-unary operator. + This option would be omitted for a prefix operator. @@ -169,8 +158,7 @@ CREATE OPERATOR name ( right_type - The data type of the operator's right operand, if any. - This option would be omitted for a right-unary operator. + The data type of the operator's right operand. @@ -263,8 +251,8 @@ COMMUTATOR = OPERATOR(myschema.===) , - Use to delete user-defined operators - from a database. Use to modify operators in a + Use DROP OPERATOR to delete user-defined operators + from a database. Use ALTER OPERATOR to modify operators in a database. 
diff --git a/doc/src/sgml/ref/create_policy.sgml b/doc/src/sgml/ref/create_policy.sgml index 2e1229c4f94ca..b4f90561018c4 100644 --- a/doc/src/sgml/ref/create_policy.sgml +++ b/doc/src/sgml/ref/create_policy.sgml @@ -24,7 +24,7 @@ PostgreSQL documentation CREATE POLICY name ON table_name [ AS { PERMISSIVE | RESTRICTIVE } ] [ FOR { ALL | SELECT | INSERT | UPDATE | DELETE } ] - [ TO { role_name | PUBLIC | CURRENT_USER | SESSION_USER } [, ...] ] + [ TO { role_name | PUBLIC | CURRENT_ROLE | CURRENT_USER | SESSION_USER } [, ...] ] [ USING ( using_expression ) ] [ WITH CHECK ( check_expression ) ] diff --git a/doc/src/sgml/ref/create_procedure.sgml b/doc/src/sgml/ref/create_procedure.sgml index d225695626cbf..e258eca5ceeae 100644 --- a/doc/src/sgml/ref/create_procedure.sgml +++ b/doc/src/sgml/ref/create_procedure.sgml @@ -97,11 +97,9 @@ CREATE [ OR REPLACE ] PROCEDURE - The mode of an argument: IN, + The mode of an argument: IN, OUT, INOUT, or VARIADIC. If omitted, - the default is IN. (OUT - arguments are currently not supported for procedures. Use - INOUT instead.) + the default is IN. @@ -285,7 +283,7 @@ CREATE [ OR REPLACE ] PROCEDURE the SQL procedure. The string obj_file is the name of the shared library file containing the compiled C procedure, and is interpreted - as for the command. The string + as for the LOAD command. The string link_symbol is the procedure's link symbol, that is, the name of the procedure in the C language source code. If the link symbol is omitted, it is assumed diff --git a/doc/src/sgml/ref/create_role.sgml b/doc/src/sgml/ref/create_role.sgml index 6e4148a17c486..d23133945db20 100644 --- a/doc/src/sgml/ref/create_role.sgml +++ b/doc/src/sgml/ref/create_role.sgml @@ -162,7 +162,7 @@ in sync when changing the above synopsis! If not specified, NOLOGIN is the default, except when CREATE ROLE is invoked through its alternative spelling - . + CREATE USER. @@ -335,8 +335,8 @@ in sync when changing the above synopsis! 
Notes - Use to - change the attributes of a role, and + Use ALTER ROLE to + change the attributes of a role, and DROP ROLE to remove a role. All the attributes specified by CREATE ROLE can be modified by later ALTER ROLE commands. @@ -345,8 +345,8 @@ in sync when changing the above synopsis! The preferred way to add and remove members of roles that are being used as groups is to use - and - . + GRANT and + REVOKE. @@ -364,7 +364,7 @@ in sync when changing the above synopsis! a member of a role with CREATEDB privilege does not immediately grant the ability to create databases, even if INHERIT is set; it would be necessary to become that role via - before + SET ROLE before creating a database. diff --git a/doc/src/sgml/ref/create_schema.sgml b/doc/src/sgml/ref/create_schema.sgml index ffbe1ba3bcc22..3c2dddb1631ee 100644 --- a/doc/src/sgml/ref/create_schema.sgml +++ b/doc/src/sgml/ref/create_schema.sgml @@ -29,6 +29,7 @@ CREATE SCHEMA IF NOT EXISTS AUTHORIZATION role_sp where role_specification can be: user_name + | CURRENT_ROLE | CURRENT_USER | SESSION_USER diff --git a/doc/src/sgml/ref/create_subscription.sgml b/doc/src/sgml/ref/create_subscription.sgml index b7d7457d004e9..e812beee3738f 100644 --- a/doc/src/sgml/ref/create_subscription.sgml +++ b/doc/src/sgml/ref/create_subscription.sgml @@ -160,7 +160,7 @@ CREATE SUBSCRIPTION subscription_name It is safe to use off for logical replication: If the subscriber loses transactions because of missing - synchronization, the data will be resent from the publisher. + synchronization, the data will be sent again from the publisher. diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 087cad184c0cd..fd6777ae0192c 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1152,7 +1152,7 @@ WITH ( MODULUS numeric_literal, REM constraint that is not deferrable will be checked immediately after every command. 
Checking of constraints that are deferrable can be postponed until the end of the transaction - (using the command). + (using the SET CONSTRAINTS command). NOT DEFERRABLE is the default. Currently, only UNIQUE, PRIMARY KEY, EXCLUDE, and @@ -1176,7 +1176,7 @@ WITH ( MODULUS numeric_literal, REM statement. This is the default. If the constraint is INITIALLY DEFERRED, it is checked only at the end of the transaction. The constraint check time can be - altered with the command. + altered with the SET CONSTRAINTS command. @@ -1244,8 +1244,8 @@ WITH ( MODULUS numeric_literal, REM All rows in the temporary table will be deleted at the end - of each transaction block. Essentially, an automatic is done + of each transaction block. Essentially, an automatic TRUNCATE is done at each commit. When used on a partitioned table, this is not cascaded to its partitions. @@ -1359,10 +1359,11 @@ WITH ( MODULUS numeric_literal, REM The toast_tuple_target specifies the minimum tuple length required before - we try to move long column values into TOAST tables, and is also the - target length we try to reduce the length below once toasting begins. - This only affects columns marked as either External or Extended - and applies only to new tuples; there is no effect on existing rows. + we try to compress and/or move long column values into TOAST tables, and + is also the target length we try to reduce the length below once toasting + begins. This affects columns marked as External (for move), + Main (for compression), or Extended (for both) and applies only to new + tuples. There is no effect on existing rows. By default this parameter is set to allow at least 4 tuples per block, which with the default block size will be 2040 bytes. Valid values are between 128 bytes and the (block size - header), by default 8160 bytes. 
@@ -1430,7 +1431,7 @@ WITH ( MODULUS numeric_literal, REM Disabling index cleanup can speed up VACUUM very significantly, but may also lead to severely bloated indexes if table modifications are frequent. The INDEX_CLEANUP - parameter of , if specified, overrides + parameter of VACUUM, if specified, overrides the value of this option. @@ -1451,7 +1452,7 @@ WITH ( MODULUS numeric_literal, REM the truncated pages is returned to the operating system. Note that the truncation requires ACCESS EXCLUSIVE lock on the table. The TRUNCATE parameter - of , if specified, overrides the value + of VACUUM, if specified, overrides the value of this option. diff --git a/doc/src/sgml/ref/create_table_as.sgml b/doc/src/sgml/ref/create_table_as.sgml index a4640929cfba6..bcbd73b2272ca 100644 --- a/doc/src/sgml/ref/create_table_as.sgml +++ b/doc/src/sgml/ref/create_table_as.sgml @@ -185,8 +185,8 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI All rows in the temporary table will be deleted at the end - of each transaction block. Essentially, an automatic is done + of each transaction block. Essentially, an automatic TRUNCATE is done at each commit. @@ -222,9 +222,9 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI query - A , TABLE, or - command, or an command that runs a + A SELECT, TABLE, or VALUES + command, or an EXECUTE command that runs a prepared SELECT, TABLE, or VALUES query. diff --git a/doc/src/sgml/ref/create_tablespace.sgml b/doc/src/sgml/ref/create_tablespace.sgml index 462b8831c2746..84fa7ee5e29e2 100644 --- a/doc/src/sgml/ref/create_tablespace.sgml +++ b/doc/src/sgml/ref/create_tablespace.sgml @@ -22,7 +22,7 @@ PostgreSQL documentation CREATE TABLESPACE tablespace_name - [ OWNER { new_owner | CURRENT_USER | SESSION_USER } ] + [ OWNER { new_owner | CURRENT_ROLE | CURRENT_USER | SESSION_USER } ] LOCATION 'directory' [ WITH ( tablespace_option = value [, ... 
] ) ] diff --git a/doc/src/sgml/ref/create_transform.sgml b/doc/src/sgml/ref/create_transform.sgml index 5b46c23196db2..3f81dc6bba2cf 100644 --- a/doc/src/sgml/ref/create_transform.sgml +++ b/doc/src/sgml/ref/create_transform.sgml @@ -147,7 +147,7 @@ CREATE [ OR REPLACE ] TRANSFORM FOR type_name LANGUAG Notes - Use to remove transforms. + Use DROP TRANSFORM to remove transforms. diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml index 289dd1d9da8ef..60346e1e83b02 100644 --- a/doc/src/sgml/ref/create_trigger.sgml +++ b/doc/src/sgml/ref/create_trigger.sgml @@ -170,7 +170,7 @@ CREATE [ CONSTRAINT ] TRIGGER name When the CONSTRAINT option is specified, this command creates a constraint trigger. This is the same as a regular trigger except that the timing of the trigger firing can be adjusted using - . + SET CONSTRAINTS. Constraint triggers must be AFTER ROW triggers on plain tables (not foreign tables). They can be fired either at the end of the statement causing the triggering @@ -442,7 +442,7 @@ UPDATE OF column_name1 [, column_name2 - Use to remove a trigger. + Use DROP TRIGGER to remove a trigger. diff --git a/doc/src/sgml/ref/create_type.sgml b/doc/src/sgml/ref/create_type.sgml index 111f8e65d29d7..970b517db9f21 100644 --- a/doc/src/sgml/ref/create_type.sgml +++ b/doc/src/sgml/ref/create_type.sgml @@ -121,8 +121,8 @@ CREATE TYPE name must be less than NAMEDATALEN bytes long (64 bytes in a standard PostgreSQL build). (It is possible to create an enumerated type with zero labels, but such a type cannot be used - to hold values before at least one label is added using .) + to hold values before at least one label is added using ALTER TYPE.) diff --git a/doc/src/sgml/ref/create_user.sgml b/doc/src/sgml/ref/create_user.sgml index 198e06e7230e8..48d2089238c7f 100644 --- a/doc/src/sgml/ref/create_user.sgml +++ b/doc/src/sgml/ref/create_user.sgml @@ -49,7 +49,7 @@ CREATE USER name [ [ WITH ] CREATE USER is now an alias for - . 
+ CREATE ROLE. The only difference is that when the command is spelled CREATE USER, LOGIN is assumed by default, whereas NOLOGIN is assumed when diff --git a/doc/src/sgml/ref/create_user_mapping.sgml b/doc/src/sgml/ref/create_user_mapping.sgml index 9719a4ff2c0d5..55debd54012df 100644 --- a/doc/src/sgml/ref/create_user_mapping.sgml +++ b/doc/src/sgml/ref/create_user_mapping.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -CREATE USER MAPPING [ IF NOT EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } +CREATE USER MAPPING [ IF NOT EXISTS ] FOR { user_name | USER | CURRENT_ROLE | CURRENT_USER | PUBLIC } SERVER server_name [ OPTIONS ( option 'value' [ , ... ] ) ] @@ -67,7 +67,7 @@ CREATE USER MAPPING [ IF NOT EXISTS ] FOR { user_ The name of an existing user that is mapped to foreign server. - CURRENT_USER and USER match the name of + CURRENT_ROLE, CURRENT_USER, and USER match the name of the current user. When PUBLIC is specified, a so-called public mapping is created that is used when no user-specific mapping is applicable. diff --git a/doc/src/sgml/ref/create_view.sgml b/doc/src/sgml/ref/create_view.sgml index eb5591b63c73f..4b5b1cf795318 100644 --- a/doc/src/sgml/ref/create_view.sgml +++ b/doc/src/sgml/ref/create_view.sgml @@ -137,8 +137,8 @@ CREATE VIEW [ schema . ] view_namelocal or cascaded, and is equivalent to specifying WITH [ CASCADED | LOCAL ] CHECK OPTION (see below). - This option can be changed on existing views using . + This option can be changed on existing views using ALTER VIEW. @@ -160,8 +160,8 @@ CREATE VIEW [ schema . ] view_namequery - A or - command + A SELECT or + VALUES command which will provide the columns and rows of the view. @@ -245,7 +245,7 @@ CREATE VIEW [ schema . ] view_nameNotes - Use the + Use the DROP VIEW statement to drop views. 
diff --git a/doc/src/sgml/ref/createdb.sgml b/doc/src/sgml/ref/createdb.sgml index d3c92943f0714..86473455c9d04 100644 --- a/doc/src/sgml/ref/createdb.sgml +++ b/doc/src/sgml/ref/createdb.sgml @@ -46,7 +46,7 @@ PostgreSQL documentation createdb is a wrapper around the - SQL command . + SQL command CREATE DATABASE. There is no effective difference between creating databases via this utility and via other methods for accessing the server. @@ -197,7 +197,7 @@ PostgreSQL documentation The options , , , , and correspond to options of the underlying - SQL command ; see there for more information + SQL command CREATE DATABASE; see there for more information about them. @@ -284,6 +284,9 @@ PostgreSQL documentation database will be used; if that does not exist (or if it is the name of the new database being created), template1 will be used. + This can be a connection + string. If so, connection string parameters will override any + conflicting command line options. diff --git a/doc/src/sgml/ref/createuser.sgml b/doc/src/sgml/ref/createuser.sgml index 9d24df8b7a887..4d60dc2cda123 100644 --- a/doc/src/sgml/ref/createuser.sgml +++ b/doc/src/sgml/ref/createuser.sgml @@ -49,7 +49,7 @@ PostgreSQL documentation createuser is a wrapper around the - SQL command . + SQL command CREATE ROLE. There is no effective difference between creating users via this utility and via other methods for accessing the server. diff --git a/doc/src/sgml/ref/declare.sgml b/doc/src/sgml/ref/declare.sgml index d6177dcd9c44e..2152134635e46 100644 --- a/doc/src/sgml/ref/declare.sgml +++ b/doc/src/sgml/ref/declare.sgml @@ -39,7 +39,7 @@ DECLARE name [ BINARY ] [ INSENSITI can be used to retrieve a small number of rows at a time out of a larger query. After the cursor is created, rows are fetched from it using - . + FETCH. @@ -124,8 +124,8 @@ DECLARE name [ BINARY ] [ INSENSITI query - A or - command + A SELECT or + VALUES command which will provide the rows to be returned by the cursor. 
@@ -183,9 +183,9 @@ DECLARE name [ BINARY ] [ INSENSITI PostgreSQL reports an error if such a command is used outside a transaction block. Use - and - - (or ) + BEGIN and + COMMIT + (or ROLLBACK) to define a transaction block. @@ -244,7 +244,7 @@ DECLARE name [ BINARY ] [ INSENSITI If the cursor's query includes FOR UPDATE or FOR SHARE, then returned rows are locked at the time they are first fetched, in the same way as for a regular - command with + SELECT command with these options. In addition, the returned rows will be the most up-to-date versions; therefore these options provide the equivalent of what the SQL standard diff --git a/doc/src/sgml/ref/delete.sgml b/doc/src/sgml/ref/delete.sgml index ec3c40df2ea9d..1b81b4e7d743f 100644 --- a/doc/src/sgml/ref/delete.sgml +++ b/doc/src/sgml/ref/delete.sgml @@ -41,7 +41,7 @@ DELETE FROM [ ONLY ] table_name [ * - provides a + TRUNCATE provides a faster mechanism to remove all rows from a table. diff --git a/doc/src/sgml/ref/drop_group.sgml b/doc/src/sgml/ref/drop_group.sgml index 47d4a72121b6f..eb7dc182c82b3 100644 --- a/doc/src/sgml/ref/drop_group.sgml +++ b/doc/src/sgml/ref/drop_group.sgml @@ -30,7 +30,7 @@ DROP GROUP [ IF EXISTS ] name [, .. DROP GROUP is now an alias for - . + DROP ROLE. diff --git a/doc/src/sgml/ref/drop_language.sgml b/doc/src/sgml/ref/drop_language.sgml index 4705836ac79e8..8ba6621bc4af2 100644 --- a/doc/src/sgml/ref/drop_language.sgml +++ b/doc/src/sgml/ref/drop_language.sgml @@ -38,7 +38,7 @@ DROP [ PROCEDURAL ] LANGUAGE [ IF EXISTS ] name As of PostgreSQL 9.1, most procedural languages have been made into extensions, and should - therefore be removed with + therefore be removed with DROP EXTENSION not DROP LANGUAGE. 
diff --git a/doc/src/sgml/ref/drop_operator.sgml b/doc/src/sgml/ref/drop_operator.sgml index 2dff050ecf222..7bcdd082ae70d 100644 --- a/doc/src/sgml/ref/drop_operator.sgml +++ b/doc/src/sgml/ref/drop_operator.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP OPERATOR [ IF EXISTS ] name ( { left_type | NONE } , { right_type | NONE } ) [, ...] [ CASCADE | RESTRICT ] +DROP OPERATOR [ IF EXISTS ] name ( { left_type | NONE } , right_type ) [, ...] [ CASCADE | RESTRICT ] @@ -73,8 +73,7 @@ DROP OPERATOR [ IF EXISTS ] name ( right_type - The data type of the operator's right operand; write - NONE if the operator has no right operand. + The data type of the operator's right operand. @@ -113,24 +112,17 @@ DROP OPERATOR ^ (integer, integer); - Remove the left unary bitwise complement operator + Remove the bitwise-complement prefix operator ~b for type bit: DROP OPERATOR ~ (none, bit); - - Remove the right unary factorial operator x! - for type bigint: - -DROP OPERATOR ! (bigint, none); - - Remove multiple operators in one command: -DROP OPERATOR ~ (none, bit), ! (bigint, none); +DROP OPERATOR ~ (none, bit), ^ (integer, integer); diff --git a/doc/src/sgml/ref/drop_owned.sgml b/doc/src/sgml/ref/drop_owned.sgml index 09107bef64749..8fa8c414a10e4 100644 --- a/doc/src/sgml/ref/drop_owned.sgml +++ b/doc/src/sgml/ref/drop_owned.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP OWNED BY { name | CURRENT_USER | SESSION_USER } [, ...] [ CASCADE | RESTRICT ] +DROP OWNED BY { name | CURRENT_ROLE | CURRENT_USER | SESSION_USER } [, ...] [ CASCADE | RESTRICT ] @@ -90,7 +90,7 @@ DROP OWNED BY { name | CURRENT_USER - The command is an alternative that + The REASSIGN OWNED command is an alternative that reassigns the ownership of all the database objects owned by one or more roles. However, REASSIGN OWNED does not deal with privileges for other objects. 
diff --git a/doc/src/sgml/ref/drop_procedure.sgml b/doc/src/sgml/ref/drop_procedure.sgml index 6da266ae2dae9..bf2c6ce1aaa1a 100644 --- a/doc/src/sgml/ref/drop_procedure.sgml +++ b/doc/src/sgml/ref/drop_procedure.sgml @@ -67,8 +67,9 @@ DROP PROCEDURE [ IF EXISTS ] name [ - The mode of an argument: IN or VARIADIC. - If omitted, the default is IN. + The mode of an argument: IN, OUT, + INOUT, or VARIADIC. If omitted, + the default is IN. diff --git a/doc/src/sgml/ref/drop_role.sgml b/doc/src/sgml/ref/drop_role.sgml index 13079f3e1f4af..13dc1cc64998a 100644 --- a/doc/src/sgml/ref/drop_role.sgml +++ b/doc/src/sgml/ref/drop_role.sgml @@ -40,7 +40,9 @@ DROP ROLE [ IF EXISTS ] name [, ... of the cluster; an error will be raised if so. Before dropping the role, you must drop all the objects it owns (or reassign their ownership) and revoke any privileges the role has been granted on other objects. - The and + The REASSIGN + OWNED and DROP + OWNED commands can be useful for this purpose; see for more discussion. diff --git a/doc/src/sgml/ref/drop_table.sgml b/doc/src/sgml/ref/drop_table.sgml index bf8996d19858b..450458fd2a42d 100644 --- a/doc/src/sgml/ref/drop_table.sgml +++ b/doc/src/sgml/ref/drop_table.sgml @@ -32,8 +32,8 @@ DROP TABLE [ IF EXISTS ] name [, .. DROP TABLE removes tables from the database. Only the table owner, the schema owner, and superuser can drop a table. To empty a table of rows - without destroying the table, use - or . + without destroying the table, use DELETE + or TRUNCATE. diff --git a/doc/src/sgml/ref/drop_user.sgml b/doc/src/sgml/ref/drop_user.sgml index 37ab856125d10..74e736b0ebd8e 100644 --- a/doc/src/sgml/ref/drop_user.sgml +++ b/doc/src/sgml/ref/drop_user.sgml @@ -30,7 +30,7 @@ DROP USER [ IF EXISTS ] name [, ... DROP USER is simply an alternate spelling of - . + DROP ROLE. 
diff --git a/doc/src/sgml/ref/drop_user_mapping.sgml b/doc/src/sgml/ref/drop_user_mapping.sgml index 7cb09f1166ddb..9e8896a307f78 100644 --- a/doc/src/sgml/ref/drop_user_mapping.sgml +++ b/doc/src/sgml/ref/drop_user_mapping.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_USER | PUBLIC } SERVER server_name +DROP USER MAPPING [ IF EXISTS ] FOR { user_name | USER | CURRENT_ROLE | CURRENT_USER | PUBLIC } SERVER server_name @@ -59,7 +59,7 @@ DROP USER MAPPING [ IF EXISTS ] FOR { user_nameuser_name - User name of the mapping. CURRENT_USER + User name of the mapping. CURRENT_ROLE, CURRENT_USER, and USER match the name of the current user. PUBLIC is used to match all present and future user names in the system. diff --git a/doc/src/sgml/ref/dropdb.sgml b/doc/src/sgml/ref/dropdb.sgml index ded85b0e232df..d36aed38c527d 100644 --- a/doc/src/sgml/ref/dropdb.sgml +++ b/doc/src/sgml/ref/dropdb.sgml @@ -41,7 +41,7 @@ PostgreSQL documentation dropdb is a wrapper around the - SQL command . + SQL command DROP DATABASE. There is no effective difference between dropping databases via this utility and via other methods for accessing the server. @@ -217,6 +217,9 @@ PostgreSQL documentation target database. If not specified, the postgres database will be used; if that does not exist (or is the database being dropped), template1 will be used. + This can be a connection + string. If so, connection string parameters will override any + conflicting command line options. diff --git a/doc/src/sgml/ref/dropuser.sgml b/doc/src/sgml/ref/dropuser.sgml index f9aab340d3bad..81580507e8268 100644 --- a/doc/src/sgml/ref/dropuser.sgml +++ b/doc/src/sgml/ref/dropuser.sgml @@ -42,7 +42,7 @@ PostgreSQL documentation dropuser is a wrapper around the - SQL command . + SQL command DROP ROLE. There is no effective difference between dropping users via this utility and via other methods for accessing the server. 
diff --git a/doc/src/sgml/ref/end.sgml b/doc/src/sgml/ref/end.sgml index 8b8f4f0dbb9fc..498652919ad83 100644 --- a/doc/src/sgml/ref/end.sgml +++ b/doc/src/sgml/ref/end.sgml @@ -33,7 +33,7 @@ END [ WORK | TRANSACTION ] [ AND [ NO ] CHAIN ] made by the transaction become visible to others and are guaranteed to be durable if a crash occurs. This command is a PostgreSQL extension - that is equivalent to . + that is equivalent to COMMIT. @@ -69,7 +69,7 @@ END [ WORK | TRANSACTION ] [ AND [ NO ] CHAIN ] Notes - Use to + Use ROLLBACK to abort a transaction. @@ -94,8 +94,8 @@ END; END is a PostgreSQL - extension that provides functionality equivalent to , which is + extension that provides functionality equivalent to COMMIT, which is specified in the SQL standard. diff --git a/doc/src/sgml/ref/explain.sgml b/doc/src/sgml/ref/explain.sgml index 906b2ccd50a2f..b0ccdd26e7306 100644 --- a/doc/src/sgml/ref/explain.sgml +++ b/doc/src/sgml/ref/explain.sgml @@ -302,7 +302,7 @@ ROLLBACK; the autovacuum daemon will take care of that automatically. But if a table has recently had substantial changes in its contents, you might need to do a manual - rather than wait for autovacuum to catch up + ANALYZE rather than wait for autovacuum to catch up with the changes. diff --git a/doc/src/sgml/ref/fetch.sgml b/doc/src/sgml/ref/fetch.sgml index e802be61c8c6d..ec843f5684429 100644 --- a/doc/src/sgml/ref/fetch.sgml +++ b/doc/src/sgml/ref/fetch.sgml @@ -335,9 +335,9 @@ FETCH count - + DECLARE is used to define a cursor. Use - + MOVE to change cursor position without retrieving data. diff --git a/doc/src/sgml/ref/grant.sgml b/doc/src/sgml/ref/grant.sgml index bc573f7826b27..c3db393bdea70 100644 --- a/doc/src/sgml/ref/grant.sgml +++ b/doc/src/sgml/ref/grant.sgml @@ -87,6 +87,7 @@ GRANT role_name [, ...] TO role_name | PUBLIC + | CURRENT_ROLE | CURRENT_USER | SESSION_USER @@ -258,7 +259,7 @@ GRANT role_name [, ...] 
TO Notes - The command is used + The REVOKE command is used to revoke access privileges. diff --git a/doc/src/sgml/ref/lock.sgml b/doc/src/sgml/ref/lock.sgml index 0c4688603d9f9..4cdfae2279e3e 100644 --- a/doc/src/sgml/ref/lock.sgml +++ b/doc/src/sgml/ref/lock.sgml @@ -186,9 +186,9 @@ LOCK [ TABLE ] [ ONLY ] name [ * ] PostgreSQL reports an error if LOCK is used outside a transaction block. Use - and - - (or ) + BEGIN and + COMMIT + (or ROLLBACK) to define a transaction block. diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml index aa0b27c9f300f..e993e8761c13f 100644 --- a/doc/src/sgml/ref/pg_basebackup.sgml +++ b/doc/src/sgml/ref/pg_basebackup.sgml @@ -368,7 +368,7 @@ PostgreSQL documentation The following command-line options control the generation of the - backup and the running of the program: + backup and the invocation of the program: @@ -540,7 +540,7 @@ PostgreSQL documentation of each file for users who wish to verify that the backup has not been tampered with, while the CRC32C algorithm provides a checksum that is much faster to calculate; it is good at catching errors due to accidental - changes but is not resistant to targeted modifications. Note that, to + changes but is not resistant to malicious modifications. Note that, to be useful against an adversary who has access to the backup, the backup manifest would need to be stored securely elsewhere or otherwise verified not to have been modified since the backup was taken. @@ -653,8 +653,9 @@ PostgreSQL documentation - Specifies parameters used to connect to the server, as a connection - string. See for more information. + Specifies parameters used to connect to the server, as a connection string; these + will override any conflicting command line options. 
The option is called --dbname for consistency with other diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 0b2e2de87b6d8..0aa35cf0c3b4c 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -322,7 +322,7 @@ PostgreSQL documentation Run the dump in parallel by dumping njobs - tables simultaneously. This option reduces the time of the dump but it also + tables simultaneously. This option may reduce the time needed to perform the dump but it also increases the load on the database server. You can only use this option with the directory output format because this is the only output format where multiple processes can write their data at the same time. @@ -517,9 +517,7 @@ PostgreSQL documentation Dump only tables with names matching - pattern. - For this purpose, table includes views, materialized views, - sequences, and foreign tables. Multiple tables + pattern. Multiple tables can be selected by writing multiple switches. The pattern parameter is interpreted as a pattern according to the same rules used by @@ -531,6 +529,14 @@ PostgreSQL documentation below. + + As well as tables, this option can be used to dump the definition of matching + views, materialized views, foreign tables, and sequences. It will not dump the + contents of views or materialized views, and the contents of foreign tables will + only be dumped if the corresponding foreign server is specified with + . + + The and switches have no effect when is used, because tables selected by will @@ -548,18 +554,6 @@ PostgreSQL documentation - - - The behavior of the switch is not entirely upward - compatible with pre-8.2 PostgreSQL - versions. Formerly, writing -t tab would dump all - tables named tab, but now it just dumps whichever one - is visible in your default search path. To get the old behavior - you can write -t '*.tab'. 
Also, you must write something - like -t sch.tab to select a table in a particular schema, - rather than the old locution of -n sch -t tab. - - @@ -594,6 +588,8 @@ PostgreSQL documentation pg_dump to output detailed object comments and start/stop times to the dump file, and progress messages to standard error. + Repeating the option causes additional debug-level messages + to appear on standard error. @@ -1130,14 +1126,10 @@ PostgreSQL documentation Specifies the name of the database to connect to. This is equivalent to specifying dbname as the first non-option - argument on the command line. - - - If this parameter contains an = sign or starts - with a valid URI prefix - (postgresql:// - or postgres://), it is treated as a - conninfo string. See for more information. + argument on the command line. The dbname + can be a connection string. + If so, connection string parameters will override any conflicting + command line options. diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 43abc530a0f61..4360b2cf5773a 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -202,7 +202,9 @@ PostgreSQL documentation Specifies verbose mode. This will cause pg_dumpall to output start/stop times to the dump file, and progress messages to standard error. - It will also enable verbose output in pg_dump. + Repeating the option causes additional debug-level messages + to appear on standard error. + The option is also passed down to pg_dump. @@ -550,8 +552,9 @@ PostgreSQL documentation - Specifies parameters used to connect to the server, as a connection - string. See for more information. + Specifies parameters used to connect to the server, as a connection string; these + will override any conflicting command line options. 
The option is called --dbname for consistency with other diff --git a/doc/src/sgml/ref/pg_isready.sgml b/doc/src/sgml/ref/pg_isready.sgml index 3d5b551b87f26..ba25ca65a40e0 100644 --- a/doc/src/sgml/ref/pg_isready.sgml +++ b/doc/src/sgml/ref/pg_isready.sgml @@ -47,15 +47,11 @@ PostgreSQL documentation - Specifies the name of the database to connect to. - - - If this parameter contains an = sign or starts - with a valid URI prefix - (postgresql:// - or postgres://), it is treated as a - conninfo string. See for more information. + Specifies the name of the database to connect to. The + dbname can be a connection string. If so, + connection string parameters will override any conflicting command + line options. diff --git a/doc/src/sgml/ref/pg_receivewal.sgml b/doc/src/sgml/ref/pg_receivewal.sgml index 865ec84262190..26a66c0e19b89 100644 --- a/doc/src/sgml/ref/pg_receivewal.sgml +++ b/doc/src/sgml/ref/pg_receivewal.sgml @@ -252,8 +252,9 @@ PostgreSQL documentation - Specifies parameters used to connect to the server, as a connection - string. See for more information. + Specifies parameters used to connect to the server, as a connection string; these + will override any conflicting command line options. The option is called --dbname for consistency with other diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml index 41508fdc1e56c..6b1d98d06ef1f 100644 --- a/doc/src/sgml/ref/pg_recvlogical.sgml +++ b/doc/src/sgml/ref/pg_recvlogical.sgml @@ -273,14 +273,16 @@ PostgreSQL documentation - - + + - The database to connect to. See the description of the actions for - what this means in detail. This can be a libpq connection string; - see for more information. Defaults - to user name. + The database to connect to. See the description + of the actions for what this means in detail. + The dbname can be a connection string. If so, + connection string parameters will override any conflicting + command line options. Defaults to the user name. 
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 27eab2f02a522..93ea937ac8ea7 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -156,7 +156,10 @@ PostgreSQL documentation Connect to database dbname and restore directly - into the database. + into the database. The dbname can + be a connection string. + If so, connection string parameters will override any conflicting + command line options. @@ -483,7 +486,12 @@ PostgreSQL documentation - Specifies verbose mode. + Specifies verbose mode. This will cause + pg_restore to output detailed object + comments and start/stop times to the output file, and progress + messages to standard error. + Repeating the option causes additional debug-level messages + to appear on standard error. diff --git a/doc/src/sgml/ref/pg_rewind.sgml b/doc/src/sgml/ref/pg_rewind.sgml index ae23badc08c45..688acdcb06af8 100644 --- a/doc/src/sgml/ref/pg_rewind.sgml +++ b/doc/src/sgml/ref/pg_rewind.sgml @@ -211,7 +211,7 @@ PostgreSQL documentation pg_rewind to return without waiting, which is faster, but means that a subsequent operating system crash can leave the synchronized data directory corrupt. Generally, this option is - useful for testing but should not be used when creating a production + useful for testing but should not be used on a production installation. @@ -322,7 +322,7 @@ GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text, bigint, bigint, b When executing pg_rewind using an online cluster as source which has been recently promoted, it is necessary - to execute a CHECKPOINT after promotion so as its + to execute a CHECKPOINT after promotion such that its control file reflects up-to-date timeline information, which is used by pg_rewind to check if the target cluster can be rewound using the designated source cluster. 
diff --git a/doc/src/sgml/ref/pg_verifybackup.sgml b/doc/src/sgml/ref/pg_verifybackup.sgml index c160992e6d7d6..a0989d3cd165f 100644 --- a/doc/src/sgml/ref/pg_verifybackup.sgml +++ b/doc/src/sgml/ref/pg_verifybackup.sgml @@ -82,8 +82,8 @@ PostgreSQL documentation for any files for which the computed checksum does not match the checksum stored in the manifest. This step is not performed for any files which produced errors in the previous step, since they are already known - to have problems. Also, files which were ignored in the previous step are - also ignored in this step. + to have problems. Files which were ignored in the previous step are also + ignored in this step. @@ -121,7 +121,8 @@ PostgreSQL documentation Options - The following command-line options control the behavior. + pg_verifybackup accepts the following + command-line arguments: diff --git a/doc/src/sgml/ref/pgbench.sgml b/doc/src/sgml/ref/pgbench.sgml index 75575b6f06043..7180fedd65850 100644 --- a/doc/src/sgml/ref/pgbench.sgml +++ b/doc/src/sgml/ref/pgbench.sgml @@ -812,8 +812,8 @@ pgbench options d Common Options - pgbench accepts the following command-line - common arguments: + pgbench also accepts the following common command-line + arguments for connection parameters: diff --git a/doc/src/sgml/ref/postgres-ref.sgml b/doc/src/sgml/ref/postgres-ref.sgml index 806949df42b75..fda678e345c84 100644 --- a/doc/src/sgml/ref/postgres-ref.sgml +++ b/doc/src/sgml/ref/postgres-ref.sgml @@ -143,8 +143,8 @@ PostgreSQL documentation This option is meant for other programs that interact with a server instance, such as , to query configuration - parameter values. User-facing applications should instead use or the pg_settings view. + parameter values. User-facing applications should instead use SHOW or the pg_settings view. 
diff --git a/doc/src/sgml/ref/prepare.sgml b/doc/src/sgml/ref/prepare.sgml index 1e484f6d2021f..57a34ff83c792 100644 --- a/doc/src/sgml/ref/prepare.sgml +++ b/doc/src/sgml/ref/prepare.sgml @@ -66,7 +66,7 @@ PREPARE name [ ( command. + manually cleaned up using the DEALLOCATE command. @@ -163,7 +163,7 @@ PREPARE name [ ( To examine the query plan PostgreSQL is using - for a prepared statement, use , for example + for a prepared statement, use EXPLAIN, for example EXPLAIN EXECUTE name(parameter_values); diff --git a/doc/src/sgml/ref/prepare_transaction.sgml b/doc/src/sgml/ref/prepare_transaction.sgml index 18051983e160b..f4f6118ac3165 100644 --- a/doc/src/sgml/ref/prepare_transaction.sgml +++ b/doc/src/sgml/ref/prepare_transaction.sgml @@ -39,8 +39,8 @@ PREPARE TRANSACTION transaction_id Once prepared, a transaction can later be committed or rolled back - with - or , + with COMMIT PREPARED + or ROLLBACK PREPARED, respectively. Those commands can be issued from any session, not only the one that executed the original transaction. @@ -92,8 +92,8 @@ PREPARE TRANSACTION transaction_id - This command must be used inside a transaction block. Use to start one. + This command must be used inside a transaction block. Use BEGIN to start one. diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index ef18fe27e03c9..221a967bfe664 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -168,15 +168,10 @@ EOF Specifies the name of the database to connect to. This is equivalent to specifying dbname as the first non-option - argument on the command line. - - - If this parameter contains an = sign or starts - with a valid URI prefix - (postgresql:// - or postgres://), it is treated as a - conninfo string. See for more information. + argument on the command line. The dbname + can be a connection string. + If so, connection string parameters will override any conflicting + command line options. 
@@ -498,7 +493,7 @@ EOF Never issue a password prompt. If the server requires password - authentication and a password is not available by other means + authentication and a password is not available from other sources such as a .pgpass file, the connection attempt will fail. This option can be useful in batch jobs and scripts where no user is present to enter a password. @@ -518,13 +513,15 @@ EOF Force psql to prompt for a - password before connecting to a database. + password before connecting to a database, even if the password will + not be used. - This option is never essential, since psql - will automatically prompt for a password if the server demands - password authentication. However, psql + If the server requires password authentication and a password is not + available from other sources such as a .pgpass + file, psql will prompt for a + password in any case. However, psql will waste a connection attempt finding out that the server wants a password. In some cases it is worth typing to avoid the extra connection attempt. @@ -759,8 +756,8 @@ testdb=> Whenever a command is executed, psql also polls for asynchronous notification events generated by - and - . + LISTEN and + NOTIFY. @@ -892,40 +889,65 @@ testdb=> Establishes a new connection to a PostgreSQL server. The connection parameters to use can be specified either - using a positional syntax, or using conninfo connection - strings as detailed in . + using a positional syntax (one or more of database name, user, + host, and port), or using a conninfo + connection string as detailed in + . If no arguments are given, a + new connection is made using the same parameters as before. - Where the command omits database name, user, host, or port, the new - connection can reuse values from the previous connection. By default, - values from the previous connection are reused except when processing - a conninfo string. Passing a first argument - of -reuse-previous=on - or -reuse-previous=off overrides that default. 
- When the command neither specifies nor reuses a particular parameter, - the libpq default is used. Specifying any + Specifying any of dbname, username, host or port as - is equivalent to omitting that parameter. - If hostaddr was specified in the original - connection's conninfo, that address is reused - for the new connection (disregarding any other host specification). + + + + The new connection can re-use connection parameters from the previous + connection; not only database name, user, host, and port, but other + settings such as sslmode. By default, + parameters are re-used in the positional syntax, but not when + a conninfo string is given. Passing a + first argument of -reuse-previous=on + or -reuse-previous=off overrides that default. If + parameters are re-used, then any parameter not explicitly specified as + a positional parameter or in the conninfo + string is taken from the existing connection's parameters. An + exception is that if the host setting + is changed from its previous value using the positional syntax, + any hostaddr setting present in the + existing connection's parameters is dropped. + Also, any password used for the existing connection will be re-used + only if the user, host, and port settings are not changed. + When the command neither specifies nor reuses a particular parameter, + the libpq default is used. If the new connection is successfully made, the previous connection is closed. - If the connection attempt failed (wrong user name, access - denied, etc.), the previous connection will only be kept if - psql is in interactive mode. When - executing a non-interactive script, processing will - immediately stop with an error. This distinction was chosen as + If the connection attempt fails (wrong user name, access + denied, etc.), the previous connection will be kept if + psql is in interactive mode. But when + executing a non-interactive script, the old connection is closed + and an error is reported. 
That may or may not terminate the + script; if it does not, all database-accessing commands will fail + until another \connect command is successfully + executed. This distinction was chosen as a user convenience against typos on the one hand, and a safety mechanism that scripts are not accidentally acting on the wrong database on the other hand. + Note that whenever a \connect command attempts + to re-use parameters, the values re-used are those of the last + successful connection, not of any failed attempts made subsequently. + However, in the case of a + non-interactive \connect failure, no parameters + are allowed to be re-used later, since the script would likely be + expecting the values from the failed \connect + to be re-used. @@ -935,6 +957,7 @@ testdb=> => \c mydb myuser host.dom 6432 => \c service=foo => \c "host=localhost port=5432 dbname=mydb connect_timeout=10 sslmode=disable" +=> \c -reuse-previous=on sslmode=require -- changes only sslmode => \c postgresql://tom@localhost/mydb?application_name=myapp @@ -996,7 +1019,7 @@ testdb=> Performs a frontend (client) copy. This is an operation that - runs an SQL + runs an SQL COPY command, but instead of the server reading or writing the specified file, psql reads or writes the file and @@ -1033,9 +1056,9 @@ testdb=> The syntax of this command is similar to that of the - SQL + SQL COPY command. All options other than the data source/destination are - as specified for . + as specified for COPY. Because of this, special parsing rules apply to the \copy meta-command. Unlike most other meta-commands, the entire remainder of the line is always taken to be the arguments of \copy, @@ -1400,8 +1423,8 @@ testdb=> - Descriptions for objects can be created with the + Descriptions for objects can be created with the COMMENT SQL command. @@ -1438,9 +1461,9 @@ testdb=> - The command is used to set - default access privileges. 
The meaning of the - privilege display is explained in + The ALTER DEFAULT + PRIVILEGES command is used to set default access + privileges. The meaning of the privilege display is explained in . @@ -1754,8 +1777,8 @@ testdb=> - The and - + The GRANT and + REVOKE commands are used to set access privileges. The meaning of the privilege display is explained in . @@ -1810,8 +1833,8 @@ testdb=> - The and - + The ALTER ROLE and + ALTER DATABASE commands are used to define per-role and per-database configuration settings. @@ -3182,7 +3205,7 @@ lo_import 152801 This command is unrelated to the SQL - command . + command SET. diff --git a/doc/src/sgml/ref/reassign_owned.sgml b/doc/src/sgml/ref/reassign_owned.sgml index 42f72a726fd1d..ab692bd06908b 100644 --- a/doc/src/sgml/ref/reassign_owned.sgml +++ b/doc/src/sgml/ref/reassign_owned.sgml @@ -21,8 +21,8 @@ PostgreSQL documentation -REASSIGN OWNED BY { old_role | CURRENT_USER | SESSION_USER } [, ...] - TO { new_role | CURRENT_USER | SESSION_USER } +REASSIGN OWNED BY { old_role | CURRENT_ROLE | CURRENT_USER | SESSION_USER } [, ...] + TO { new_role | CURRENT_ROLE | CURRENT_USER | SESSION_USER } @@ -82,7 +82,7 @@ REASSIGN OWNED BY { old_role | CURR - The command is an alternative that + The DROP OWNED command is an alternative that simply drops all the database objects owned by one or more roles. diff --git a/doc/src/sgml/ref/refresh_materialized_view.sgml b/doc/src/sgml/ref/refresh_materialized_view.sgml index 8ae62671adab9..3bf888444782f 100644 --- a/doc/src/sgml/ref/refresh_materialized_view.sgml +++ b/doc/src/sgml/ref/refresh_materialized_view.sgml @@ -94,7 +94,7 @@ REFRESH MATERIALIZED VIEW [ CONCURRENTLY ] name While the default index for future - + CLUSTER operations is retained, REFRESH MATERIALIZED VIEW does not order the generated rows based on this property. 
If you want the data to be ordered upon generation, you must use an ORDER BY diff --git a/doc/src/sgml/ref/reindex.sgml b/doc/src/sgml/ref/reindex.sgml index 33af4ae02a139..fa43e3a972023 100644 --- a/doc/src/sgml/ref/reindex.sgml +++ b/doc/src/sgml/ref/reindex.sgml @@ -252,7 +252,7 @@ REINDEX [ ( option [, ...] ) ] { IN Reindexing a single index or table requires being the owner of that index or table. Reindexing a schema or database requires being the - owner of that schema or database. Note that is therefore sometimes + owner of that schema or database. Note specifically that it's thus possible for non-superusers to rebuild indexes of tables owned by other users. However, as a special exception, when REINDEX DATABASE, REINDEX SCHEMA diff --git a/doc/src/sgml/ref/reindexdb.sgml b/doc/src/sgml/ref/reindexdb.sgml index 026fd018d93e9..574144533378d 100644 --- a/doc/src/sgml/ref/reindexdb.sgml +++ b/doc/src/sgml/ref/reindexdb.sgml @@ -93,7 +93,7 @@ PostgreSQL documentation reindexdb is a wrapper around the SQL - command . + command REINDEX. There is no effective difference between reindexing databases via this utility and via other methods for accessing the server. @@ -134,12 +134,15 @@ PostgreSQL documentation - Specifies the name of the database to be reindexed. - If this is not specified and (or - ) is not used, the database name is read + Specifies the name of the database to be reindexed, + when / is not used. + If this is not specified, the database name is read from the environment variable PGDATABASE. If that is not set, the user name specified for the connection is - used. + used. The dbname can be a connection string. If so, + connection string parameters will override any conflicting command + line options. @@ -174,8 +177,8 @@ PostgreSQL documentation Execute the reindex commands in parallel by running njobs - commands simultaneously. This option reduces the time of the - processing but it also increases the load on the database server. 
+ commands simultaneously. This option may reduce the processing time + but it also increases the load on the database server. reindexdb will open @@ -348,10 +351,16 @@ PostgreSQL documentation - Specifies the name of the database to connect to discover what other - databases should be reindexed. If not specified, the - postgres database will be used, - and if that does not exist, template1 will be used. + Specifies the name of the database to connect to, to discover which + databases should be reindexed, + when / is used. + If not specified, the postgres database will be used, + or if that does not exist, template1 will be used. + This can be a connection + string. If so, connection string parameters will override any + conflicting command line options. Also, connection string parameters + other than the database name itself will be re-used when connecting + to other databases. diff --git a/doc/src/sgml/ref/revoke.sgml b/doc/src/sgml/ref/revoke.sgml index b6bac21c57a3a..35ff87a4f5e26 100644 --- a/doc/src/sgml/ref/revoke.sgml +++ b/doc/src/sgml/ref/revoke.sgml @@ -114,6 +114,7 @@ REVOKE [ ADMIN OPTION FOR ] [ GROUP ] role_name | PUBLIC + | CURRENT_ROLE | CURRENT_USER | SESSION_USER @@ -130,7 +131,7 @@ REVOKE [ ADMIN OPTION FOR ] - See the description of the command for + See the description of the GRANT command for the meaning of the privilege types. @@ -291,7 +292,7 @@ REVOKE admins FROM joe; Compatibility - The compatibility notes of the command + The compatibility notes of the GRANT command apply analogously to REVOKE. The keyword RESTRICT or CASCADE is required according to the standard, but PostgreSQL diff --git a/doc/src/sgml/ref/rollback.sgml b/doc/src/sgml/ref/rollback.sgml index 1357eaa8323ac..142f71e77425d 100644 --- a/doc/src/sgml/ref/rollback.sgml +++ b/doc/src/sgml/ref/rollback.sgml @@ -70,7 +70,7 @@ ROLLBACK [ WORK | TRANSACTION ] [ AND [ NO ] CHAIN ] Notes - Use to + Use COMMIT to successfully terminate a transaction. 
diff --git a/doc/src/sgml/ref/rollback_to.sgml b/doc/src/sgml/ref/rollback_to.sgml index 4d5647a302e2a..3d5a241e1aa98 100644 --- a/doc/src/sgml/ref/rollback_to.sgml +++ b/doc/src/sgml/ref/rollback_to.sgml @@ -64,7 +64,7 @@ ROLLBACK [ WORK | TRANSACTION ] TO [ SAVEPOINT ] savepoint_nameNotes - Use to destroy a savepoint + Use RELEASE SAVEPOINT to destroy a savepoint without discarding the effects of commands executed after it was established. diff --git a/doc/src/sgml/ref/savepoint.sgml b/doc/src/sgml/ref/savepoint.sgml index 87243b1d2046f..b17342a1ee6a5 100644 --- a/doc/src/sgml/ref/savepoint.sgml +++ b/doc/src/sgml/ref/savepoint.sgml @@ -64,8 +64,8 @@ SAVEPOINT savepoint_name Notes - Use to - rollback to a savepoint. Use + Use ROLLBACK TO to + rollback to a savepoint. Use RELEASE SAVEPOINT to destroy a savepoint, keeping the effects of commands executed after it was established. diff --git a/doc/src/sgml/ref/security_label.sgml b/doc/src/sgml/ref/security_label.sgml index e9688cce214b2..9b87bcd51961c 100644 --- a/doc/src/sgml/ref/security_label.sgml +++ b/doc/src/sgml/ref/security_label.sgml @@ -127,11 +127,12 @@ SECURITY LABEL [ FOR provider ] ON argument: IN, OUT, INOUT, or VARIADIC. If omitted, the default is IN. - Note that SECURITY LABEL does not actually - pay any attention to OUT arguments, since only the input - arguments are needed to determine the function's identity. - So it is sufficient to list the IN, INOUT, - and VARIADIC arguments. + Note that SECURITY LABEL does not actually pay any + attention to OUT arguments for functions and + aggregates (but not procedures), since only the input arguments are + needed to determine the function's identity. So it is sufficient to + list the IN, INOUT, and + VARIADIC arguments for functions and aggregates. 
diff --git a/doc/src/sgml/ref/select.sgml b/doc/src/sgml/ref/select.sgml index b93e4ca208b1f..472b7cae812bd 100644 --- a/doc/src/sgml/ref/select.sgml +++ b/doc/src/sgml/ref/select.sgml @@ -38,7 +38,7 @@ SELECT [ ALL | DISTINCT [ ON ( expressionfrom_item [, ...] ] [ WHERE condition ] [ GROUP BY grouping_element [, ...] ] - [ HAVING condition [, ...] ] + [ HAVING condition ] [ WINDOW window_name AS ( window_definition ) [, ...] ] [ { UNION | INTERSECT | EXCEPT } [ ALL | DISTINCT ] select ] [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ] @@ -446,7 +446,7 @@ TABLE [ ONLY ] table_name [ * ] sub-SELECT must be surrounded by parentheses, and an alias must be provided for it. A - command + VALUES command can also be used here. @@ -1534,7 +1534,7 @@ KEY SHARE to the row-level lock(s) — the required ROW SHARE table-level lock is still taken in the ordinary way (see ). You can use - + LOCK with the NOWAIT option first, if you need to acquire the table-level lock without waiting. diff --git a/doc/src/sgml/ref/select_into.sgml b/doc/src/sgml/ref/select_into.sgml index b1af52a4da127..7b327d9eeef32 100644 --- a/doc/src/sgml/ref/select_into.sgml +++ b/doc/src/sgml/ref/select_into.sgml @@ -28,7 +28,7 @@ SELECT [ ALL | DISTINCT [ ON ( expressionfrom_item [, ...] ] [ WHERE condition ] [ GROUP BY expression [, ...] ] - [ HAVING condition [, ...] ] + [ HAVING condition ] [ WINDOW window_name AS ( window_definition ) [, ...] ] [ { UNION | INTERSECT | EXCEPT } [ ALL | DISTINCT ] select ] [ ORDER BY expression [ ASC | DESC | USING operator ] [ NULLS { FIRST | LAST } ] [, ...] ] @@ -95,7 +95,7 @@ SELECT [ ALL | DISTINCT [ ON ( expressionNotes - is functionally similar to + CREATE TABLE AS is functionally similar to SELECT INTO. 
CREATE TABLE AS is the recommended syntax, since this form of SELECT INTO is not available in ECPG @@ -109,8 +109,8 @@ SELECT [ ALL | DISTINCT [ ON ( expressionCREATE TABLE AS, SELECT INTO does not allow to specify properties like a table's access method with or the table's - tablespace with . Use if necessary. Therefore, the default table + tablespace with . Use + CREATE TABLE AS if necessary. Therefore, the default table access method is chosen for the new table. See for more information. diff --git a/doc/src/sgml/ref/set_role.sgml b/doc/src/sgml/ref/set_role.sgml index a4842f363c8be..739f2c5cdfa5c 100644 --- a/doc/src/sgml/ref/set_role.sgml +++ b/doc/src/sgml/ref/set_role.sgml @@ -48,7 +48,7 @@ RESET ROLE The SESSION and LOCAL modifiers act the same - as for the regular + as for the regular SET command. @@ -82,7 +82,7 @@ RESET ROLE SET ROLE has effects comparable to - , but the privilege + SET SESSION AUTHORIZATION, but the privilege checks involved are quite different. Also, SET SESSION AUTHORIZATION determines which roles are allowable for later SET ROLE commands, whereas changing @@ -92,7 +92,7 @@ RESET ROLE SET ROLE does not process session variables as specified by - the role's settings; this only happens during + the role's ALTER ROLE settings; this only happens during login. diff --git a/doc/src/sgml/ref/set_session_auth.sgml b/doc/src/sgml/ref/set_session_auth.sgml index 6a838e58b7643..e44e78ed8d67e 100644 --- a/doc/src/sgml/ref/set_session_auth.sgml +++ b/doc/src/sgml/ref/set_session_auth.sgml @@ -45,7 +45,7 @@ RESET SESSION AUTHORIZATION identifier is normally equal to the session user identifier, but might change temporarily in the context of SECURITY DEFINER functions and similar mechanisms; it can also be changed by - . + SET ROLE. The current user identifier is relevant for permission checking. @@ -58,7 +58,7 @@ RESET SESSION AUTHORIZATION The SESSION and LOCAL modifiers act the same - as for the regular + as for the regular SET command. 
diff --git a/doc/src/sgml/ref/start_transaction.sgml b/doc/src/sgml/ref/start_transaction.sgml index d6cd1d4177921..74ccd7e3456cd 100644 --- a/doc/src/sgml/ref/start_transaction.sgml +++ b/doc/src/sgml/ref/start_transaction.sgml @@ -37,8 +37,8 @@ START TRANSACTION [ transaction_mode This command begins a new transaction block. If the isolation level, read/write mode, or deferrable mode is specified, the new transaction has those - characteristics, as if was executed. This is the same - as the command. + characteristics, as if SET TRANSACTION was executed. This is the same + as the BEGIN command. diff --git a/doc/src/sgml/ref/truncate.sgml b/doc/src/sgml/ref/truncate.sgml index 5922ee579e110..91cdac5562330 100644 --- a/doc/src/sgml/ref/truncate.sgml +++ b/doc/src/sgml/ref/truncate.sgml @@ -160,8 +160,7 @@ TRUNCATE [ TABLE ] [ ONLY ] name [ When RESTART IDENTITY is specified, the implied ALTER SEQUENCE RESTART operations are also done transactionally; that is, they will be rolled back if the surrounding - transaction does not commit. This is unlike the normal behavior of - ALTER SEQUENCE RESTART. Be aware that if any additional + transaction does not commit. Be aware that if any additional sequence operations are done on the restarted sequences before the transaction rolls back, the effects of these operations on the sequences will be rolled back, but not their effects on currval(); diff --git a/doc/src/sgml/ref/vacuum.sgml b/doc/src/sgml/ref/vacuum.sgml index a48f75ad7baf2..21ab57d880489 100644 --- a/doc/src/sgml/ref/vacuum.sgml +++ b/doc/src/sgml/ref/vacuum.sgml @@ -235,22 +235,22 @@ VACUUM [ FULL ] [ FREEZE ] [ VERBOSE ] [ ANALYZE ] [ integer background workers (for the details of each vacuum phase, please - refer to ). In plain VACUUM - (without FULL), if the PARALLEL option - is omitted, then the number of workers is determined based on the number of - indexes on the relation that support parallel vacuum operation and is further - limited by . 
An index - can participate in parallel vacuum if and only if the size of the index is - more than . Please note - that it is not guaranteed that the number of parallel workers specified in - integer will be used during - execution. It is possible for a vacuum to run with fewer workers than - specified, or even with no workers at all. Only one worker can be used per - index. So parallel workers are launched only when there are at least - 2 indexes in the table. Workers for vacuum are launched - before the start of each phase and exit at the end of the phase. These - behaviors might change in a future release. This option can't be used with - the FULL option. + refer to ). The number of workers used + to perform the operation is equal to the number of indexes on the + relation that support parallel vacuum which is limited by the number of + workers specified with PARALLEL option if any which is + further limited by . + An index can participate in parallel vacuum if and only if the size of the + index is more than . + Please note that it is not guaranteed that the number of parallel workers + specified in integer will be + used during execution. It is possible for a vacuum to run with fewer + workers than specified, or even with no workers at all. Only one worker + can be used per index. So parallel workers are launched only when there + are at least 2 indexes in the table. Workers for + vacuum are launched before the start of each phase and exit at the end of + the phase. These behaviors might change in a future release. This + option can't be used with the FULL option. diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml index 95d6894cb03a6..a90fc9322f99e 100644 --- a/doc/src/sgml/ref/vacuumdb.sgml +++ b/doc/src/sgml/ref/vacuumdb.sgml @@ -62,7 +62,7 @@ PostgreSQL documentation vacuumdb is a wrapper around the SQL - command . + command VACUUM. 
There is no effective difference between vacuuming and analyzing databases via this utility and via other methods for accessing the server. @@ -92,12 +92,15 @@ PostgreSQL documentation - Specifies the name of the database to be cleaned or analyzed. - If this is not specified and (or - ) is not used, the database name is read + Specifies the name of the database to be cleaned or analyzed, + when / is not used. + If this is not specified, the database name is read from the environment variable PGDATABASE. If that is not set, the user name specified for the connection is - used. + used. The dbname can be a connection string. If so, + connection string parameters will override any conflicting command + line options. @@ -155,8 +158,8 @@ PostgreSQL documentation Execute the vacuum or analyze commands in parallel by running njobs - commands simultaneously. This option reduces the time of the - processing but it also increases the load on the database server. + commands simultaneously. This option may reduce the processing time + but it also increases the load on the database server. vacuumdb will open @@ -471,10 +474,16 @@ PostgreSQL documentation - Specifies the name of the database to connect to discover what other - databases should be vacuumed. If not specified, the - postgres database will be used, - and if that does not exist, template1 will be used. + Specifies the name of the database to connect to to discover which + databases should be vacuumed, + when / is used. + If not specified, the postgres database will be used, + or if that does not exist, template1 will be used. + This can be a connection + string. If so, connection string parameters will override any + conflicting command line options. Also, connection string parameters + other than the database name itself will be re-used when connecting + to other databases. 
diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml index bcf860b68b9c2..e81addcfa9bb9 100644 --- a/doc/src/sgml/rules.sgml +++ b/doc/src/sgml/rules.sgml @@ -769,7 +769,7 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; - The benefit of implementing views with the rule system is, + The benefit of implementing views with the rule system is that the planner has all the information about which tables have to be scanned plus the relationships between these tables plus the restrictive @@ -781,7 +781,7 @@ SELECT t1.a, t2.b, t1.ctid FROM t1, t2 WHERE t1.a = t2.a; the best path to execute the query, and the more information the planner has, the better this decision can be. And the rule system as implemented in PostgreSQL - ensures, that this is all information available about the query + ensures that this is all information available about the query up to that point. @@ -2087,7 +2087,7 @@ CREATE FUNCTION tricky(text, text) RETURNS bool AS $$ BEGIN RAISE NOTICE '% => %', $1, $2; RETURN true; -END +END; $$ LANGUAGE plpgsql COST 0.0000000000000000000001; SELECT * FROM phone_number WHERE tricky(person, phone); diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index f5842319358b5..17e938148c5cd 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -2345,9 +2345,8 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 The clientcert authentication option is available for all authentication methods, but only in pg_hba.conf lines specified as hostssl. When clientcert is - not specified or is set to no-verify, the server will still - verify any presented client certificates against its CA file, if one is - configured — but it will not insist that a client certificate be presented. + not specified, the server verifies the client certificate against its CA + file only if a client certificate is presented and the CA is configured. 
@@ -2575,7 +2574,7 @@ openssl x509 -req -in server.csr -text -days 365 \ The PostgreSQL server will listen for both normal and GSSAPI-encrypted connections on the same TCP - port, and will negotiate with any connecting client on whether to + port, and will negotiate with any connecting client whether to use GSSAPI for encryption (and for authentication). By default, this decision is up to the client (which means it can be downgraded by an attacker); see about @@ -2611,34 +2610,39 @@ openssl x509 -req -in server.csr -text -days 365 \ First make sure that an SSH server is running properly on the same machine as the PostgreSQL server and that you can log in using - ssh as some user. Then you can establish a secure - tunnel with a command like this from the client machine: + ssh as some user; you then can establish a + secure tunnel to the remote server. A secure tunnel listens on a + local port and forwards all traffic to a port on the remote machine. + Traffic sent to the remote port can arrive on its + localhost address, or different bind + address if desired; it does not appear as coming from your + local machine. This command creates a secure tunnel from the client + machine to the remote machine foo.com: ssh -L 63333:localhost:5432 joe@foo.com The first number in the argument, 63333, is the - port number of your end of the tunnel; it can be any unused port. - (IANA reserves ports 49152 through 65535 for private use.) The - second number, 5432, is the remote end of the tunnel: the port - number your server is using. The name or IP address between the - port numbers is the host with the database server you are going to - connect to, as seen from the host you are logging in to, which - is foo.com in this example. In order to connect - to the database server using this tunnel, you connect to port 63333 - on the local machine: + local port number of the tunnel; it can be any unused port. (IANA + reserves ports 49152 through 65535 for private use.) 
The name or IP + address after this is the remote bind address you are connecting to, + i.e., localhost, which is the default. The second + number, 5432, is the remote end of the tunnel, e.g., the port number + your database server is using. In order to connect to the database + server using this tunnel, you connect to port 63333 on the local + machine: psql -h localhost -p 63333 postgres - To the database server it will then look as though you are really + To the database server it will then look as though you are user joe on host foo.com - connecting to localhost in that context, and it + connecting to the localhost bind address, and it will use whatever authentication procedure was configured for - connections from this user and host. Note that the server will not + connections by that user to that bind address. Note that the server will not think the connection is SSL-encrypted, since in fact it is not encrypted between the SSH server and the PostgreSQL server. This should not pose any - extra security risk as long as they are on the same machine. + extra security risk because they are on the same machine. @@ -2650,12 +2654,12 @@ psql -h localhost -p 63333 postgres - You could also have set up the port forwarding as + You could also have set up port forwarding as ssh -L 63333:foo.com:5432 joe@foo.com but then the database server will see the connection as coming in - on its foo.com interface, which is not opened by + on its foo.com bind address, which is not opened by the default setting listen_addresses = 'localhost'. This is usually not what you want. diff --git a/doc/src/sgml/seg.sgml b/doc/src/sgml/seg.sgml index e86142d885a51..e0dfbc76cf350 100644 --- a/doc/src/sgml/seg.sgml +++ b/doc/src/sgml/seg.sgml @@ -205,8 +205,8 @@ test=> select '6.25 .. 6.50'::seg as "pH";
- Because ... is widely used in data sources, it is allowed - as an alternative spelling of ... Unfortunately, this + Because the ... operator is widely used in data sources, it is allowed + as an alternative spelling of the .. operator. Unfortunately, this creates a parsing ambiguity: it is not clear whether the upper bound in 0...23 is meant to be 23 or 0.23. This is resolved by requiring at least one digit before the decimal diff --git a/doc/src/sgml/sepgsql.sgml b/doc/src/sgml/sepgsql.sgml index 9961569afc8ae..e896a44ce5917 100644 --- a/doc/src/sgml/sepgsql.sgml +++ b/doc/src/sgml/sepgsql.sgml @@ -51,7 +51,7 @@ - The statement allows assignment of + The SECURITY LABEL statement allows assignment of a security label to a database object. @@ -451,7 +451,7 @@ UPDATE t1 SET x = 2, y = func1(y) WHERE z = 100; - additionally requires + CREATE DATABASE additionally requires getattr permission for the source or template database. @@ -509,7 +509,7 @@ UPDATE t1 SET x = 2, y = func1(y) WHERE z = 100; - Using on an object additionally + Using SECURITY LABEL on an object additionally requires relabelfrom permission for the object in conjunction with its old security label and relabelto permission for the object in conjunction with its new security label. @@ -644,7 +644,7 @@ ERROR: SELinux: security policy violation Miscellaneous - We reject the command across the board, because + We reject the LOAD command across the board, because any module loaded could easily circumvent security policy enforcement. diff --git a/doc/src/sgml/sources.sgml b/doc/src/sgml/sources.sgml index 998e7d5fba196..38a516080c189 100644 --- a/doc/src/sgml/sources.sgml +++ b/doc/src/sgml/sources.sgml @@ -373,7 +373,7 @@ ereport(ERROR, specify suppression of the CONTEXT: portion of a message in the postmaster log. This should only be used for verbose debugging messages where the repeated inclusion of context would bloat the log - volume too much. + too much. 
@@ -518,7 +518,7 @@ Hint: the addendum Use of Quotes - Use quotes always to delimit file names, user-supplied identifiers, and + Always use quotes to delimit file names, user-supplied identifiers, and other variables that might contain words. Do not use them to mark up variables that will not contain words (for example, operator names). diff --git a/doc/src/sgml/sslinfo.sgml b/doc/src/sgml/sslinfo.sgml index 7d3fcb716707a..e16f61b41d72c 100644 --- a/doc/src/sgml/sslinfo.sgml +++ b/doc/src/sgml/sslinfo.sgml @@ -173,8 +173,8 @@ This function returns the value of the specified field in the certificate subject, or NULL if the field is not present. - Field names are string constants that are - converted into ASN1 object identifiers using the OpenSSL object + Field names are string constants that are converted into ASN1 object + identifiers using the OpenSSL object database. The following values are acceptable: diff --git a/doc/src/sgml/syntax.sgml b/doc/src/sgml/syntax.sgml index b0ae5d2e127e1..3fdd87823e008 100644 --- a/doc/src/sgml/syntax.sgml +++ b/doc/src/sgml/syntax.sgml @@ -836,7 +836,7 @@ CAST ( 'string' AS type ) When working with non-SQL-standard operator names, you will usually need to separate adjacent operators with spaces to avoid ambiguity. 
- For example, if you have defined a left unary operator named @, + For example, if you have defined a prefix operator named @, you cannot write X*@Y; you must write X* @Y to ensure that PostgreSQL reads it as two operator names @@ -1444,11 +1444,10 @@ $1.somecolumn - There are three possible syntaxes for an operator invocation: + There are two possible syntaxes for an operator invocation: expression operator expression (binary infix operator) operator expression (unary prefix operator) - expression operator (unary postfix operator) where the operator token follows the syntax rules of , or is one of the diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml index 2ebdf02bfaf07..7bd8d53dc4e0b 100644 --- a/doc/src/sgml/textsearch.sgml +++ b/doc/src/sgml/textsearch.sgml @@ -1623,9 +1623,9 @@ occurrences to display in the result.', SELECT to_tsquery('fat') <-> to_tsquery('cat | rat'); - ?column? ------------------------------------ - 'fat' <-> 'cat' | 'fat' <-> 'rat' + ?column? +---------------------------- + 'fat' <-> ( 'cat' | 'rat' ) @@ -1645,7 +1645,7 @@ SELECT to_tsquery('fat') <-> to_tsquery('cat | rat'); Returns a query that searches for a match to the first given query - followed by a match to the second given query at a distance of at + followed by a match to the second given query at a distance of exactly distance lexemes, using the <N> tsquery operator. 
For example: @@ -2419,7 +2419,7 @@ ALTER TEXT SEARCH CONFIGURATION astro_en positions in tsvector, which in turn affect ranking: -SELECT to_tsvector('english','in the list of stop words'); +SELECT to_tsvector('english', 'in the list of stop words'); to_tsvector ---------------------------- 'list':3 'stop':5 'word':6 @@ -2429,12 +2429,12 @@ SELECT to_tsvector('english','in the list of stop words'); calculated for documents with and without stop words are quite different: -SELECT ts_rank_cd (to_tsvector('english','in the list of stop words'), to_tsquery('list & stop')); +SELECT ts_rank_cd (to_tsvector('english', 'in the list of stop words'), to_tsquery('list & stop')); ts_rank_cd ------------ 0.05 -SELECT ts_rank_cd (to_tsvector('english','list stop words'), to_tsquery('list & stop')); +SELECT ts_rank_cd (to_tsvector('english', 'list stop words'), to_tsquery('list & stop')); ts_rank_cd ------------ 0.1 @@ -2493,12 +2493,12 @@ CREATE TEXT SEARCH DICTIONARY public.simple_dict ( Now we can test our dictionary: -SELECT ts_lexize('public.simple_dict','YeS'); +SELECT ts_lexize('public.simple_dict', 'YeS'); ts_lexize ----------- {yes} -SELECT ts_lexize('public.simple_dict','The'); +SELECT ts_lexize('public.simple_dict', 'The'); ts_lexize ----------- {} @@ -2514,12 +2514,12 @@ SELECT ts_lexize('public.simple_dict','The'); ALTER TEXT SEARCH DICTIONARY public.simple_dict ( Accept = false ); -SELECT ts_lexize('public.simple_dict','YeS'); +SELECT ts_lexize('public.simple_dict', 'YeS'); ts_lexize ----------- -SELECT ts_lexize('public.simple_dict','The'); +SELECT ts_lexize('public.simple_dict', 'The'); ts_lexize ----------- {} @@ -2633,7 +2633,7 @@ indices index* Then we will get these results: mydb=# CREATE TEXT SEARCH DICTIONARY syn (template=synonym, synonyms='synonym_sample'); -mydb=# SELECT ts_lexize('syn','indices'); +mydb=# SELECT ts_lexize('syn', 'indices'); ts_lexize ----------- {index} @@ -2641,13 +2641,13 @@ mydb=# SELECT ts_lexize('syn','indices'); mydb=# CREATE TEXT 
SEARCH CONFIGURATION tst (copy=simple); mydb=# ALTER TEXT SEARCH CONFIGURATION tst ALTER MAPPING FOR asciiword WITH syn; -mydb=# SELECT to_tsvector('tst','indices'); +mydb=# SELECT to_tsvector('tst', 'indices'); to_tsvector ------------- 'index':1 (1 row) -mydb=# SELECT to_tsquery('tst','indices'); +mydb=# SELECT to_tsquery('tst', 'indices'); to_tsquery ------------ 'index':* @@ -2659,7 +2659,7 @@ mydb=# SELECT 'indexes are very useful'::tsvector; 'are' 'indexes' 'useful' 'very' (1 row) -mydb=# SELECT 'indexes are very useful'::tsvector @@ to_tsquery('tst','indices'); +mydb=# SELECT 'indexes are very useful'::tsvector @@ to_tsquery('tst', 'indices'); ?column? ---------- t @@ -3354,7 +3354,7 @@ ts_debug( config re Here is a simple example: -SELECT * FROM ts_debug('english','a fat cat sat on a mat - it ate a fat rats'); +SELECT * FROM ts_debug('english', 'a fat cat sat on a mat - it ate a fat rats'); alias | description | token | dictionaries | dictionary | lexemes -----------+-----------------+-------+----------------+--------------+--------- asciiword | Word, all ASCII | a | {english_stem} | english_stem | {} @@ -3405,7 +3405,7 @@ ALTER TEXT SEARCH CONFIGURATION public.english -SELECT * FROM ts_debug('public.english','The Brightest supernovaes'); +SELECT * FROM ts_debug('public.english', 'The Brightest supernovaes'); alias | description | token | dictionaries | dictionary | lexemes -----------+-----------------+-------------+-------------------------------+----------------+------------- asciiword | Word, all ASCII | The | {english_ispell,english_stem} | english_ispell | {} @@ -3444,7 +3444,7 @@ SELECT * FROM ts_debug('public.english','The Brightest supernovaes'); SELECT alias, token, dictionary, lexemes -FROM ts_debug('public.english','The Brightest supernovaes'); +FROM ts_debug('public.english', 'The Brightest supernovaes'); alias | token | dictionary | lexemes -----------+-------------+----------------+------------- asciiword | The | english_ispell | {} @@ 
-3592,7 +3592,7 @@ SELECT ts_lexize('english_stem', 'a'); where this can be confusing: -SELECT ts_lexize('thesaurus_astro','supernovae stars') is null; +SELECT ts_lexize('thesaurus_astro', 'supernovae stars') is null; ?column? ---------- t diff --git a/doc/src/sgml/tsm-system-rows.sgml b/doc/src/sgml/tsm-system-rows.sgml index 071ff301d07b6..d960aa3e0fbc6 100644 --- a/doc/src/sgml/tsm-system-rows.sgml +++ b/doc/src/sgml/tsm-system-rows.sgml @@ -10,7 +10,7 @@ The tsm_system_rows module provides the table sampling method SYSTEM_ROWS, which can be used in - the TABLESAMPLE clause of a + the TABLESAMPLE clause of a SELECT command. diff --git a/doc/src/sgml/tsm-system-time.sgml b/doc/src/sgml/tsm-system-time.sgml index cd074926d85a5..df6e83a9236ea 100644 --- a/doc/src/sgml/tsm-system-time.sgml +++ b/doc/src/sgml/tsm-system-time.sgml @@ -10,7 +10,7 @@ The tsm_system_time module provides the table sampling method SYSTEM_TIME, which can be used in - the TABLESAMPLE clause of a + the TABLESAMPLE clause of a SELECT command. diff --git a/doc/src/sgml/typeconv.sgml b/doc/src/sgml/typeconv.sgml index 98662fc91fb6d..cfeb851a507e1 100644 --- a/doc/src/sgml/typeconv.sgml +++ b/doc/src/sgml/typeconv.sgml @@ -97,8 +97,8 @@ Operators PostgreSQL allows expressions with -prefix and postfix unary (one-argument) operators, -as well as binary (two-argument) operators. Like functions, operators can +prefix (one-argument) operators, +as well as infix (two-argument) operators. Like functions, operators can be overloaded, so the same problem of selecting the right operator exists. @@ -266,7 +266,7 @@ create objects. In such situations, cast arguments to force an exact match. If one argument of a binary operator invocation is of the unknown type, then assume it is the same type as the other argument for this check. 
-Invocations involving two unknown inputs, or a unary operator +Invocations involving two unknown inputs, or a prefix operator with an unknown input, will never find a match at this step. diff --git a/doc/src/sgml/user-manag.sgml b/doc/src/sgml/user-manag.sgml index 829decd883904..cc082521a2ae6 100644 --- a/doc/src/sgml/user-manag.sgml +++ b/doc/src/sgml/user-manag.sgml @@ -51,8 +51,8 @@ operating system users. In practice it might be convenient to maintain a correspondence, but this is not required. Database roles are global across a database cluster installation (and not - per individual database). To create a role use the SQL command: + per individual database). To create a role use the CREATE ROLE SQL command: CREATE ROLE name; @@ -61,7 +61,7 @@ CREATE ROLE name; double-quoted. (In practice, you will usually want to add additional options, such as LOGIN, to the command. More details appear below.) To remove an existing role, use the analogous - command: + DROP ROLE command: DROP ROLE name; @@ -303,8 +303,8 @@ CREATE ROLE name; Once the group role exists, you can add and remove members using the - and - commands: + GRANT and + REVOKE commands: GRANT group_role TO role1, ... ; REVOKE group_role FROM role1, ... ; @@ -319,7 +319,7 @@ REVOKE group_role FROM role1 The members of a group role can use the privileges of the role in two ways. First, every member of a group can explicitly do - to + SET ROLE to temporarily become the group role. In this state, the database session has access to the privileges of the group role rather than the original login role, and any database objects created are @@ -402,8 +402,8 @@ RESET ROLE; - To destroy a group role, use : + To destroy a group role, use DROP ROLE: DROP ROLE name; @@ -418,7 +418,7 @@ DROP ROLE name; Because roles can own database objects and can hold privileges to access other objects, dropping a role is often not just a matter of a - quick . Any objects owned by the role must + quick DROP ROLE. 
Any objects owned by the role must first be dropped or reassigned to other owners; and any permissions granted to the role must be revoked. @@ -429,7 +429,7 @@ DROP ROLE name; ALTER TABLE bobs_table OWNER TO alice; - Alternatively, the command can be + Alternatively, the REASSIGN OWNED command can be used to reassign ownership of all objects owned by the role-to-be-dropped to a single other role. Because REASSIGN OWNED cannot access objects in other databases, it is necessary to run it in each database @@ -442,7 +442,7 @@ ALTER TABLE bobs_table OWNER TO alice; Once any valuable objects have been transferred to new owners, any remaining objects owned by the role-to-be-dropped can be dropped with - the command. Again, this command cannot + the DROP OWNED command. Again, this command cannot access objects in other databases, so it is necessary to run it in each database that contains objects owned by the role. Also, DROP OWNED will not drop entire databases or tablespaces, so it is @@ -598,7 +598,7 @@ DROP ROLE doomed_role; Administrators can grant access to these roles to users using the - command, for example: + GRANT command, for example: GRANT pg_signal_backend TO admin_user; diff --git a/doc/src/sgml/xaggr.sgml b/doc/src/sgml/xaggr.sgml index f035866848c85..36c2d21101b9f 100644 --- a/doc/src/sgml/xaggr.sgml +++ b/doc/src/sgml/xaggr.sgml @@ -490,7 +490,7 @@ SELECT percentile_disc(0.5) WITHIN GROUP (ORDER BY income) FROM households; Also, because the final function performs the sort, it is not possible to continue adding input rows by executing the transition function again later. This means the final function is not READ_ONLY; - it must be declared in + it must be declared in CREATE AGGREGATE as READ_WRITE, or as SHAREABLE if it's possible for additional final-function calls to make use of the already-sorted state. 
diff --git a/doc/src/sgml/xfunc.sgml b/doc/src/sgml/xfunc.sgml index 0f60a4a0ab6dd..2863f7c20657f 100644 --- a/doc/src/sgml/xfunc.sgml +++ b/doc/src/sgml/xfunc.sgml @@ -85,7 +85,7 @@ that a procedure does not return a value, so there is no return type declaration. While a function is called as part of a query or DML command, a procedure is called in isolation using - the command. If the CALL command is not + the CALL command. If the CALL command is not part of an explicit transaction, a procedure in many server-side languages can commit, rollback, and begin new transactions during its execution, which is not possible in functions. @@ -94,15 +94,15 @@ The explanations on how to define user-defined functions in the rest of this chapter apply to procedures as well, except that - the command is used instead, there is + the CREATE PROCEDURE command is used instead, there is no return type, and some other features such as strictness don't apply. Collectively, functions and procedures are also known as routinesroutine. - There are commands such as - and that can operate on functions and + There are commands such as ALTER ROUTINE + and DROP ROUTINE that can operate on functions and procedures without having to know which kind it is. Note, however, that there is no CREATE ROUTINE command. @@ -179,6 +179,24 @@ SELECT clean_emp(); + + You can also write this as a procedure, thus avoiding the issue of the + return type. For example: + +CREATE PROCEDURE clean_emp() AS ' + DELETE FROM emp + WHERE salary < 0; +' LANGUAGE SQL; + +CALL clean_emp(); + + In simple cases like this, the difference between a function returning + void and a procedure is mostly stylistic. However, + procedures offer additional functionality such as transaction control + that is not available in functions. Also, procedures are SQL standard + whereas returning void is a PostgreSQL extension. 
+ + The entire body of a SQL function is parsed before any of it is @@ -716,6 +734,47 @@ DROP FUNCTION sum_n_product (int, int); + + <acronym>SQL</acronym> Procedures with Output Parameters + + + procedures + output parameter + + + + Output parameters are also supported in procedures, but they work a bit + differently from functions. Notably, output parameters + are included in the signature of a procedure and + must be specified in the procedure call. + + + + For example, the bank account debiting routine from earlier could be + written like this: + +CREATE PROCEDURE tp1 (accountno integer, debit numeric, OUT new_balance numeric) AS $$ + UPDATE bank + SET balance = balance - debit + WHERE accountno = tp1.accountno + RETURNING balance; +$$ LANGUAGE SQL; + + To call this procedure, it is irrelevant what is passed as the argument + of the OUT parameter, so you could pass + NULL: + +CALL tp1(17, 100.0, NULL); + + + + + Procedures with output parameters are more useful in PL/pgSQL, where the + output parameters can be assigned to variables. See for details. + + + <acronym>SQL</acronym> Functions with Variable Numbers of Arguments @@ -1531,7 +1590,7 @@ CREATE FUNCTION test(int, int) RETURNS int Every function has a volatility classification, with the possibilities being VOLATILE, STABLE, or IMMUTABLE. VOLATILE is the default if the - + CREATE FUNCTION command does not specify a category. The volatility category is a promise to the optimizer about the behavior of the function: @@ -3432,7 +3491,7 @@ if (!ptr) Some basic facts can be supplied by declarative annotations provided in - the command. Most important of + the CREATE FUNCTION command. 
Most important of these is the function's volatility category (IMMUTABLE, STABLE, or VOLATILE); one should always be careful to diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 2cfd71b5b77a7..609fa35d4cad7 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -400,22 +400,22 @@ specified by the index method. + + Additionally, some opclasses allow users to specify parameters which + control their behavior. Each builtin index access method has an optional + options support function, which defines a set of + opclass-specific parameters. + + B-trees require a comparison support function, - and allow three additional support functions to be + and allow four additional support functions to be supplied at the operator class author's option, as shown in . The requirements for these support functions are explained further in . - - Additionally, some opclasses allow users to specify parameters which - control their behavior. Each builtin index access method has an optional - options support function, which defines a set of - opclass-specific parameters. - - B-Tree Support Functions @@ -469,8 +469,8 @@
- Hash indexes require one support function, and allow a second one to be - supplied at the operator class author's option, as shown in . @@ -511,7 +511,7 @@ - GiST indexes have nine support functions, two of which are optional, + GiST indexes have ten support functions, three of which are optional, as shown in . (For more information see .) @@ -594,7 +594,7 @@ - SP-GiST indexes require five support functions, as + SP-GiST indexes have six support functions, one of which is optional, as shown in . (For more information see .) @@ -653,7 +653,7 @@ - GIN indexes have six support functions, three of which are optional, + GIN indexes have seven support functions, four of which are optional, as shown in . (For more information see .) @@ -730,9 +730,9 @@ - BRIN indexes have four basic support functions, as shown in - ; those basic functions - may require additional support functions to be provided. + BRIN indexes have five basic support functions, one of which is optional, + as shown in . Some versions of + the basic functions require additional support functions to be provided. (For more information see .) diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index 56b08491c96c9..98f4c5c4aa46a 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -20,8 +20,8 @@
- PostgreSQL supports left unary, right - unary, and binary operators. Operators can be + PostgreSQL supports prefix + and infix operators. Operators can be overloaded;overloadingoperators that is, the same operator name can be used for different operators that have different numbers and types of operands. When a query is @@ -64,9 +64,9 @@ SELECT (a + b) AS c FROM test_complex; - We've shown how to create a binary operator here. To create unary - operators, just omit one of leftarg (for left unary) or - rightarg (for right unary). The function + We've shown how to create a binary operator here. To create a prefix + operator, just omit the leftarg. + The function clause and the argument clauses are the only required items in CREATE OPERATOR. The commutator clause shown in the example is an optional hint to the query @@ -202,7 +202,7 @@ SELECT (a + b) AS c FROM test_complex; Unlike commutators, a pair of unary operators could validly be marked as each other's negators; that would mean (A x) equals NOT (B x) - for all x, or the equivalent for right unary operators. + for all x. diff --git a/doc/src/sgml/xplang.sgml b/doc/src/sgml/xplang.sgml index 7f1409305629c..31d403c4806b7 100644 --- a/doc/src/sgml/xplang.sgml +++ b/doc/src/sgml/xplang.sgml @@ -103,7 +103,7 @@ CREATE FUNCTION handler_function_name() Optionally, the language handler can provide an inline handler function that executes anonymous code blocks - ( commands) + (DO commands) written in this language. 
If an inline handler function is provided by the language, declare it with a command like diff --git a/src/Makefile b/src/Makefile index bcdbd9588aa43..79e274a4769be 100644 --- a/src/Makefile +++ b/src/Makefile @@ -66,13 +66,11 @@ clean: $(MAKE) -C test $@ $(MAKE) -C tutorial NO_PGXS=1 $@ $(MAKE) -C test/isolation $@ - $(MAKE) -C test/thread $@ distclean maintainer-clean: $(MAKE) -C test $@ $(MAKE) -C tutorial NO_PGXS=1 $@ $(MAKE) -C test/isolation $@ - $(MAKE) -C test/thread $@ rm -f Makefile.port Makefile.global diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README index 125a82219b9ed..41d4e1e8a0937 100644 --- a/src/backend/access/gin/README +++ b/src/backend/access/gin/README @@ -413,7 +413,7 @@ leftmost leaf of the tree. Deletion algorithm keeps exclusive locks on left siblings of pages comprising currently investigated path. Thus, if current page is to be removed, all required pages to remove both downlink and rightlink are already locked. That -evades potential right to left page locking order, which could deadlock with +avoids potential right to left page locking order, which could deadlock with concurrent stepping right. A search concurrent to page deletion might already have read a pointer to the diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 671b5e9186ff0..9d3fa9c3b75b2 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -3,6 +3,24 @@ * gistbuild.c * build algorithm for GiST indexes implementation. * + * There are two different strategies: + * + * 1. Sort all input tuples, pack them into GiST leaf pages in the sorted + * order, and create downlinks and internal pages as we go. This builds + * the index from the bottom up, similar to how B-tree index build + * works. + * + * 2. Start with an empty index, and insert all tuples one by one. + * + * The sorted method is used if the operator classes for all columns have + * a 'sortsupport' defined. 
Otherwise, we resort to the second strategy. + * + * The second strategy can optionally use buffers at different levels of + * the tree to reduce I/O, see "Buffering build algorithm" in the README + * for a more detailed explanation. It initially calls insert over and + * over, but switches to the buffered algorithm after a certain number of + * tuples (unless buffering mode is disabled). + * * * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -28,6 +46,7 @@ #include "storage/smgr.h" #include "utils/memutils.h" #include "utils/rel.h" +#include "utils/tuplesort.h" /* Step of index tuples for check whether to switch to buffering build mode */ #define BUFFERING_MODE_SWITCH_CHECK_STEP 256 @@ -40,8 +59,14 @@ */ #define BUFFERING_MODE_TUPLE_SIZE_STATS_TARGET 4096 +/* + * Strategy used to build the index. It can change between the + * GIST_BUFFERING_* modes on the fly, but if the Sorted method is used, + * that needs to be decided up-front and cannot be changed afterwards. + */ typedef enum { + GIST_SORTED_BUILD, /* bottom-up build by sorting */ GIST_BUFFERING_DISABLED, /* in regular build mode and aren't going to * switch */ GIST_BUFFERING_AUTO, /* in regular build mode, but will switch to @@ -51,7 +76,7 @@ typedef enum * before switching to the buffering build * mode */ GIST_BUFFERING_ACTIVE /* in buffering build mode */ -} GistBufferingMode; +} GistBuildMode; /* Working state for gistbuild and its callback */ typedef struct @@ -60,23 +85,58 @@ typedef struct Relation heaprel; GISTSTATE *giststate; - int64 indtuples; /* number of tuples indexed */ - int64 indtuplesSize; /* total size of all indexed tuples */ - Size freespace; /* amount of free space to leave on pages */ + GistBuildMode buildMode; + + int64 indtuples; /* number of tuples indexed */ + /* * Extra data structures used during a buffering build. 'gfbb' contains * information related to managing the build buffers. 
'parentMap' is a
 * lookup table of the parent of each internal page.
 */
+ int64 indtuplesSize; /* total size of all indexed tuples */
 GISTBuildBuffers *gfbb;
 HTAB *parentMap;
- GistBufferingMode bufferingMode;
+ /*
+ * Extra data structures used during a sorting build.
+ */
+ Tuplesortstate *sortstate; /* state data for tuplesort.c */
+
+ BlockNumber pages_allocated;
+ BlockNumber pages_written;
+
+ int ready_num_pages;
+ BlockNumber ready_blknos[XLR_MAX_BLOCK_ID];
+ Page ready_pages[XLR_MAX_BLOCK_ID];
 } GISTBuildState;
+/*
+ * In sorted build, we use a stack of these structs, one for each level,
+ * to hold an in-memory buffer of the rightmost page at the level. When the
+ * page fills up, it is written out and a new page is allocated.
+ */
+typedef struct GistSortedBuildPageState
+{
+ Page page;
+ struct GistSortedBuildPageState *parent; /* Upper level, if any */
+} GistSortedBuildPageState;
+
 /* prototypes for private functions */
+
+static void gistSortedBuildCallback(Relation index, ItemPointer tid,
+ Datum *values, bool *isnull,
+ bool tupleIsAlive, void *state);
+static void gist_indexsortbuild(GISTBuildState *state);
+static void gist_indexsortbuild_pagestate_add(GISTBuildState *state,
+ GistSortedBuildPageState *pagestate,
+ IndexTuple itup);
+static void gist_indexsortbuild_pagestate_flush(GISTBuildState *state,
+ GistSortedBuildPageState *pagestate);
+static void gist_indexsortbuild_flush_ready_pages(GISTBuildState *state);
+
 static void gistInitBuffering(GISTBuildState *buildstate);
 static int calculatePagesPerBuffer(GISTBuildState *buildstate, int levelStep);
 static void gistBuildCallback(Relation index,
@@ -107,10 +167,9 @@ static void gistMemorizeParent(GISTBuildState *buildstate, BlockNumber child,
 static void gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parent);
 static BlockNumber gistGetParent(GISTBuildState *buildstate, BlockNumber child);
+
 /*
- * Main entry point to GiST index build. 
Initially calls insert over and over, - * but switches to more efficient buffering build algorithm after a certain - * number of tuples (unless buffering mode is disabled). + * Main entry point to GiST index build. */ IndexBuildResult * gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) @@ -118,124 +177,425 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo) IndexBuildResult *result; double reltuples; GISTBuildState buildstate; - Buffer buffer; - Page page; MemoryContext oldcxt = CurrentMemoryContext; int fillfactor; + Oid SortSupportFnOids[INDEX_MAX_KEYS]; + GiSTOptions *options = (GiSTOptions *) index->rd_options; + + /* + * We expect to be called exactly once for any index relation. If that's + * not the case, big trouble's what we have. + */ + if (RelationGetNumberOfBlocks(index) != 0) + elog(ERROR, "index \"%s\" already contains data", + RelationGetRelationName(index)); buildstate.indexrel = index; buildstate.heaprel = heap; + buildstate.sortstate = NULL; + buildstate.giststate = initGISTstate(index); - if (index->rd_options) - { - /* Get buffering mode from the options string */ - GiSTOptions *options = (GiSTOptions *) index->rd_options; + /* + * Create a temporary memory context that is reset once for each tuple + * processed. (Note: we don't bother to make this a child of the + * giststate's scanCxt, so we have to delete it separately at the end.) + */ + buildstate.giststate->tempCxt = createTempGistContext(); + /* + * Choose build strategy. First check whether the user specified to use + * buffering mode. (The use-case for that in the field is somewhat + * questionable perhaps, but it's important for testing purposes.) 
+ */ + if (options) + { if (options->buffering_mode == GIST_OPTION_BUFFERING_ON) - buildstate.bufferingMode = GIST_BUFFERING_STATS; + buildstate.buildMode = GIST_BUFFERING_STATS; else if (options->buffering_mode == GIST_OPTION_BUFFERING_OFF) - buildstate.bufferingMode = GIST_BUFFERING_DISABLED; - else - buildstate.bufferingMode = GIST_BUFFERING_AUTO; - - fillfactor = options->fillfactor; + buildstate.buildMode = GIST_BUFFERING_DISABLED; + else /* must be "auto" */ + buildstate.buildMode = GIST_BUFFERING_AUTO; } else { - /* - * By default, switch to buffering mode when the index grows too large - * to fit in cache. - */ - buildstate.bufferingMode = GIST_BUFFERING_AUTO; - fillfactor = GIST_DEFAULT_FILLFACTOR; + buildstate.buildMode = GIST_BUFFERING_AUTO; + } + + /* + * Unless buffering mode was forced, see if we can use sorting instead. + */ + if (buildstate.buildMode != GIST_BUFFERING_STATS) + { + bool hasallsortsupports = true; + int keyscount = IndexRelationGetNumberOfKeyAttributes(index); + + for (int i = 0; i < keyscount; i++) + { + SortSupportFnOids[i] = index_getprocid(index, i + 1, + GIST_SORTSUPPORT_PROC); + if (!OidIsValid(SortSupportFnOids[i])) + { + hasallsortsupports = false; + break; + } + } + if (hasallsortsupports) + buildstate.buildMode = GIST_SORTED_BUILD; } - /* Calculate target amount of free space to leave on pages */ + + /* + * Calculate target amount of free space to leave on pages. + */ + fillfactor = options ? options->fillfactor : GIST_DEFAULT_FILLFACTOR; buildstate.freespace = BLCKSZ * (100 - fillfactor) / 100; /* - * We expect to be called exactly once for any index relation. If that's - * not the case, big trouble's what we have. + * Build the index using the chosen strategy. 
*/ - if (RelationGetNumberOfBlocks(index) != 0) - elog(ERROR, "index \"%s\" already contains data", - RelationGetRelationName(index)); + buildstate.indtuples = 0; + buildstate.indtuplesSize = 0; - /* no locking is needed */ - buildstate.giststate = initGISTstate(index); + if (buildstate.buildMode == GIST_SORTED_BUILD) + { + /* + * Sort all data, build the index from bottom up. + */ + buildstate.sortstate = tuplesort_begin_index_gist(heap, + index, + maintenance_work_mem, + NULL, + false); + + /* Scan the table, adding all tuples to the tuplesort */ + reltuples = table_index_build_scan(heap, index, indexInfo, true, true, + gistSortedBuildCallback, + (void *) &buildstate, NULL); + + /* + * Perform the sort and build index pages. + */ + tuplesort_performsort(buildstate.sortstate); + + gist_indexsortbuild(&buildstate); + + tuplesort_end(buildstate.sortstate); + } + else + { + /* + * Initialize an empty index and insert all tuples, possibly using + * buffers on intermediate levels. + */ + Buffer buffer; + Page page; + + /* initialize the root page */ + buffer = gistNewBuffer(index); + Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO); + page = BufferGetPage(buffer); + + START_CRIT_SECTION(); + + GISTInitBuffer(buffer, F_LEAF); + + MarkBufferDirty(buffer); + PageSetLSN(page, GistBuildLSN); + + UnlockReleaseBuffer(buffer); + + END_CRIT_SECTION(); + + /* Scan the table, inserting all the tuples to the index. */ + reltuples = table_index_build_scan(heap, index, indexInfo, true, true, + gistBuildCallback, + (void *) &buildstate, NULL); + + /* + * If buffering was used, flush out all the tuples that are still in + * the buffers. + */ + if (buildstate.buildMode == GIST_BUFFERING_ACTIVE) + { + elog(DEBUG1, "all tuples processed, emptying buffers"); + gistEmptyAllBuffers(&buildstate); + gistFreeBuildBuffers(buildstate.gfbb); + } + + /* + * We didn't write WAL records as we built the index, so if + * WAL-logging is required, write all pages to the WAL now. 
+ */ + if (RelationNeedsWAL(index)) + { + log_newpage_range(index, MAIN_FORKNUM, + 0, RelationGetNumberOfBlocks(index), + true); + } + } + + /* okay, all heap tuples are indexed */ + MemoryContextSwitchTo(oldcxt); + MemoryContextDelete(buildstate.giststate->tempCxt); + + freeGISTstate(buildstate.giststate); /* - * Create a temporary memory context that is reset once for each tuple - * processed. (Note: we don't bother to make this a child of the - * giststate's scanCxt, so we have to delete it separately at the end.) + * Return statistics */ - buildstate.giststate->tempCxt = createTempGistContext(); + result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); - /* initialize the root page */ - buffer = gistNewBuffer(index); - Assert(BufferGetBlockNumber(buffer) == GIST_ROOT_BLKNO); - page = BufferGetPage(buffer); + result->heap_tuples = reltuples; + result->index_tuples = (double) buildstate.indtuples; + + return result; +} + +/*------------------------------------------------------------------------- + * Routines for sorted build + *------------------------------------------------------------------------- + */ + +/* + * Per-tuple callback for table_index_build_scan. 
+ */ +static void +gistSortedBuildCallback(Relation index, + ItemPointer tid, + Datum *values, + bool *isnull, + bool tupleIsAlive, + void *state) +{ + GISTBuildState *buildstate = (GISTBuildState *) state; + MemoryContext oldCtx; + Datum compressed_values[INDEX_MAX_KEYS]; - START_CRIT_SECTION(); + oldCtx = MemoryContextSwitchTo(buildstate->giststate->tempCxt); - GISTInitBuffer(buffer, F_LEAF); + /* Form an index tuple and point it at the heap tuple */ + gistCompressValues(buildstate->giststate, index, + values, isnull, + true, compressed_values); - MarkBufferDirty(buffer); - PageSetLSN(page, GistBuildLSN); + tuplesort_putindextuplevalues(buildstate->sortstate, + buildstate->indexrel, + tid, + compressed_values, isnull); - UnlockReleaseBuffer(buffer); + MemoryContextSwitchTo(oldCtx); + MemoryContextReset(buildstate->giststate->tempCxt); - END_CRIT_SECTION(); + /* Update tuple count. */ + buildstate->indtuples += 1; +} - /* build the index */ - buildstate.indtuples = 0; - buildstate.indtuplesSize = 0; +/* + * Build GiST index from bottom up from pre-sorted tuples. + */ +static void +gist_indexsortbuild(GISTBuildState *state) +{ + IndexTuple itup; + GistSortedBuildPageState *leafstate; + GistSortedBuildPageState *pagestate; + Page page; + + state->pages_allocated = 0; + state->pages_written = 0; + state->ready_num_pages = 0; /* - * Do the heap scan. + * Write an empty page as a placeholder for the root page. It will be + * replaced with the real root page at the end. */ - reltuples = table_index_build_scan(heap, index, indexInfo, true, true, - gistBuildCallback, - (void *) &buildstate, NULL); + page = palloc0(BLCKSZ); + RelationOpenSmgr(state->indexrel); + smgrextend(state->indexrel->rd_smgr, MAIN_FORKNUM, GIST_ROOT_BLKNO, + page, true); + state->pages_allocated++; + state->pages_written++; + + /* Allocate a temporary buffer for the first leaf page. 
*/
+ leafstate = palloc(sizeof(GistSortedBuildPageState));
+ leafstate->page = page;
+ leafstate->parent = NULL;
+ gistinitpage(page, F_LEAF);
 /*
- * If buffering was used, flush out all the tuples that are still in the
- * buffers.
+ * Fill index pages with tuples in the sorted order.
 */
- if (buildstate.bufferingMode == GIST_BUFFERING_ACTIVE)
+ while ((itup = tuplesort_getindextuple(state->sortstate, true)) != NULL)
 {
- elog(DEBUG1, "all tuples processed, emptying buffers");
- gistEmptyAllBuffers(&buildstate);
- gistFreeBuildBuffers(buildstate.gfbb);
+ gist_indexsortbuild_pagestate_add(state, leafstate, itup);
+ MemoryContextReset(state->giststate->tempCxt);
 }
- /* okay, all heap tuples are indexed */
- MemoryContextSwitchTo(oldcxt);
- MemoryContextDelete(buildstate.giststate->tempCxt);
+ /*
+ * Write out the partially full non-root pages.
+ *
+ * Keep in mind that flush can build a new root.
+ */
+ pagestate = leafstate;
+ while (pagestate->parent != NULL)
+ {
+ GistSortedBuildPageState *parent;
- freeGISTstate(buildstate.giststate);
+ gist_indexsortbuild_pagestate_flush(state, pagestate);
+ parent = pagestate->parent;
+ pfree(pagestate->page);
+ pfree(pagestate);
+ pagestate = parent;
+ }
+
+ gist_indexsortbuild_flush_ready_pages(state);
+
+ /* Write out the root */
+ RelationOpenSmgr(state->indexrel);
+ PageSetLSN(pagestate->page, GistBuildLSN);
+ PageSetChecksumInplace(pagestate->page, GIST_ROOT_BLKNO);
+ smgrwrite(state->indexrel->rd_smgr, MAIN_FORKNUM, GIST_ROOT_BLKNO,
+ pagestate->page, true);
+ if (RelationNeedsWAL(state->indexrel))
+ log_newpage(&state->indexrel->rd_node, MAIN_FORKNUM, GIST_ROOT_BLKNO,
+ pagestate->page, true);
+
+ pfree(pagestate->page);
+ pfree(pagestate);
+}
+
+/*
+ * Add tuple to a page. If the page is full, write it out and re-initialize
+ * a new page first. 
+ */ +static void +gist_indexsortbuild_pagestate_add(GISTBuildState *state, + GistSortedBuildPageState *pagestate, + IndexTuple itup) +{ + Size sizeNeeded; + + /* Does the tuple fit? If not, flush */ + sizeNeeded = IndexTupleSize(itup) + sizeof(ItemIdData) + state->freespace; + if (PageGetFreeSpace(pagestate->page) < sizeNeeded) + gist_indexsortbuild_pagestate_flush(state, pagestate); + + gistfillbuffer(pagestate->page, &itup, 1, InvalidOffsetNumber); +} + +static void +gist_indexsortbuild_pagestate_flush(GISTBuildState *state, + GistSortedBuildPageState *pagestate) +{ + GistSortedBuildPageState *parent; + IndexTuple *itvec; + IndexTuple union_tuple; + int vect_len; + bool isleaf; + BlockNumber blkno; + MemoryContext oldCtx; + + /* check once per page */ + CHECK_FOR_INTERRUPTS(); + + if (state->ready_num_pages == XLR_MAX_BLOCK_ID) + gist_indexsortbuild_flush_ready_pages(state); + + /* + * The page is now complete. Assign a block number to it, and add it to + * the list of finished pages. (We don't write it out immediately, because + * we want to WAL-log the pages in batches.) + */ + blkno = state->pages_allocated++; + state->ready_blknos[state->ready_num_pages] = blkno; + state->ready_pages[state->ready_num_pages] = pagestate->page; + state->ready_num_pages++; + + isleaf = GistPageIsLeaf(pagestate->page); + + /* + * Form a downlink tuple to represent all the tuples on the page. + */ + oldCtx = MemoryContextSwitchTo(state->giststate->tempCxt); + itvec = gistextractpage(pagestate->page, &vect_len); + union_tuple = gistunion(state->indexrel, itvec, vect_len, + state->giststate); + ItemPointerSetBlockNumber(&(union_tuple->t_tid), blkno); + MemoryContextSwitchTo(oldCtx); /* - * We didn't write WAL records as we built the index, so if WAL-logging is - * required, write all pages to the WAL now. + * Insert the downlink to the parent page. If this was the root, create a + * new page as the parent, which becomes the new root. 
*/ - if (RelationNeedsWAL(index)) + parent = pagestate->parent; + if (parent == NULL) { - log_newpage_range(index, MAIN_FORKNUM, - 0, RelationGetNumberOfBlocks(index), - true); + parent = palloc(sizeof(GistSortedBuildPageState)); + parent->page = (Page) palloc(BLCKSZ); + parent->parent = NULL; + gistinitpage(parent->page, 0); + + pagestate->parent = parent; } + gist_indexsortbuild_pagestate_add(state, parent, union_tuple); + + /* Re-initialize the page buffer for next page on this level. */ + pagestate->page = palloc(BLCKSZ); + gistinitpage(pagestate->page, isleaf ? F_LEAF : 0); /* - * Return statistics + * Set the right link to point to the previous page. This is just for + * debugging purposes: GiST only follows the right link if a page is split + * concurrently to a scan, and that cannot happen during index build. + * + * It's a bit counterintuitive that we set the right link on the new page + * to point to the previous page, and not the other way round. But GiST + * pages are not ordered like B-tree pages are, so as long as the + * right-links form a chain through all the pages in the same level, the + * order doesn't matter. */ - result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); + GistPageGetOpaque(pagestate->page)->rightlink = blkno; +} - result->heap_tuples = reltuples; - result->index_tuples = (double) buildstate.indtuples; +static void +gist_indexsortbuild_flush_ready_pages(GISTBuildState *state) +{ + if (state->ready_num_pages == 0) + return; - return result; + RelationOpenSmgr(state->indexrel); + + for (int i = 0; i < state->ready_num_pages; i++) + { + Page page = state->ready_pages[i]; + BlockNumber blkno = state->ready_blknos[i]; + + /* Currently, the blocks must be buffered in order. 
*/ + if (blkno != state->pages_written) + elog(ERROR, "unexpected block number to flush GiST sorting build"); + + PageSetLSN(page, GistBuildLSN); + PageSetChecksumInplace(page, blkno); + smgrextend(state->indexrel->rd_smgr, MAIN_FORKNUM, blkno, page, true); + + state->pages_written++; + } + + if (RelationNeedsWAL(state->indexrel)) + log_newpages(&state->indexrel->rd_node, MAIN_FORKNUM, state->ready_num_pages, + state->ready_blknos, state->ready_pages, true); + + for (int i = 0; i < state->ready_num_pages; i++) + pfree(state->ready_pages[i]); + + state->ready_num_pages = 0; } + +/*------------------------------------------------------------------------- + * Routines for non-sorted build + *------------------------------------------------------------------------- + */ + /* * Attempt to switch to buffering mode. * @@ -375,7 +735,7 @@ gistInitBuffering(GISTBuildState *buildstate) if (levelStep <= 0) { elog(DEBUG1, "failed to switch to buffered GiST build"); - buildstate->bufferingMode = GIST_BUFFERING_DISABLED; + buildstate->buildMode = GIST_BUFFERING_DISABLED; return; } @@ -392,7 +752,7 @@ gistInitBuffering(GISTBuildState *buildstate) gistInitParentMap(buildstate); - buildstate->bufferingMode = GIST_BUFFERING_ACTIVE; + buildstate->buildMode = GIST_BUFFERING_ACTIVE; elog(DEBUG1, "switched to buffered GiST build; level step = %d, pagesPerBuffer = %d", levelStep, pagesPerBuffer); @@ -453,10 +813,12 @@ gistBuildCallback(Relation index, oldCtx = MemoryContextSwitchTo(buildstate->giststate->tempCxt); /* form an index tuple and point it at the heap tuple */ - itup = gistFormTuple(buildstate->giststate, index, values, isnull, true); + itup = gistFormTuple(buildstate->giststate, index, + values, isnull, + true); itup->t_tid = *tid; - if (buildstate->bufferingMode == GIST_BUFFERING_ACTIVE) + if (buildstate->buildMode == GIST_BUFFERING_ACTIVE) { /* We have buffers, so use them. 
*/ gistBufferingBuildInsert(buildstate, itup); @@ -478,7 +840,7 @@ gistBuildCallback(Relation index, MemoryContextSwitchTo(oldCtx); MemoryContextReset(buildstate->giststate->tempCxt); - if (buildstate->bufferingMode == GIST_BUFFERING_ACTIVE && + if (buildstate->buildMode == GIST_BUFFERING_ACTIVE && buildstate->indtuples % BUFFERING_MODE_TUPLE_SIZE_STATS_TARGET == 0) { /* Adjust the target buffer size now */ @@ -491,12 +853,15 @@ gistBuildCallback(Relation index, * and switch to buffering mode if it has. * * To avoid excessive calls to smgrnblocks(), only check this every - * BUFFERING_MODE_SWITCH_CHECK_STEP index tuples + * BUFFERING_MODE_SWITCH_CHECK_STEP index tuples. + * + * In 'stats' state, switch as soon as we have seen enough tuples to have + * some idea of the average tuple size. */ - if ((buildstate->bufferingMode == GIST_BUFFERING_AUTO && + if ((buildstate->buildMode == GIST_BUFFERING_AUTO && buildstate->indtuples % BUFFERING_MODE_SWITCH_CHECK_STEP == 0 && effective_cache_size < smgrnblocks(index->rd_smgr, MAIN_FORKNUM)) || - (buildstate->bufferingMode == GIST_BUFFERING_STATS && + (buildstate->buildMode == GIST_BUFFERING_STATS && buildstate->indtuples >= BUFFERING_MODE_TUPLE_SIZE_STATS_TARGET)) { /* diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c index 4eab9bb83ac0c..4ad67c88b4e5d 100644 --- a/src/backend/access/gist/gistbuildbuffers.c +++ b/src/backend/access/gist/gistbuildbuffers.c @@ -666,7 +666,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate, zero_penalty = true; /* Loop over index attributes. 
*/ - for (j = 0; j < r->rd_att->natts; j++) + for (j = 0; j < IndexRelationGetNumberOfKeyAttributes(r); j++) { float usize; @@ -692,7 +692,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate, which = i; best_penalty[j] = usize; - if (j < r->rd_att->natts - 1) + if (j < IndexRelationGetNumberOfKeyAttributes(r) - 1) best_penalty[j + 1] = -1; } else if (best_penalty[j] == usize) diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c index 9ace64c3c4a9f..27d9c0f77c306 100644 --- a/src/backend/access/gist/gistproc.c +++ b/src/backend/access/gist/gistproc.c @@ -24,6 +24,7 @@ #include "utils/builtins.h" #include "utils/float.h" #include "utils/geo_decls.h" +#include "utils/sortsupport.h" static bool gist_box_leaf_consistent(BOX *key, BOX *query, @@ -31,6 +32,15 @@ static bool gist_box_leaf_consistent(BOX *key, BOX *query, static bool rtree_internal_consistent(BOX *key, BOX *query, StrategyNumber strategy); +static uint64 point_zorder_internal(float4 x, float4 y); +static uint64 part_bits32_by2(uint32 x); +static uint32 ieee_float32_to_uint32(float f); +static int gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup); +static Datum gist_bbox_zorder_abbrev_convert(Datum original, SortSupport ssup); +static int gist_bbox_zorder_cmp_abbrev(Datum z1, Datum z2, SortSupport ssup); +static bool gist_bbox_zorder_abbrev_abort(int memtupcount, SortSupport ssup); + + /* Minimum accepted ratio of split */ #define LIMIT_RATIO 0.3 @@ -1540,3 +1550,222 @@ gist_poly_distance(PG_FUNCTION_ARGS) PG_RETURN_FLOAT8(distance); } + +/* + * Z-order routines for fast index build + */ + +/* + * Compute Z-value of a point + * + * Z-order (also known as Morton Code) maps a two-dimensional point to a + * single integer, in a way that preserves locality. Points that are close in + * the two-dimensional space are mapped to integer that are not far from each + * other. We do that by interleaving the bits in the X and Y components. 
+ * + * Morton Code is normally defined only for integers, but the X and Y values + * of a point are floating point. We expect floats to be in IEEE format. + */ +static uint64 +point_zorder_internal(float4 x, float4 y) +{ + uint32 ix = ieee_float32_to_uint32(x); + uint32 iy = ieee_float32_to_uint32(y); + + /* Interleave the bits */ + return part_bits32_by2(ix) | (part_bits32_by2(iy) << 1); +} + +/* Interleave 32 bits with zeroes */ +static uint64 +part_bits32_by2(uint32 x) +{ + uint64 n = x; + + n = (n | (n << 16)) & UINT64CONST(0x0000FFFF0000FFFF); + n = (n | (n << 8)) & UINT64CONST(0x00FF00FF00FF00FF); + n = (n | (n << 4)) & UINT64CONST(0x0F0F0F0F0F0F0F0F); + n = (n | (n << 2)) & UINT64CONST(0x3333333333333333); + n = (n | (n << 1)) & UINT64CONST(0x5555555555555555); + + return n; +} + +/* + * Convert a 32-bit IEEE float to uint32 in a way that preserves the ordering + */ +static uint32 +ieee_float32_to_uint32(float f) +{ + /*---- + * + * IEEE 754 floating point format + * ------------------------------ + * + * IEEE 754 floating point numbers have this format: + * + * exponent (8 bits) + * | + * s eeeeeeee mmmmmmmmmmmmmmmmmmmmmmm + * | | + * sign mantissa (23 bits) + * + * Infinity has all bits in the exponent set and the mantissa is all + * zeros. Negative infinity is the same but with the sign bit set. + * + * NaNs are represented with all bits in the exponent set, and the least + * significant bit in the mantissa also set. The rest of the mantissa bits + * can be used to distinguish different kinds of NaNs. + * + * The IEEE format has the nice property that when you take the bit + * representation and interpret it as an integer, the order is preserved, + * except for the sign. That holds for the +-Infinity values too. 
+ *
+ * Mapping to uint32
+ * -----------------
+ *
+ * In order to have a smooth transition from negative to positive numbers,
+ * we map floats to unsigned integers like this:
+ *
+ * x < 0 to range 0-7FFFFFFF
+ * x = 0 to value 80000000 (both positive and negative zero)
+ * x > 0 to range 80000001-FFFFFFFF
+ *
+ * We don't care to distinguish different kinds of NaNs, so they are all
+ * mapped to the same arbitrary value, FFFFFFFF. Because of the IEEE bit
+ * representation of NaNs, there aren't any non-NaN values that would be
+ * mapped to FFFFFFFF. In fact, there is a range of unused values on both
+ * ends of the uint32 space.
+ */
+ if (isnan(f))
+ return 0xFFFFFFFF;
+ else
+ {
+ union
+ {
+ float f;
+ uint32 i;
+ } u;
+
+ u.f = f;
+
+ /* Check the sign bit */
+ if ((u.i & 0x80000000) != 0)
+ {
+ /*
+ * Map the negative value to range 0-7FFFFFFF. This flips the sign
+ * bit to 0 in the same instruction.
+ */
+ Assert(f <= 0); /* can be -0 */
+ u.i ^= 0xFFFFFFFF;
+ }
+ else
+ {
+ /* Map the positive value (or 0) to range 80000000-FFFFFFFF */
+ u.i |= 0x80000000;
+ }
+
+ return u.i;
+ }
+}
+
+/*
+ * Compare the Z-order of points
+ */
+static int
+gist_bbox_zorder_cmp(Datum a, Datum b, SortSupport ssup)
+{
+ Point *p1 = &(DatumGetBoxP(a)->low);
+ Point *p2 = &(DatumGetBoxP(b)->low);
+ uint64 z1;
+ uint64 z2;
+
+ /*
+ * Do a quick check for equality first. It's not clear if this is worth it
+ * in general, but certainly is when used as tie-breaker with abbreviated
+ * keys.
+ */
+ if (p1->x == p2->x && p1->y == p2->y)
+ return 0;
+
+ z1 = point_zorder_internal(p1->x, p1->y);
+ z2 = point_zorder_internal(p2->x, p2->y);
+ if (z1 > z2)
+ return 1;
+ else if (z1 < z2)
+ return -1;
+ else
+ return 0;
+}
+
+/*
+ * Abbreviated version of Z-order comparison
+ *
+ * The abbreviated format is a Z-order value computed from the two 32-bit
+ * floats. 
If SIZEOF_DATUM == 8, the 64-bit Z-order value fits fully in the + * abbreviated Datum, otherwise use its most significant bits. + */ +static Datum +gist_bbox_zorder_abbrev_convert(Datum original, SortSupport ssup) +{ + Point *p = &(DatumGetBoxP(original)->low); + uint64 z; + + z = point_zorder_internal(p->x, p->y); + +#if SIZEOF_DATUM == 8 + return (Datum) z; +#else + return (Datum) (z >> 32); +#endif +} + +static int +gist_bbox_zorder_cmp_abbrev(Datum z1, Datum z2, SortSupport ssup) +{ + /* + * Compare the pre-computed Z-orders as unsigned integers. Datum is a + * typedef for 'uintptr_t', so no casting is required. + */ + if (z1 > z2) + return 1; + else if (z1 < z2) + return -1; + else + return 0; +} + +/* + * We never consider aborting the abbreviation. + * + * On 64-bit systems, the abbreviation is not lossy so it is always + * worthwhile. (Perhaps it's not on 32-bit systems, but we don't bother + * with logic to decide.) + */ +static bool +gist_bbox_zorder_abbrev_abort(int memtupcount, SortSupport ssup) +{ + return false; +} + +/* + * Sort support routine for fast GiST index build by sorting. 
+ */ +Datum +gist_point_sortsupport(PG_FUNCTION_ARGS) +{ + SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); + + if (ssup->abbreviate) + { + ssup->comparator = gist_bbox_zorder_cmp_abbrev; + ssup->abbrev_converter = gist_bbox_zorder_abbrev_convert; + ssup->abbrev_abort = gist_bbox_zorder_abbrev_abort; + ssup->abbrev_full_comparator = gist_bbox_zorder_cmp; + } + else + { + ssup->comparator = gist_bbox_zorder_cmp; + } + PG_RETURN_VOID(); +} diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 0516059e3ddc9..615b5ade23310 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -572,12 +572,31 @@ gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e, IndexTuple gistFormTuple(GISTSTATE *giststate, Relation r, - Datum attdata[], bool isnull[], bool isleaf) + Datum *attdata, bool *isnull, bool isleaf) { Datum compatt[INDEX_MAX_KEYS]; - int i; IndexTuple res; + gistCompressValues(giststate, r, attdata, isnull, isleaf, compatt); + + res = index_form_tuple(isleaf ? giststate->leafTupdesc : + giststate->nonLeafTupdesc, + compatt, isnull); + + /* + * The offset number on tuples on internal pages is unused. For historical + * reasons, it is set to 0xffff. + */ + ItemPointerSetOffsetNumber(&(res->t_tid), 0xffff); + return res; +} + +void +gistCompressValues(GISTSTATE *giststate, Relation r, + Datum *attdata, bool *isnull, bool isleaf, Datum *compatt) +{ + int i; + /* * Call the compress method on each attribute. */ @@ -617,17 +636,6 @@ gistFormTuple(GISTSTATE *giststate, Relation r, compatt[i] = attdata[i]; } } - - res = index_form_tuple(isleaf ? giststate->leafTupdesc : - giststate->nonLeafTupdesc, - compatt, isnull); - - /* - * The offset number on tuples on internal pages is unused. For historical - * reasons, it is set to 0xffff. 
- */ - ItemPointerSetOffsetNumber(&(res->t_tid), 0xffff); - return res; } /* @@ -745,14 +753,11 @@ gistpenalty(GISTSTATE *giststate, int attno, * Initialize a new index page */ void -GISTInitBuffer(Buffer b, uint32 f) +gistinitpage(Page page, uint32 f) { GISTPageOpaque opaque; - Page page; - Size pageSize; + Size pageSize = BLCKSZ; - pageSize = BufferGetPageSize(b); - page = BufferGetPage(b); PageInit(page, pageSize, sizeof(GISTPageOpaqueData)); opaque = GistPageGetOpaque(page); @@ -763,6 +768,18 @@ GISTInitBuffer(Buffer b, uint32 f) opaque->gist_page_id = GIST_PAGE_ID; } +/* + * Initialize a new index buffer + */ +void +GISTInitBuffer(Buffer b, uint32 f) +{ + Page page; + + page = BufferGetPage(b); + gistinitpage(page, f); +} + /* * Verify that a freshly-read page looks sane. */ diff --git a/src/backend/access/gist/gistvalidate.c b/src/backend/access/gist/gistvalidate.c index 2b9ab693be188..8a14620fab278 100644 --- a/src/backend/access/gist/gistvalidate.c +++ b/src/backend/access/gist/gistvalidate.c @@ -143,6 +143,10 @@ gistvalidate(Oid opclassoid) case GIST_OPTIONS_PROC: ok = check_amoptsproc_signature(procform->amproc); break; + case GIST_SORTSUPPORT_PROC: + ok = check_amproc_signature(procform->amproc, VOIDOID, true, + 1, 1, INTERNALOID); + break; default: ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -263,7 +267,7 @@ gistvalidate(Oid opclassoid) continue; /* got it */ if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC || i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC || - i == GIST_OPTIONS_PROC) + i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC) continue; /* optional methods */ ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 9b5f417eac442..1585861a021dd 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2044,12 +2044,10 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, 
CommandId cid, int options) { /* - * Parallel operations are required to be strictly read-only in a parallel - * worker. Parallel inserts are not safe even in the leader in the - * general case, because group locking means that heavyweight locks for - * relation extension or GIN page locks will not conflict between members - * of a lock group, but we don't prohibit that case here because there are - * useful special cases that we can safely allow, such as CREATE TABLE AS. + * To allow parallel inserts, we need to ensure that they are safe to be + * performed in workers. We have the infrastructure to allow parallel + * inserts in general except for the cases where inserts generate a new + * CommandId (eg. inserts into a table having a foreign key column). */ if (IsParallelWorker()) ereport(ERROR, @@ -5725,10 +5723,10 @@ heap_inplace_update(Relation relation, HeapTuple tuple) uint32 newlen; /* - * For now, parallel operations are required to be strictly read-only. - * Unlike a regular update, this should never create a combo CID, so it - * might be possible to relax this restriction, but not without more - * thought and testing. It's not clear that it would be useful, anyway. + * For now, we don't allow parallel updates. Unlike a regular update, + * this should never create a combo CID, so it might be possible to relax + * this restriction, but not without more thought and testing. It's not + * clear that it would be useful, anyway. */ if (IsInParallelMode()) ereport(ERROR, diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index aa3f14c019c44..ca357410a293c 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -47,6 +47,17 @@ RelationPutHeapTuple(Relation relation, */ Assert(!token || HeapTupleHeaderIsSpeculative(tuple->t_data)); + /* + * Do not allow tuples with invalid combinations of hint bits to be placed + * on a page. 
These combinations are detected as corruption by the + * contrib/amcheck logic, so if you disable one or both of these + * assertions, make corresponding changes there. + */ + Assert(!((tuple->t_data->t_infomask & HEAP_XMAX_LOCK_ONLY) && + (tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED))); + Assert(!((tuple->t_data->t_infomask & HEAP_XMAX_COMMITTED) && + (tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI))); + /* Add the tuple to the page */ pageHeader = BufferGetPage(buffer); diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 92389e6666bc3..4f2f38168dc2f 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -677,11 +677,10 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params, read_rate, write_rate); appendStringInfo(&buf, _("system usage: %s\n"), pg_rusage_show(&ru0)); appendStringInfo(&buf, - _("WAL usage: %ld records, %ld full page images, " - UINT64_FORMAT " bytes"), + _("WAL usage: %ld records, %ld full page images, %llu bytes"), walusage.wal_records, walusage.wal_fpi, - walusage.wal_bytes); + (unsigned long long) walusage.wal_bytes); ereport(LOG, (errmsg_internal("%s", buf.data))); @@ -3523,9 +3522,10 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) false); elevel = lvshared->elevel; - ereport(DEBUG1, - (errmsg("starting parallel vacuum worker for %s", - lvshared->for_cleanup ? 
"cleanup" : "bulk delete"))); + if (lvshared->for_cleanup) + elog(DEBUG1, "starting parallel vacuum worker for cleanup"); + else + elog(DEBUG1, "starting parallel vacuum worker for bulk delete"); /* Set debug_query_string for individual workers */ sharedquery = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_QUERY_TEXT, false); diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 1e628a33d77ec..8f6575fdf15c2 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -880,7 +880,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * never be satisfied (eg, x == 1 AND x > 2). */ if (!so->qual_ok) + { + /* Notify any other workers that we're done with this scan key. */ + _bt_parallel_done(scan); return false; + } /* * For parallel scans, get the starting page from shared state. If the diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c index d82484b9db400..47580feaeae41 100644 --- a/src/backend/access/rmgrdesc/dbasedesc.c +++ b/src/backend/access/rmgrdesc/dbasedesc.c @@ -37,7 +37,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record) xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec; int i; - appendStringInfo(buf, "dir"); + appendStringInfoString(buf, "dir"); for (i = 0; i < xlrec->ntablespaces; i++) appendStringInfo(buf, " %u/%u", xlrec->tablespace_ids[i], xlrec->db_id); diff --git a/src/backend/access/rmgrdesc/logicalmsgdesc.c b/src/backend/access/rmgrdesc/logicalmsgdesc.c index bff298c9287fe..83ab93a24be9e 100644 --- a/src/backend/access/rmgrdesc/logicalmsgdesc.c +++ b/src/backend/access/rmgrdesc/logicalmsgdesc.c @@ -24,10 +24,21 @@ logicalmsg_desc(StringInfo buf, XLogReaderState *record) if (info == XLOG_LOGICAL_MESSAGE) { xl_logical_message *xlrec = (xl_logical_message *) rec; + char *prefix = xlrec->message; + char *message = xlrec->message + xlrec->prefix_size; + char *sep = ""; - appendStringInfo(buf, "%s message size %zu bytes", - 
xlrec->transactional ? "transactional" : "nontransactional", - xlrec->message_size); + Assert(prefix[xlrec->prefix_size] != '\0'); + + appendStringInfo(buf, "%s, prefix \"%s\"; payload (%zu bytes): ", + xlrec->transactional ? "transactional" : "non-transactional", + prefix, xlrec->message_size); + /* Write message payload as a series of hex bytes */ + for (int cnt = 0; cnt < xlrec->message_size; cnt++) + { + appendStringInfo(buf, "%s%02X", sep, (unsigned char) message[cnt]); + sep = " "; + } } } diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 65aa8841f7ce0..034349aa7b986 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -42,6 +42,7 @@ #include "pg_trace.h" #include "pgstat.h" #include "storage/proc.h" +#include "storage/sync.h" /* * Defines for CLOG page sizes. A page is the same BLCKSZ as is used @@ -691,7 +692,8 @@ CLOGShmemInit(void) { XactCtl->PagePrecedes = CLOGPagePrecedes; SimpleLruInit(XactCtl, "Xact", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE, - XactSLRULock, "pg_xact", LWTRANCHE_XACT_BUFFER); + XactSLRULock, "pg_xact", LWTRANCHE_XACT_BUFFER, + SYNC_HANDLER_CLOG); } /* @@ -808,41 +810,19 @@ TrimCLOG(void) LWLockRelease(XactSLRULock); } -/* - * This must be called ONCE during postmaster or standalone-backend shutdown - */ -void -ShutdownCLOG(void) -{ - /* Flush dirty CLOG pages to disk */ - TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(false); - SimpleLruFlush(XactCtl, false); - - /* - * fsync pg_xact to ensure that any files flushed previously are durably - * on disk. - */ - fsync_fname("pg_xact", true); - - TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(false); -} - /* * Perform a checkpoint --- either during shutdown, or on-the-fly */ void CheckPointCLOG(void) { - /* Flush dirty CLOG pages to disk */ - TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(true); - SimpleLruFlush(XactCtl, true); - /* - * fsync pg_xact to ensure that any files flushed previously are durably - * on disk. 
+ * Write dirty CLOG pages to disk. This may result in sync requests + * queued for later handling by ProcessSyncRequests(), as part of the + * checkpoint. */ - fsync_fname("pg_xact", true); - + TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(true); + SimpleLruWriteAll(XactCtl, true); TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(true); } @@ -1033,3 +1013,12 @@ clog_redo(XLogReaderState *record) else elog(PANIC, "clog_redo: unknown op code %u", info); } + +/* + * Entrypoint for sync.c to sync clog files. + */ +int +clogsyncfiletag(const FileTag *ftag, char *path) +{ + return SlruSyncFileTag(XactCtl, ftag, path); +} diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 5244b06a2b651..cb8a968801873 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -555,7 +555,8 @@ CommitTsShmemInit(void) CommitTsCtl->PagePrecedes = CommitTsPagePrecedes; SimpleLruInit(CommitTsCtl, "CommitTs", CommitTsShmemBuffers(), 0, CommitTsSLRULock, "pg_commit_ts", - LWTRANCHE_COMMITTS_BUFFER); + LWTRANCHE_COMMITTS_BUFFER, + SYNC_HANDLER_COMMIT_TS); commitTsShared = ShmemInitStruct("CommitTs shared", sizeof(CommitTimestampShared), @@ -798,36 +799,18 @@ DeactivateCommitTs(void) LWLockRelease(CommitTsSLRULock); } -/* - * This must be called ONCE during postmaster or standalone-backend shutdown - */ -void -ShutdownCommitTs(void) -{ - /* Flush dirty CommitTs pages to disk */ - SimpleLruFlush(CommitTsCtl, false); - - /* - * fsync pg_commit_ts to ensure that any files flushed previously are - * durably on disk. - */ - fsync_fname("pg_commit_ts", true); -} - /* * Perform a checkpoint --- either during shutdown, or on-the-fly */ void CheckPointCommitTs(void) { - /* Flush dirty CommitTs pages to disk */ - SimpleLruFlush(CommitTsCtl, true); - /* - * fsync pg_commit_ts to ensure that any files flushed previously are - * durably on disk. + * Write dirty CommitTs pages to disk. 
This may result in sync requests + * queued for later handling by ProcessSyncRequests(), as part of the + * checkpoint. */ - fsync_fname("pg_commit_ts", true); + SimpleLruWriteAll(CommitTsCtl, true); } /* @@ -1083,3 +1066,12 @@ commit_ts_redo(XLogReaderState *record) else elog(PANIC, "commit_ts_redo: unknown op code %u", info); } + +/* + * Entrypoint for sync.c to sync commit_ts files. + */ +int +committssyncfiletag(const FileTag *ftag, char *path) +{ + return SlruSyncFileTag(CommitTsCtl, ftag, path); +} diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index b8bedca04a4da..43653fe572127 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -735,6 +735,25 @@ ReadNextMultiXactId(void) return mxid; } +/* + * ReadMultiXactIdRange + * Get the range of IDs that may still be referenced by a relation. + */ +void +ReadMultiXactIdRange(MultiXactId *oldest, MultiXactId *next) +{ + LWLockAcquire(MultiXactGenLock, LW_SHARED); + *oldest = MultiXactState->oldestMultiXactId; + *next = MultiXactState->nextMXact; + LWLockRelease(MultiXactGenLock); + + if (*oldest < FirstMultiXactId) + *oldest = FirstMultiXactId; + if (*next < FirstMultiXactId) + *next = FirstMultiXactId; +} + + /* * MultiXactIdCreateFromMembers * Make a new MultiXactId from the specified set of members @@ -1742,7 +1761,7 @@ PostPrepare_MultiXact(TransactionId xid) OldestVisibleMXactId[MyBackendId] = InvalidMultiXactId; /* - * Discard the local MultiXactId cache like in AtEOX_MultiXact + * Discard the local MultiXactId cache like in AtEOXact_MultiXact. 
*/ MXactContext = NULL; dlist_init(&MXactCache); @@ -1772,7 +1791,7 @@ multixact_twophase_recover(TransactionId xid, uint16 info, /* * multixact_twophase_postcommit - * Similar to AtEOX_MultiXact but for COMMIT PREPARED + * Similar to AtEOXact_MultiXact but for COMMIT PREPARED */ void multixact_twophase_postcommit(TransactionId xid, uint16 info, @@ -1831,11 +1850,13 @@ MultiXactShmemInit(void) SimpleLruInit(MultiXactOffsetCtl, "MultiXactOffset", NUM_MULTIXACTOFFSET_BUFFERS, 0, MultiXactOffsetSLRULock, "pg_multixact/offsets", - LWTRANCHE_MULTIXACTOFFSET_BUFFER); + LWTRANCHE_MULTIXACTOFFSET_BUFFER, + SYNC_HANDLER_MULTIXACT_OFFSET); SimpleLruInit(MultiXactMemberCtl, "MultiXactMember", NUM_MULTIXACTMEMBER_BUFFERS, 0, MultiXactMemberSLRULock, "pg_multixact/members", - LWTRANCHE_MULTIXACTMEMBER_BUFFER); + LWTRANCHE_MULTIXACTMEMBER_BUFFER, + SYNC_HANDLER_MULTIXACT_MEMBER); /* Initialize our shared state struct */ MultiXactState = ShmemInitStruct("Shared MultiXact State", @@ -2100,19 +2121,6 @@ TrimMultiXact(void) SetMultiXactIdLimit(oldestMXact, oldestMXactDB, true); } -/* - * This must be called ONCE during postmaster or standalone-backend shutdown - */ -void -ShutdownMultiXact(void) -{ - /* Flush dirty MultiXact pages to disk */ - TRACE_POSTGRESQL_MULTIXACT_CHECKPOINT_START(false); - SimpleLruFlush(MultiXactOffsetCtl, false); - SimpleLruFlush(MultiXactMemberCtl, false); - TRACE_POSTGRESQL_MULTIXACT_CHECKPOINT_DONE(false); -} - /* * Get the MultiXact data to save in a checkpoint record */ @@ -2143,9 +2151,13 @@ CheckPointMultiXact(void) { TRACE_POSTGRESQL_MULTIXACT_CHECKPOINT_START(true); - /* Flush dirty MultiXact pages to disk */ - SimpleLruFlush(MultiXactOffsetCtl, true); - SimpleLruFlush(MultiXactMemberCtl, true); + /* + * Write dirty MultiXact pages to disk. This may result in sync requests + * queued for later handling by ProcessSyncRequests(), as part of the + * checkpoint. 
+ */ + SimpleLruWriteAll(MultiXactOffsetCtl, true); + SimpleLruWriteAll(MultiXactMemberCtl, true); TRACE_POSTGRESQL_MULTIXACT_CHECKPOINT_DONE(true); } @@ -2728,14 +2740,10 @@ find_multixact_start(MultiXactId multi, MultiXactOffset *result) entryno = MultiXactIdToOffsetEntry(multi); /* - * Flush out dirty data, so PhysicalPageExists can work correctly. - * SimpleLruFlush() is a pretty big hammer for that. Alternatively we - * could add an in-memory version of page exists, but find_multixact_start - * is called infrequently, and it doesn't seem bad to flush buffers to - * disk before truncation. + * Write out dirty data, so PhysicalPageExists can work correctly. */ - SimpleLruFlush(MultiXactOffsetCtl, true); - SimpleLruFlush(MultiXactMemberCtl, true); + SimpleLruWriteAll(MultiXactOffsetCtl, true); + SimpleLruWriteAll(MultiXactMemberCtl, true); if (!SimpleLruDoesPhysicalPageExist(MultiXactOffsetCtl, pageno)) return false; @@ -3386,3 +3394,21 @@ pg_get_multixact_members(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funccxt); } + +/* + * Entrypoint for sync.c to sync offsets files. + */ +int +multixactoffsetssyncfiletag(const FileTag *ftag, char *path) +{ + return SlruSyncFileTag(MultiXactOffsetCtl, ftag, path); +} + +/* + * Entrypoint for sync.c to sync members files. + */ +int +multixactmemberssyncfiletag(const FileTag *ftag, char *path) +{ + return SlruSyncFileTag(MultiXactMemberCtl, ftag, path); +} diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 7640f153c227b..16a78986971ff 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -63,22 +63,33 @@ snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg) /* - * During SimpleLruFlush(), we will usually not need to write/fsync more - * than one or two physical files, but we may need to write several pages - * per file. We can consolidate the I/O requests by leaving files open - * until control returns to SimpleLruFlush(). 
This data structure remembers - * which files are open. + * During SimpleLruWriteAll(), we will usually not need to write more than one + * or two physical files, but we may need to write several pages per file. We + * can consolidate the I/O requests by leaving files open until control returns + * to SimpleLruWriteAll(). This data structure remembers which files are open. */ -#define MAX_FLUSH_BUFFERS 16 +#define MAX_WRITEALL_BUFFERS 16 -typedef struct SlruFlushData +typedef struct SlruWriteAllData { int num_files; /* # files actually open */ - int fd[MAX_FLUSH_BUFFERS]; /* their FD's */ - int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */ -} SlruFlushData; + int fd[MAX_WRITEALL_BUFFERS]; /* their FD's */ + int segno[MAX_WRITEALL_BUFFERS]; /* their log seg#s */ +} SlruWriteAllData; -typedef struct SlruFlushData *SlruFlush; +typedef struct SlruWriteAllData *SlruWriteAll; + +/* + * Populate a file tag describing a segment file. We only use the segment + * number, since we can derive everything else we need by having separate + * sync handler functions for clog, multixact etc. + */ +#define INIT_SLRUFILETAG(a,xx_handler,xx_segno) \ +( \ + memset(&(a), 0, sizeof(FileTag)), \ + (a).handler = (xx_handler), \ + (a).segno = (xx_segno) \ +) /* * Macro to mark a buffer slot "most recently used". 
Note multiple evaluation @@ -125,10 +136,10 @@ static int slru_errno; static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno); static void SimpleLruWaitIO(SlruCtl ctl, int slotno); -static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata); +static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata); static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno); static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, - SlruFlush fdata); + SlruWriteAll fdata); static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid); static int SlruSelectLRUPage(SlruCtl ctl, int pageno); @@ -173,7 +184,8 @@ SimpleLruShmemSize(int nslots, int nlsns) */ void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, - LWLock *ctllock, const char *subdir, int tranche_id) + LWLock *ctllock, const char *subdir, int tranche_id, + SyncRequestHandler sync_handler) { SlruShared shared; bool found; @@ -251,7 +263,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, * assume caller set PagePrecedes. */ ctl->shared = shared; - ctl->do_fsync = true; /* default behavior */ + ctl->sync_handler = sync_handler; strlcpy(ctl->Dir, subdir, sizeof(ctl->Dir)); } @@ -523,7 +535,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid) * Control lock must be held at entry, and will be held at exit. */ static void -SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata) +SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata) { SlruShared shared = ctl->shared; int pageno = shared->page_number[slotno]; @@ -587,6 +599,10 @@ SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata) /* Now it's okay to ereport if we failed */ if (!ok) SlruReportIOError(ctl, pageno, InvalidTransactionId); + + /* If part of a checkpoint, count this as a buffer written. 
*/ + if (fdata) + CheckpointStats.ckpt_bufs_written++; } /* @@ -730,13 +746,13 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * * For now, assume it's not worth keeping a file pointer open across * independent read/write operations. We do batch operations during - * SimpleLruFlush, though. + * SimpleLruWriteAll, though. * * fdata is NULL for a standalone write, pointer to open-file info during - * SimpleLruFlush. + * SimpleLruWriteAll. */ static bool -SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) +SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata) { SlruShared shared = ctl->shared; int segno = pageno / SLRU_PAGES_PER_SEGMENT; @@ -791,7 +807,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) } /* - * During a Flush, we may already have the desired file open. + * During a WriteAll, we may already have the desired file open. */ if (fdata) { @@ -837,7 +853,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) if (fdata) { - if (fdata->num_files < MAX_FLUSH_BUFFERS) + if (fdata->num_files < MAX_WRITEALL_BUFFERS) { fdata->fd[fdata->num_files] = fd; fdata->segno[fdata->num_files] = segno; @@ -870,23 +886,31 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) } pgstat_report_wait_end(); - /* - * If not part of Flush, need to fsync now. We assume this happens - * infrequently enough that it's not a performance issue. - */ - if (!fdata) + /* Queue up a sync request for the checkpointer. */ + if (ctl->sync_handler != SYNC_HANDLER_NONE) { - pgstat_report_wait_start(WAIT_EVENT_SLRU_SYNC); - if (ctl->do_fsync && pg_fsync(fd) != 0) + FileTag tag; + + INIT_SLRUFILETAG(tag, ctl->sync_handler, segno); + if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false)) { + /* No space to enqueue sync request. Do it synchronously. 
*/ + pgstat_report_wait_start(WAIT_EVENT_SLRU_SYNC); + if (pg_fsync(fd) != 0) + { + pgstat_report_wait_end(); + slru_errcause = SLRU_FSYNC_FAILED; + slru_errno = errno; + CloseTransientFile(fd); + return false; + } pgstat_report_wait_end(); - slru_errcause = SLRU_FSYNC_FAILED; - slru_errno = errno; - CloseTransientFile(fd); - return false; } - pgstat_report_wait_end(); + } + /* Close file, unless part of flush request. */ + if (!fdata) + { if (CloseTransientFile(fd) != 0) { slru_errcause = SLRU_CLOSE_FAILED; @@ -1122,13 +1146,16 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) } /* - * Flush dirty pages to disk during checkpoint or database shutdown + * Write dirty pages to disk during checkpoint or database shutdown. Flushing + * is deferred until the next call to ProcessSyncRequests(), though we do fsync + * the containing directory here to make sure that newly created directory + * entries are on disk. */ void -SimpleLruFlush(SlruCtl ctl, bool allow_redirtied) +SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied) { SlruShared shared = ctl->shared; - SlruFlushData fdata; + SlruWriteAllData fdata; int slotno; int pageno = 0; int i; @@ -1162,21 +1189,11 @@ SimpleLruFlush(SlruCtl ctl, bool allow_redirtied) LWLockRelease(shared->ControlLock); /* - * Now fsync and close any files that were open + * Now close any files that were open */ ok = true; for (i = 0; i < fdata.num_files; i++) { - pgstat_report_wait_start(WAIT_EVENT_SLRU_FLUSH_SYNC); - if (ctl->do_fsync && pg_fsync(fdata.fd[i]) != 0) - { - slru_errcause = SLRU_FSYNC_FAILED; - slru_errno = errno; - pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT; - ok = false; - } - pgstat_report_wait_end(); - if (CloseTransientFile(fdata.fd[i]) != 0) { slru_errcause = SLRU_CLOSE_FAILED; @@ -1187,6 +1204,10 @@ SimpleLruFlush(SlruCtl ctl, bool allow_redirtied) } if (!ok) SlruReportIOError(ctl, pageno, InvalidTransactionId); + + /* Ensure that directory entries for new files are on disk. 
*/ + if (ctl->sync_handler != SYNC_HANDLER_NONE) + fsync_fname(ctl->Dir, true); } /* @@ -1346,6 +1367,19 @@ SlruDeleteSegment(SlruCtl ctl, int segno) snprintf(path, MAXPGPATH, "%s/%04X", ctl->Dir, segno); ereport(DEBUG2, (errmsg("removing file \"%s\"", path))); + + /* + * Tell the checkpointer to forget any sync requests, before we unlink the + * file. + */ + if (ctl->sync_handler != SYNC_HANDLER_NONE) + { + FileTag tag; + + INIT_SLRUFILETAG(tag, ctl->sync_handler, segno); + RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true); + } + unlink(path); LWLockRelease(shared->ControlLock); @@ -1444,3 +1478,31 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) return retval; } + +/* + * Individual SLRUs (clog, ...) have to provide a sync.c handler function so + * that they can provide the correct "SlruCtl" (otherwise we don't know how to + * build the path), but they just forward to this common implementation that + * performs the fsync. + */ +int +SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path) +{ + int fd; + int save_errno; + int result; + + SlruFileName(ctl, path, ftag->segno); + + fd = OpenTransientFile(path, O_RDWR | PG_BINARY); + if (fd < 0) + return -1; + + result = pg_fsync(fd); + save_errno = errno; + + CloseTransientFile(fd); + + errno = save_errno; + return result; +} diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index a50f60b99af28..0111e867c79a2 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -193,9 +193,7 @@ SUBTRANSShmemInit(void) SubTransCtl->PagePrecedes = SubTransPagePrecedes; SimpleLruInit(SubTransCtl, "Subtrans", NUM_SUBTRANS_BUFFERS, 0, SubtransSLRULock, "pg_subtrans", - LWTRANCHE_SUBTRANS_BUFFER); - /* Override default assumption that writes should be fsync'd */ - SubTransCtl->do_fsync = false; + LWTRANCHE_SUBTRANS_BUFFER, SYNC_HANDLER_NONE); } /* @@ -278,23 +276,6 @@ StartupSUBTRANS(TransactionId oldestActiveXID) 
LWLockRelease(SubtransSLRULock); } -/* - * This must be called ONCE during postmaster or standalone-backend shutdown - */ -void -ShutdownSUBTRANS(void) -{ - /* - * Flush dirty SUBTRANS pages to disk - * - * This is not actually necessary from a correctness point of view. We do - * it merely as a debugging aid. - */ - TRACE_POSTGRESQL_SUBTRANS_CHECKPOINT_START(false); - SimpleLruFlush(SubTransCtl, false); - TRACE_POSTGRESQL_SUBTRANS_CHECKPOINT_DONE(false); -} - /* * Perform a checkpoint --- either during shutdown, or on-the-fly */ @@ -302,14 +283,14 @@ void CheckPointSUBTRANS(void) { /* - * Flush dirty SUBTRANS pages to disk + * Write dirty SUBTRANS pages to disk * * This is not actually necessary from a correctness point of view. We do * it merely to improve the odds that writing of dirty pages is done by * the checkpoint process and not by backends. */ TRACE_POSTGRESQL_SUBTRANS_CHECKPOINT_START(true); - SimpleLruFlush(SubTransCtl, true); + SimpleLruWriteAll(SubTransCtl, true); TRACE_POSTGRESQL_SUBTRANS_CHECKPOINT_DONE(true); } diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index ef4f9981e359f..7940060443112 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -1243,10 +1243,10 @@ ReadTwoPhaseFile(TransactionId xid, bool missing_ok) stat.st_size > MaxAllocSize) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg_plural("incorrect size of file \"%s\": %zu byte", - "incorrect size of file \"%s\": %zu bytes", - (Size) stat.st_size, path, - (Size) stat.st_size))); + errmsg_plural("incorrect size of file \"%s\": %lld byte", + "incorrect size of file \"%s\": %lld bytes", + (long long int) stat.st_size, path, + (long long int) stat.st_size))); crc_offset = stat.st_size - sizeof(pg_crc32c); if (crc_offset != MAXALIGN(crc_offset)) @@ -1270,8 +1270,8 @@ ReadTwoPhaseFile(TransactionId xid, bool missing_ok) errmsg("could not read file \"%s\": %m", path))); else ereport(ERROR, - 
(errmsg("could not read file \"%s\": read %d of %zu", - path, r, (Size) stat.st_size))); + (errmsg("could not read file \"%s\": read %d of %lld", + path, r, (long long int) stat.st_size))); } pgstat_report_wait_end(); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 09c01ed4ae486..52a67b1170156 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -76,6 +76,7 @@ #include "utils/memutils.h" #include "utils/ps_status.h" #include "utils/relmapper.h" +#include "utils/pg_rusage.h" #include "utils/snapmgr.h" #include "utils/timestamp.h" @@ -939,6 +940,7 @@ static bool CheckForStandbyTrigger(void); #ifdef WAL_DEBUG static void xlog_outrec(StringInfo buf, XLogReaderState *record); #endif +static void xlog_block_info(StringInfo buf, XLogReaderState *record); static void xlog_outdesc(StringInfo buf, XLogReaderState *record); static void pg_start_backup_callback(int code, Datum arg); static void pg_stop_backup_callback(int code, Datum arg); @@ -2194,6 +2196,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) WriteRqst.Flush = 0; XLogWrite(WriteRqst, false); LWLockRelease(WALWriteLock); + WalStats.m_wal_buffers_full++; TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE(); } /* Re-acquire WALBufMappingLock and retry */ @@ -7169,6 +7172,9 @@ StartupXLOG(void) { ErrorContextCallback errcallback; TimestampTz xtime; + PGRUsage ru0; + + pg_rusage_init(&ru0); InRedo = true; @@ -7435,8 +7441,9 @@ StartupXLOG(void) } ereport(LOG, - (errmsg("redo done at %X/%X", - (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr))); + (errmsg("redo done at %X/%X system usage: %s", + (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr, + pg_rusage_show(&ru0)))); xtime = GetLatestXTime(); if (xtime) ereport(LOG, @@ -8523,10 +8530,6 @@ ShutdownXLOG(int code, Datum arg) CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE); } - ShutdownCLOG(); - ShutdownCommitTs(); - ShutdownSUBTRANS(); - ShutdownMultiXact(); } /* @@ 
-9171,17 +9174,29 @@ CreateEndOfRecoveryRecord(void) static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags) { - CheckPointCLOG(); - CheckPointCommitTs(); - CheckPointSUBTRANS(); - CheckPointMultiXact(); - CheckPointPredicate(); CheckPointRelationMap(); CheckPointReplicationSlots(); CheckPointSnapBuild(); CheckPointLogicalRewriteHeap(); - CheckPointBuffers(flags); /* performs all required fsyncs */ CheckPointReplicationOrigin(); + + /* Write out all dirty data in SLRUs and the main buffer pool */ + TRACE_POSTGRESQL_BUFFER_CHECKPOINT_START(flags); + CheckpointStats.ckpt_write_t = GetCurrentTimestamp(); + CheckPointCLOG(); + CheckPointCommitTs(); + CheckPointSUBTRANS(); + CheckPointMultiXact(); + CheckPointPredicate(); + CheckPointBuffers(flags); + + /* Perform all queued up fsyncs */ + TRACE_POSTGRESQL_BUFFER_CHECKPOINT_SYNC_START(); + CheckpointStats.ckpt_sync_t = GetCurrentTimestamp(); + ProcessSyncRequests(); + CheckpointStats.ckpt_sync_end_t = GetCurrentTimestamp(); + TRACE_POSTGRESQL_BUFFER_CHECKPOINT_DONE(); + /* We deliberately delay 2PC checkpointing as long as possible */ CheckPointTwoPhase(checkPointRedo); } @@ -10235,8 +10250,6 @@ xlog_redo(XLogReaderState *record) static void xlog_outrec(StringInfo buf, XLogReaderState *record) { - int block_id; - appendStringInfo(buf, "prev %X/%X; xid %u", (uint32) (XLogRecGetPrev(record) >> 32), (uint32) XLogRecGetPrev(record), @@ -10245,6 +10258,19 @@ xlog_outrec(StringInfo buf, XLogReaderState *record) appendStringInfo(buf, "; len %u", XLogRecGetDataLen(record)); + xlog_block_info(buf, record); +} +#endif /* WAL_DEBUG */ + +/* + * Returns a string giving information about all the blocks in an + * XLogRecord. 
+ */ +static void +xlog_block_info(StringInfo buf, XLogReaderState *record) +{ + int block_id; + /* decode block references */ for (block_id = 0; block_id <= record->max_block_id; block_id++) { @@ -10271,7 +10297,6 @@ xlog_outrec(StringInfo buf, XLogReaderState *record) appendStringInfoString(buf, " FPW"); } } -#endif /* WAL_DEBUG */ /* * Returns a string describing an XLogRecord, consisting of its identity @@ -11752,6 +11777,7 @@ rm_redo_error_callback(void *arg) initStringInfo(&buf); xlog_outdesc(&buf, record); + xlog_block_info(&buf, record); /* translator: %s is a WAL record description */ errcontext("WAL redo at %X/%X for %s", @@ -12504,7 +12530,7 @@ StartupRequestWalReceiverRestart(void) if (currentSource == XLOG_FROM_STREAM && WalRcvRunning()) { ereport(LOG, - (errmsg("wal receiver process shutdown requested"))); + (errmsg("WAL receiver process shutdown requested"))); pendingWalRcvRestart = true; } diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index 8f8734dc1d4ec..cae93ab69dd71 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -202,10 +202,10 @@ RestoreArchivedFile(char *path, const char *xlogfname, else elevel = FATAL; ereport(elevel, - (errmsg("archive file \"%s\" has wrong size: %lu instead of %lu", + (errmsg("archive file \"%s\" has wrong size: %lld instead of %lld", xlogfname, - (unsigned long) stat_buf.st_size, - (unsigned long) expectedSize))); + (long long int) stat_buf.st_size, + (long long int) expectedSize))); return false; } else diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index c526bb19281e0..1f0e4e01e69b1 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -1019,6 +1019,63 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, return recptr; } +/* + * Like log_newpage(), but allows logging multiple pages in one operation. 
+ * It is more efficient than calling log_newpage() for each page separately, + * because we can write multiple pages in a single WAL record. + */ +void +log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages, + BlockNumber *blknos, Page *pages, bool page_std) +{ + int flags; + XLogRecPtr recptr; + int i; + int j; + + flags = REGBUF_FORCE_IMAGE; + if (page_std) + flags |= REGBUF_STANDARD; + + /* + * Iterate over all the pages. They are collected into batches of + * XLR_MAX_BLOCK_ID pages, and a single WAL-record is written for each + * batch. + */ + XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0); + + i = 0; + while (i < num_pages) + { + int batch_start = i; + int nbatch; + + XLogBeginInsert(); + + nbatch = 0; + while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages) + { + XLogRegisterBlock(nbatch, rnode, forkNum, blknos[i], pages[i], flags); + i++; + nbatch++; + } + + recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI); + + for (j = batch_start; j < i; j++) + { + /* + * The page may be uninitialized. If so, we can't set the LSN because that + * would corrupt the page. + */ + if (!PageIsNew(pages[j])) + { + PageSetLSN(pages[j], recptr); + } + } + } +} + /* * Write a WAL record containing a full image of a page. 
* diff --git a/src/backend/bootstrap/bootparse.y b/src/backend/bootstrap/bootparse.y index 5eaca279ee83f..6bb0c6ed1ea9c 100644 --- a/src/backend/bootstrap/bootparse.y +++ b/src/backend/bootstrap/bootparse.y @@ -18,16 +18,10 @@ #include -#include "access/attnum.h" -#include "access/htup.h" -#include "access/itup.h" -#include "access/tupdesc.h" #include "bootstrap/bootstrap.h" -#include "catalog/catalog.h" #include "catalog/heap.h" #include "catalog/namespace.h" #include "catalog/pg_am.h" -#include "catalog/pg_attribute.h" #include "catalog/pg_authid.h" #include "catalog/pg_class.h" #include "catalog/pg_namespace.h" @@ -36,20 +30,7 @@ #include "commands/defrem.h" #include "miscadmin.h" #include "nodes/makefuncs.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "rewrite/prs2lock.h" -#include "storage/block.h" -#include "storage/fd.h" -#include "storage/ipc.h" -#include "storage/itemptr.h" -#include "storage/off.h" -#include "storage/smgr.h" -#include "tcop/dest.h" #include "utils/memutils.h" -#include "utils/rel.h" /* diff --git a/src/backend/bootstrap/bootscanner.l b/src/backend/bootstrap/bootscanner.l index 1048e70d05377..6a0bed6c8d604 100644 --- a/src/backend/bootstrap/bootscanner.l +++ b/src/backend/bootstrap/bootscanner.l @@ -15,25 +15,8 @@ */ #include "postgres.h" -#include "access/attnum.h" -#include "access/htup.h" -#include "access/itup.h" -#include "access/tupdesc.h" #include "bootstrap/bootstrap.h" -#include "catalog/pg_am.h" -#include "catalog/pg_attribute.h" -#include "catalog/pg_class.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "parser/scansup.h" -#include "rewrite/prs2lock.h" -#include "storage/block.h" -#include "storage/fd.h" -#include "storage/itemptr.h" -#include "storage/off.h" -#include "utils/rel.h" +#include "utils/guc.h" /* Not needed now that this file is compiled as part of 
bootparse. */ /* #include "bootparse.h" */ @@ -66,7 +49,7 @@ static int yyline = 1; /* line number for error reporting */ id [-A-Za-z0-9_]+ -sid \"([^\"])*\" +sid \'([^']|\'\')*\' /* * Keyword tokens return the keyword text (as a constant string) in yylval.kw, @@ -120,14 +103,12 @@ NOT { yylval.kw = "NOT"; return XNOT; } NULL { yylval.kw = "NULL"; return XNULL; } {id} { - yylval.str = scanstr(yytext); + yylval.str = pstrdup(yytext); return ID; } {sid} { - /* leading and trailing quotes are not passed to scanstr */ - yytext[strlen(yytext) - 1] = '\0'; - yylval.str = scanstr(yytext+1); - yytext[strlen(yytext)] = '"'; /* restore yytext */ + /* strip quotes and escapes */ + yylval.str = DeescapeQuotedString(yytext); return ID; } diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index dc5f442397a4c..ef3105af44bbe 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -845,17 +845,15 @@ sub print_bki_insert # since that represents a NUL char in C code. $bki_value = '' if $bki_value eq '\0'; - # Handle single quotes by doubling them, and double quotes by - # converting them to octal escapes, because that's what the + # Handle single quotes by doubling them, because that's what the # bootstrap scanner requires. We do not process backslashes # specially; this allows escape-string-style backslash escapes # to be used in catalog data. $bki_value =~ s/'/''/g; - $bki_value =~ s/"/\\042/g; # Quote value if needed. We need not quote values that satisfy # the "id" pattern in bootscanner.l, currently "[-A-Za-z0-9_]+". 
- $bki_value = sprintf(qq'"%s"', $bki_value) + $bki_value = sprintf("'%s'", $bki_value) if length($bki_value) == 0 or $bki_value =~ /[^-A-Za-z0-9_]/; diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 117e3fdef7dcb..0974f3e23a237 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -3311,18 +3311,10 @@ validate_index_callback(ItemPointer itemptr, void *opaque) * index_set_state_flags - adjust pg_index state flags * * This is used during CREATE/DROP INDEX CONCURRENTLY to adjust the pg_index - * flags that denote the index's state. Because the update is not - * transactional and will not roll back on error, this must only be used as - * the last step in a transaction that has not made any transactional catalog - * updates! + * flags that denote the index's state. * - * Note that heap_inplace_update does send a cache inval message for the + * Note that CatalogTupleUpdate() sends a cache invalidation message for the * tuple, so other sessions will hear about the update as soon as we commit. - * - * NB: In releases prior to PostgreSQL 9.4, the use of a non-transactional - * update here would have been unsafe; now that MVCC rules apply even for - * system catalog scans, we could potentially use a transactional update here - * instead. */ void index_set_state_flags(Oid indexId, IndexStateFlagsAction action) @@ -3331,9 +3323,6 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action) HeapTuple indexTuple; Form_pg_index indexForm; - /* Assert that current xact hasn't done any transactional updates */ - Assert(GetTopTransactionIdIfAny() == InvalidTransactionId); - /* Open pg_index and fetch a writable copy of the index's tuple */ pg_index = table_open(IndexRelationId, RowExclusiveLock); @@ -3397,8 +3386,8 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action) break; } - /* ... and write it back in-place */ - heap_inplace_update(pg_index, indexTuple); + /* ... 
and update it */ + CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple); table_close(pg_index, RowExclusiveLock); } diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 0152e3869abe2..391a9b225db79 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -1473,8 +1473,7 @@ FunctionIsVisible(Oid funcid) * Given a possibly-qualified operator name and exact input datatypes, * look up the operator. Returns InvalidOid if not found. * - * Pass oprleft = InvalidOid for a prefix op, oprright = InvalidOid for - * a postfix op. + * Pass oprleft = InvalidOid for a prefix op. * * If the operator name is not schema-qualified, it is sought in the current * namespace search path. If the name is schema-qualified and the given @@ -1580,8 +1579,8 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright) * namespace case, we arrange for entries in earlier namespaces to mask * identical entries in later namespaces. * - * The returned items always have two args[] entries --- one or the other - * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too. + * The returned items always have two args[] entries --- the first will be + * InvalidOid for a prefix oprkind. nargs is always 2, too. 
*/ FuncCandidateList OpernameGetCandidates(List *names, char oprkind, bool missing_schema_ok) diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 6dfe1be2cc008..4815f6ca7e3a6 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -1506,7 +1506,7 @@ get_object_address_attribute(ObjectType objtype, List *object, ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("column name must be qualified"))); - attname = strVal(lfirst(list_tail(object))); + attname = strVal(llast(object)); relname = list_truncate(list_copy(object), list_length(object) - 1); /* XXX no missing_ok support here */ relation = relation_openrv(makeRangeVarFromNameList(relname), lockmode); diff --git a/src/backend/catalog/pg_cast.c b/src/backend/catalog/pg_cast.c index c03e82d74fefe..d3f2db41863b9 100644 --- a/src/backend/catalog/pg_cast.c +++ b/src/backend/catalog/pg_cast.c @@ -3,7 +3,7 @@ * pg_cast.c * routines to support manipulation of the pg_cast relation * - * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index f7c07c9b5b891..904cb8ef820c1 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -245,7 +245,7 @@ OperatorShellMake(const char *operatorName, values[Anum_pg_operator_oprname - 1] = NameGetDatum(&oname); values[Anum_pg_operator_oprnamespace - 1] = ObjectIdGetDatum(operatorNamespace); values[Anum_pg_operator_oprowner - 1] = ObjectIdGetDatum(GetUserId()); - values[Anum_pg_operator_oprkind - 1] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); + values[Anum_pg_operator_oprkind - 1] = CharGetDatum(leftTypeId ? 
'b' : 'l'); values[Anum_pg_operator_oprcanmerge - 1] = BoolGetDatum(false); values[Anum_pg_operator_oprcanhash - 1] = BoolGetDatum(false); values[Anum_pg_operator_oprleft - 1] = ObjectIdGetDatum(leftTypeId); @@ -494,7 +494,7 @@ OperatorCreate(const char *operatorName, values[Anum_pg_operator_oprname - 1] = NameGetDatum(&oname); values[Anum_pg_operator_oprnamespace - 1] = ObjectIdGetDatum(operatorNamespace); values[Anum_pg_operator_oprowner - 1] = ObjectIdGetDatum(GetUserId()); - values[Anum_pg_operator_oprkind - 1] = CharGetDatum(leftTypeId ? (rightTypeId ? 'b' : 'r') : 'l'); + values[Anum_pg_operator_oprkind - 1] = CharGetDatum(leftTypeId ? 'b' : 'l'); values[Anum_pg_operator_oprcanmerge - 1] = BoolGetDatum(canMerge); values[Anum_pg_operator_oprcanhash - 1] = BoolGetDatum(canHash); values[Anum_pg_operator_oprleft - 1] = ObjectIdGetDatum(leftTypeId); diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 40d65dc6bab1f..1dd9ecc0634aa 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -249,6 +249,9 @@ ProcedureCreate(const char *procedureName, elog(ERROR, "variadic parameter must be last"); break; case PROARGMODE_OUT: + if (OidIsValid(variadicType) && prokind == PROKIND_PROCEDURE) + elog(ERROR, "variadic parameter must be last"); + break; case PROARGMODE_TABLE: /* okay */ break; @@ -462,10 +465,12 @@ ProcedureCreate(const char *procedureName, if (isnull) proargmodes = PointerGetDatum(NULL); /* just to be sure */ - n_old_arg_names = get_func_input_arg_names(proargnames, + n_old_arg_names = get_func_input_arg_names(prokind, + proargnames, proargmodes, &old_arg_names); - n_new_arg_names = get_func_input_arg_names(parameterNames, + n_new_arg_names = get_func_input_arg_names(prokind, + parameterNames, parameterModes, &new_arg_names); for (j = 0; j < n_old_arg_names; j++) @@ -908,8 +913,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) (ParserSetupHook) sql_fn_parser_setup, pinfo, NULL); - querytree_list = 
list_concat(querytree_list, - querytree_sublist); + querytree_list = lappend(querytree_list, + querytree_sublist); } check_sql_fn_statements(querytree_list); diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c index 311d46225adf0..ca78d395181a9 100644 --- a/src/backend/catalog/pg_subscription.c +++ b/src/backend/catalog/pg_subscription.c @@ -328,20 +328,16 @@ UpdateSubscriptionRelState(Oid subid, Oid relid, char state, /* * Get state of subscription table. * - * Returns SUBREL_STATE_UNKNOWN when not found and missing_ok is true. + * Returns SUBREL_STATE_UNKNOWN when the table is not in the subscription. */ char -GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn, - bool missing_ok) +GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn) { - Relation rel; HeapTuple tup; char substate; bool isnull; Datum d; - rel = table_open(SubscriptionRelRelationId, AccessShareLock); - /* Try finding the mapping. */ tup = SearchSysCache2(SUBSCRIPTIONRELMAP, ObjectIdGetDatum(relid), @@ -349,22 +345,14 @@ GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn, if (!HeapTupleIsValid(tup)) { - if (missing_ok) - { - table_close(rel, AccessShareLock); - *sublsn = InvalidXLogRecPtr; - return SUBREL_STATE_UNKNOWN; - } - - elog(ERROR, "subscription table %u in subscription %u does not exist", - relid, subid); + *sublsn = InvalidXLogRecPtr; + return SUBREL_STATE_UNKNOWN; } /* Get the state. 
*/ - d = SysCacheGetAttr(SUBSCRIPTIONRELMAP, tup, - Anum_pg_subscription_rel_srsubstate, &isnull); - Assert(!isnull); - substate = DatumGetChar(d); + substate = ((Form_pg_subscription_rel) GETSTRUCT(tup))->srsubstate; + + /* Get the LSN */ d = SysCacheGetAttr(SUBSCRIPTIONRELMAP, tup, Anum_pg_subscription_rel_srsublsn, &isnull); if (isnull) @@ -374,7 +362,6 @@ GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn, /* Cleanup */ ReleaseSysCache(tup); - table_close(rel, AccessShareLock); return substate; } diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index ed4f3f142d87d..85cd147e21bbf 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -796,6 +796,15 @@ CREATE VIEW pg_stat_replication AS JOIN pg_stat_get_wal_senders() AS W ON (S.pid = W.pid) LEFT JOIN pg_authid AS U ON (S.usesysid = U.oid); +CREATE VIEW pg_stat_replication_slots AS + SELECT + s.slot_name, + s.spill_txns, + s.spill_count, + s.spill_bytes, + s.stats_reset + FROM pg_stat_get_replication_slots() AS s; + CREATE VIEW pg_stat_slru AS SELECT s.name, @@ -979,6 +988,12 @@ CREATE VIEW pg_stat_bgwriter AS pg_stat_get_buf_alloc() AS buffers_alloc, pg_stat_get_bgwriter_stat_reset_time() AS stats_reset; +CREATE VIEW pg_stat_wal AS + SELECT + w.wal_buffers_full, + w.stats_reset + FROM pg_stat_get_wal() w; + CREATE VIEW pg_stat_progress_analyze AS SELECT S.pid AS pid, S.datid AS datid, D.datname AS datname, @@ -1447,6 +1462,7 @@ REVOKE EXECUTE ON FUNCTION pg_stat_reset_shared(text) FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset_slru(text) FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset_single_table_counters(oid) FROM public; REVOKE EXECUTE ON FUNCTION pg_stat_reset_single_function_counters(oid) FROM public; +REVOKE EXECUTE ON FUNCTION pg_stat_reset_replication_slot(text) FROM public; REVOKE EXECUTE ON FUNCTION lo_import(text) FROM public; REVOKE EXECUTE ON FUNCTION lo_import(text, oid) FROM public; diff 
--git a/src/backend/commands/async.c b/src/backend/commands/async.c index cb341365df465..8dbcace3f9315 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -554,9 +554,8 @@ AsyncShmemInit(void) */ NotifyCtl->PagePrecedes = asyncQueuePagePrecedes; SimpleLruInit(NotifyCtl, "Notify", NUM_NOTIFY_BUFFERS, 0, - NotifySLRULock, "pg_notify", LWTRANCHE_NOTIFY_BUFFER); - /* Override default assumption that writes should be fsync'd */ - NotifyCtl->do_fsync = false; + NotifySLRULock, "pg_notify", LWTRANCHE_NOTIFY_BUFFER, + SYNC_HANDLER_NONE); if (!found) { diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index db7d24a511e3b..36ddcdccdb837 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -1159,6 +1159,8 @@ ProcessCopyOptions(ParseState *pstate, List *options) { bool format_specified = false; + bool freeze_specified = false; + bool header_specified = false; ListCell *option; /* Support external use for option sanity checking */ @@ -1198,11 +1200,12 @@ ProcessCopyOptions(ParseState *pstate, } else if (strcmp(defel->defname, "freeze") == 0) { - if (cstate->freeze) + if (freeze_specified) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"), parser_errposition(pstate, defel->location))); + freeze_specified = true; cstate->freeze = defGetBoolean(defel); } else if (strcmp(defel->defname, "delimiter") == 0) @@ -1225,11 +1228,12 @@ ProcessCopyOptions(ParseState *pstate, } else if (strcmp(defel->defname, "header") == 0) { - if (cstate->header_line) + if (header_specified) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"), parser_errposition(pstate, defel->location))); + header_specified = true; cstate->header_line = defGetBoolean(defel); } else if (strcmp(defel->defname, "quote") == 0) @@ -2485,9 +2489,6 @@ CopyMultiInsertBufferFlush(CopyMultiInsertInfo *miinfo, ResultRelInfo *resultRelInfo = buffer->resultRelInfo; TupleTableSlot 
**slots = buffer->slots; - /* Set es_result_relation_info to the ResultRelInfo we're flushing. */ - estate->es_result_relation_info = resultRelInfo; - /* * Print error context information correctly, if one of the operations * below fail. @@ -2520,7 +2521,8 @@ CopyMultiInsertBufferFlush(CopyMultiInsertInfo *miinfo, cstate->cur_lineno = buffer->linenos[i]; recheckIndexes = - ExecInsertIndexTuples(buffer->slots[i], estate, false, NULL, + ExecInsertIndexTuples(resultRelInfo, + buffer->slots[i], estate, false, NULL, NIL); ExecARInsertTriggers(estate, resultRelInfo, slots[i], recheckIndexes, @@ -2723,6 +2725,7 @@ CopyFrom(CopyState cstate) bool leafpart_use_multi_insert = false; Assert(cstate->rel); + Assert(list_length(cstate->range_table) == 1); /* * The target must be a plain, foreign, or partitioned relation, or have @@ -2825,25 +2828,15 @@ CopyFrom(CopyState cstate) * index-entry-making machinery. (There used to be a huge amount of code * here that basically duplicated execUtils.c ...) */ - resultRelInfo = makeNode(ResultRelInfo); - InitResultRelInfo(resultRelInfo, - cstate->rel, - 1, /* must match rel's position in range_table */ - NULL, - 0); - target_resultRelInfo = resultRelInfo; + ExecInitRangeTable(estate, cstate->range_table); + resultRelInfo = target_resultRelInfo = makeNode(ResultRelInfo); + ExecInitResultRelation(estate, resultRelInfo, 1); /* Verify the named relation is a valid target for INSERT */ CheckValidResultRel(resultRelInfo, CMD_INSERT); ExecOpenIndices(resultRelInfo, false); - estate->es_result_relations = resultRelInfo; - estate->es_num_result_relations = 1; - estate->es_result_relation_info = resultRelInfo; - - ExecInitRangeTable(estate, cstate->range_table); - /* * Set up a ModifyTableState so we can let FDW(s) init themselves for * foreign-table result relation(s). 
@@ -2852,7 +2845,7 @@ CopyFrom(CopyState cstate) mtstate->ps.plan = NULL; mtstate->ps.state = estate; mtstate->operation = CMD_INSERT; - mtstate->resultRelInfo = estate->es_result_relations; + mtstate->resultRelInfo = resultRelInfo; if (resultRelInfo->ri_FdwRoutine != NULL && resultRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL) @@ -3111,44 +3104,22 @@ CopyFrom(CopyState cstate) prevResultRelInfo = resultRelInfo; } - /* - * For ExecInsertIndexTuples() to work on the partition's indexes - */ - estate->es_result_relation_info = resultRelInfo; - /* * If we're capturing transition tuples, we might need to convert - * from the partition rowtype to root rowtype. + * from the partition rowtype to root rowtype. But if there are no + * BEFORE triggers on the partition that could change the tuple, + * we can just remember the original unconverted tuple to avoid a + * needless round trip conversion. */ if (cstate->transition_capture != NULL) - { - if (has_before_insert_row_trig) - { - /* - * If there are any BEFORE triggers on the partition, - * we'll have to be ready to convert their result back to - * tuplestore format. - */ - cstate->transition_capture->tcs_original_insert_tuple = NULL; - cstate->transition_capture->tcs_map = - resultRelInfo->ri_PartitionInfo->pi_PartitionToRootMap; - } - else - { - /* - * Otherwise, just remember the original unconverted - * tuple, to avoid a needless round trip conversion. - */ - cstate->transition_capture->tcs_original_insert_tuple = myslot; - cstate->transition_capture->tcs_map = NULL; - } - } + cstate->transition_capture->tcs_original_insert_tuple = + !has_before_insert_row_trig ? myslot : NULL; /* * We might need to convert from the root rowtype to the partition * rowtype. 
*/ - map = resultRelInfo->ri_PartitionInfo->pi_RootToPartitionMap; + map = resultRelInfo->ri_RootToPartitionMap; if (insertMethod == CIM_SINGLE || !leafpart_use_multi_insert) { /* non batch insert */ @@ -3156,7 +3127,7 @@ CopyFrom(CopyState cstate) { TupleTableSlot *new_slot; - new_slot = resultRelInfo->ri_PartitionInfo->pi_PartitionTupleSlot; + new_slot = resultRelInfo->ri_PartitionTupleSlot; myslot = execute_attr_map_slot(map->attrMap, myslot, new_slot); } } @@ -3220,7 +3191,8 @@ CopyFrom(CopyState cstate) /* Compute stored generated columns */ if (resultRelInfo->ri_RelationDesc->rd_att->constr && resultRelInfo->ri_RelationDesc->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, myslot, CMD_INSERT); + ExecComputeStoredGenerated(resultRelInfo, estate, myslot, + CMD_INSERT); /* * If the target is a plain table, check the constraints of @@ -3236,7 +3208,7 @@ CopyFrom(CopyState cstate) * we don't need to if there's no BR trigger defined on the * partition. */ - if (resultRelInfo->ri_PartitionCheck && + if (resultRelInfo->ri_RelationDesc->rd_rel->relispartition && (proute == NULL || has_before_insert_row_trig)) ExecPartitionCheck(resultRelInfo, myslot, estate, true); @@ -3291,7 +3263,8 @@ CopyFrom(CopyState cstate) myslot, mycid, ti_options, bistate); if (resultRelInfo->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(myslot, + recheckIndexes = ExecInsertIndexTuples(resultRelInfo, + myslot, estate, false, NULL, @@ -3355,14 +3328,13 @@ CopyFrom(CopyState cstate) if (insertMethod != CIM_SINGLE) CopyMultiInsertInfoCleanup(&multiInsertInfo); - ExecCloseIndices(target_resultRelInfo); - /* Close all the partitioned tables, leaf partitions, and their indices */ if (proute) ExecCleanupTupleRouting(mtstate, proute); - /* Close any trigger target relations */ - ExecCleanUpTriggerState(estate); + /* Close the result relations, including any trigger target relations */ + ExecCloseResultRelations(estate); + ExecCloseRangeTableRelations(estate); 
FreeExecutorState(estate); diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 7844880170aeb..8bb17c34f5b06 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -1646,9 +1646,15 @@ EventTriggerAlterTableEnd(void) /* If no subcommands, don't collect */ if (list_length(currentEventTriggerState->currentCommand->d.alterTable.subcmds) != 0) { + MemoryContext oldcxt; + + oldcxt = MemoryContextSwitchTo(currentEventTriggerState->cxt); + currentEventTriggerState->commandList = lappend(currentEventTriggerState->commandList, currentEventTriggerState->currentCommand); + + MemoryContextSwitchTo(oldcxt); } else pfree(currentEventTriggerState->currentCommand); diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index c98c9b5547c5a..43f9b01e833b1 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -769,27 +769,24 @@ ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc) { ResultRelInfo *rInfo; bool show_relname; - int numrels = queryDesc->estate->es_num_result_relations; - int numrootrels = queryDesc->estate->es_num_root_result_relations; + List *resultrels; List *routerels; List *targrels; - int nr; ListCell *l; + resultrels = queryDesc->estate->es_opened_result_relations; routerels = queryDesc->estate->es_tuple_routing_result_relations; targrels = queryDesc->estate->es_trig_target_relations; ExplainOpenGroup("Triggers", "Triggers", false, es); - show_relname = (numrels > 1 || numrootrels > 0 || + show_relname = (list_length(resultrels) > 1 || routerels != NIL || targrels != NIL); - rInfo = queryDesc->estate->es_result_relations; - for (nr = 0; nr < numrels; rInfo++, nr++) - report_triggers(rInfo, show_relname, es); - - rInfo = queryDesc->estate->es_root_result_relations; - for (nr = 0; nr < numrootrels; rInfo++, nr++) + foreach(l, resultrels) + { + rInfo = (ResultRelInfo *) lfirst(l); report_triggers(rInfo, show_relname, es); + } 
foreach(l, routerels) { @@ -2771,14 +2768,14 @@ show_incremental_sort_group_info(IncrementalSortGroupInfo *groupInfo, groupInfo->groupCount); /* plural/singular based on methodNames size */ if (list_length(methodNames) > 1) - appendStringInfo(es->str, "s: "); + appendStringInfoString(es->str, "s: "); else - appendStringInfo(es->str, ": "); + appendStringInfoString(es->str, ": "); foreach(methodCell, methodNames) { - appendStringInfo(es->str, "%s", (char *) methodCell->ptr_value); + appendStringInfoString(es->str, (char *) methodCell->ptr_value); if (foreach_current_index(methodCell) < list_length(methodNames) - 1) - appendStringInfo(es->str, ", "); + appendStringInfoString(es->str, ", "); } if (groupInfo->maxMemorySpaceUsed > 0) @@ -2830,7 +2827,7 @@ show_incremental_sort_group_info(IncrementalSortGroupInfo *groupInfo, ExplainPropertyInteger("Peak Sort Space Used", "kB", groupInfo->maxMemorySpaceUsed, es); - ExplainCloseGroup("Sort Spaces", memoryName.data, true, es); + ExplainCloseGroup("Sort Space", memoryName.data, true, es); } if (groupInfo->maxDiskSpaceUsed > 0) { @@ -2847,7 +2844,7 @@ show_incremental_sort_group_info(IncrementalSortGroupInfo *groupInfo, ExplainPropertyInteger("Peak Sort Space Used", "kB", groupInfo->maxDiskSpaceUsed, es); - ExplainCloseGroup("Sort Spaces", diskName.data, true, es); + ExplainCloseGroup("Sort Space", diskName.data, true, es); } ExplainCloseGroup("Incremental Sort Groups", groupName.data, true, es); @@ -2885,11 +2882,11 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate, if (prefixsortGroupInfo->groupCount > 0) { if (es->format == EXPLAIN_FORMAT_TEXT) - appendStringInfo(es->str, "\n"); + appendStringInfoChar(es->str, '\n'); show_incremental_sort_group_info(prefixsortGroupInfo, "Pre-sorted", true, es); } if (es->format == EXPLAIN_FORMAT_TEXT) - appendStringInfo(es->str, "\n"); + appendStringInfoChar(es->str, '\n'); } if (incrsortstate->shared_info != NULL) @@ -2928,11 +2925,11 @@ 
show_incremental_sort_info(IncrementalSortState *incrsortstate, if (prefixsortGroupInfo->groupCount > 0) { if (es->format == EXPLAIN_FORMAT_TEXT) - appendStringInfo(es->str, "\n"); + appendStringInfoChar(es->str, '\n'); show_incremental_sort_group_info(prefixsortGroupInfo, "Pre-sorted", true, es); } if (es->format == EXPLAIN_FORMAT_TEXT) - appendStringInfo(es->str, "\n"); + appendStringInfoChar(es->str, '\n'); if (es->workers_state) ExplainCloseWorker(n, es); diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index e236581a8e06c..c3ce480c8f568 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -194,8 +194,8 @@ interpret_function_parameter_list(ParseState *pstate, Oid *requiredResultType) { int parameterCount = list_length(parameters); - Oid *inTypes; - int inCount = 0; + Oid *sigArgTypes; + int sigArgCount = 0; Datum *allTypes; Datum *paramModes; Datum *paramNames; @@ -209,7 +209,7 @@ interpret_function_parameter_list(ParseState *pstate, *variadicArgType = InvalidOid; /* default result */ *requiredResultType = InvalidOid; /* default result */ - inTypes = (Oid *) palloc(parameterCount * sizeof(Oid)); + sigArgTypes = (Oid *) palloc(parameterCount * sizeof(Oid)); allTypes = (Datum *) palloc(parameterCount * sizeof(Datum)); paramModes = (Datum *) palloc(parameterCount * sizeof(Datum)); paramNames = (Datum *) palloc0(parameterCount * sizeof(Datum)); @@ -281,25 +281,21 @@ interpret_function_parameter_list(ParseState *pstate, errmsg("functions cannot accept set arguments"))); } - if (objtype == OBJECT_PROCEDURE) - { - if (fp->mode == FUNC_PARAM_OUT) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("procedures cannot have OUT arguments"), - errhint("INOUT arguments are permitted."))); - } - /* handle input parameters */ if (fp->mode != FUNC_PARAM_OUT && fp->mode != FUNC_PARAM_TABLE) + isinput = true; + + /* handle signature parameters */ + if (fp->mode == FUNC_PARAM_IN || 
fp->mode == FUNC_PARAM_INOUT || + (objtype == OBJECT_PROCEDURE && fp->mode == FUNC_PARAM_OUT) || + fp->mode == FUNC_PARAM_VARIADIC) { - /* other input parameters can't follow a VARIADIC parameter */ + /* other signature parameters can't follow a VARIADIC parameter */ if (varCount > 0) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("VARIADIC parameter must be the last input parameter"))); - inTypes[inCount++] = toid; - isinput = true; + errmsg("VARIADIC parameter must be the last signature parameter"))); + sigArgTypes[sigArgCount++] = toid; } /* handle output parameters */ @@ -429,7 +425,7 @@ interpret_function_parameter_list(ParseState *pstate, } /* Now construct the proper outputs as needed */ - *parameterTypes = buildoidvector(inTypes, inCount); + *parameterTypes = buildoidvector(sigArgTypes, sigArgCount); if (outCount > 0 || varCount > 0) { @@ -2067,6 +2063,9 @@ ExecuteCallStmt(CallStmt *stmt, ParamListInfo params, bool atomic, DestReceiver int nargs; int i; AclResult aclresult; + Oid *argtypes; + char **argnames; + char *argmodes; FmgrInfo flinfo; CallContext *callcontext; EState *estate; @@ -2127,6 +2126,8 @@ ExecuteCallStmt(CallStmt *stmt, ParamListInfo params, bool atomic, DestReceiver tp); nargs = list_length(fexpr->args); + get_func_arg_info(tp, &argtypes, &argnames, &argmodes); + ReleaseSysCache(tp); /* safety check; see ExecInitFunc() */ @@ -2156,16 +2157,24 @@ ExecuteCallStmt(CallStmt *stmt, ParamListInfo params, bool atomic, DestReceiver i = 0; foreach(lc, fexpr->args) { - ExprState *exprstate; - Datum val; - bool isnull; + if (argmodes && argmodes[i] == PROARGMODE_OUT) + { + fcinfo->args[i].value = 0; + fcinfo->args[i].isnull = true; + } + else + { + ExprState *exprstate; + Datum val; + bool isnull; - exprstate = ExecPrepareExpr(lfirst(lc), estate); + exprstate = ExecPrepareExpr(lfirst(lc), estate); - val = ExecEvalExprSwitchContext(exprstate, econtext, &isnull); + val = ExecEvalExprSwitchContext(exprstate, econtext, &isnull); 
- fcinfo->args[i].value = val; - fcinfo->args[i].isnull = isnull; + fcinfo->args[i].value = val; + fcinfo->args[i].isnull = isnull; + } i++; } diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index f1b5f87e6a8cc..75552c64ed23c 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -1002,8 +1002,7 @@ DefineIndex(Oid relationId, key->partattrs[i] - 1); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("insufficient columns in %s constraint definition", - constraint_type), + errmsg("unique constraint on partitioned table must include all partitioning columns"), errdetail("%s constraint on table \"%s\" lacks column \"%s\" which is part of the partition key.", constraint_type, RelationGetRelationName(rel), NameStr(att->attname)))); @@ -3015,6 +3014,13 @@ ReindexRelationConcurrently(Oid relationOid, int options) char *relationName = NULL; char *relationNamespace = NULL; PGRUsage ru0; + const int progress_index[] = { + PROGRESS_CREATEIDX_COMMAND, + PROGRESS_CREATEIDX_PHASE, + PROGRESS_CREATEIDX_INDEX_OID, + PROGRESS_CREATEIDX_ACCESS_METHOD_OID + }; + int64 progress_vals[4]; /* * Create a memory context that will survive forced transaction commits we @@ -3294,12 +3300,11 @@ ReindexRelationConcurrently(Oid relationOid, int options) pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX, RelationGetRelid(heapRel)); - pgstat_progress_update_param(PROGRESS_CREATEIDX_COMMAND, - PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY); - pgstat_progress_update_param(PROGRESS_CREATEIDX_INDEX_OID, - indexId); - pgstat_progress_update_param(PROGRESS_CREATEIDX_ACCESS_METHOD_OID, - indexRel->rd_rel->relam); + progress_vals[0] = PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY; + progress_vals[1] = 0; /* initializing */ + progress_vals[2] = indexId; + progress_vals[3] = indexRel->rd_rel->relam; + pgstat_progress_update_multi_param(4, progress_index, progress_vals); /* Choose a temporary relation name for the 
new index */ concurrentName = ChooseRelationName(get_rel_name(indexId), @@ -3403,12 +3408,12 @@ ReindexRelationConcurrently(Oid relationOid, int options) WaitForLockersMultiple(lockTags, ShareLock, true); CommitTransactionCommand(); - forboth(lc, indexIds, lc2, newIndexIds) + foreach(lc, newIndexIds) { - Relation indexRel; - Oid oldIndexId = lfirst_oid(lc); - Oid newIndexId = lfirst_oid(lc2); + Relation newIndexRel; + Oid newIndexId = lfirst_oid(lc); Oid heapId; + Oid indexam; /* Start new transaction for this index's concurrent build */ StartTransactionCommand(); @@ -3427,9 +3432,21 @@ ReindexRelationConcurrently(Oid relationOid, int options) * Index relation has been closed by previous commit, so reopen it to * get its information. */ - indexRel = index_open(oldIndexId, ShareUpdateExclusiveLock); - heapId = indexRel->rd_index->indrelid; - index_close(indexRel, NoLock); + newIndexRel = index_open(newIndexId, ShareUpdateExclusiveLock); + heapId = newIndexRel->rd_index->indrelid; + indexam = newIndexRel->rd_rel->relam; + index_close(newIndexRel, NoLock); + + /* + * Update progress for the index to build, with the correct parent + * table involved. 
+ */ + pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX, heapId); + progress_vals[0] = PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY; + progress_vals[1] = PROGRESS_CREATEIDX_PHASE_BUILD; + progress_vals[2] = newIndexId; + progress_vals[3] = indexam; + pgstat_progress_update_multi_param(4, progress_index, progress_vals); /* Perform concurrent build of new index */ index_concurrently_build(heapId, newIndexId); @@ -3458,6 +3475,8 @@ ReindexRelationConcurrently(Oid relationOid, int options) Oid heapId; TransactionId limitXmin; Snapshot snapshot; + Relation newIndexRel; + Oid indexam; StartTransactionCommand(); @@ -3468,8 +3487,6 @@ ReindexRelationConcurrently(Oid relationOid, int options) */ CHECK_FOR_INTERRUPTS(); - heapId = IndexGetRelation(newIndexId, false); - /* * Take the "reference snapshot" that will be used by validate_index() * to filter candidate tuples. @@ -3477,6 +3494,26 @@ ReindexRelationConcurrently(Oid relationOid, int options) snapshot = RegisterSnapshot(GetTransactionSnapshot()); PushActiveSnapshot(snapshot); + /* + * Index relation has been closed by previous commit, so reopen it to + * get its information. + */ + newIndexRel = index_open(newIndexId, ShareUpdateExclusiveLock); + heapId = newIndexRel->rd_index->indrelid; + indexam = newIndexRel->rd_rel->relam; + index_close(newIndexRel, NoLock); + + /* + * Update progress for the index to build, with the correct parent + * table involved. 
+ */ + pgstat_progress_start_command(PROGRESS_COMMAND_CREATE_INDEX, heapId); + progress_vals[0] = PROGRESS_CREATEIDX_COMMAND_REINDEX_CONCURRENTLY; + progress_vals[1] = PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN; + progress_vals[2] = newIndexId; + progress_vals[3] = indexam; + pgstat_progress_update_multi_param(4, progress_index, progress_vals); + validate_index(heapId, newIndexId, snapshot); /* @@ -3611,7 +3648,7 @@ ReindexRelationConcurrently(Oid relationOid, int options) */ pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE, - PROGRESS_CREATEIDX_PHASE_WAIT_4); + PROGRESS_CREATEIDX_PHASE_WAIT_5); WaitForLockersMultiple(lockTags, AccessExclusiveLock, true); PushActiveSnapshot(GetTransactionSnapshot()); diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 28395d5946f3a..c46db7d11cb47 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -1212,14 +1212,14 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid, (OidIsValid(member->righttype) && member->righttype != typeoid)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("associated data types for opclass options parsing functions must match opclass input type"))); + errmsg("associated data types for operator class options parsing functions must match opclass input type"))); } else { if (member->lefttype != member->righttype) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("left and right associated data types for opclass options parsing functions must match"))); + errmsg("left and right associated data types for operator class options parsing functions must match"))); } if (procform->prorettype != VOIDOID || @@ -1227,8 +1227,8 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid, procform->proargtypes.values[0] != INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("invalid opclass options parsing function"), - errhint("Valid signature of 
opclass options parsing function is '%s'.", + errmsg("invalid operator class options parsing function"), + errhint("Valid signature of operator class options parsing function is %s.", "(internal) RETURNS void"))); } diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index bf23937849c9f..a791e99092d5b 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -168,10 +168,22 @@ DefineOperator(List *names, List *parameters) if (typeName2) typeId2 = typenameTypeId(NULL, typeName2); + /* + * If only the right argument is missing, the user is likely trying to + * create a postfix operator, so give them a hint about why that does not + * work. But if both arguments are missing, do not mention postfix + * operators, as the user most likely simply neglected to mention the + * arguments. + */ if (!OidIsValid(typeId1) && !OidIsValid(typeId2)) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("at least one of leftarg or rightarg must be specified"))); + errmsg("operator argument types must be specified"))); + if (!OidIsValid(typeId2)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("operator right argument type must be specified"), + errdetail("Postfix operators are not supported."))); if (typeName1) { diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 4b4e4694930e2..d3f8e8f06c136 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -187,159 +187,139 @@ policy_role_list_to_array(List *roles, int *num_roles) /* * Load row security policy from the catalog, and store it in * the relation's relcache entry. + * + * Note that caller should have verified that pg_class.relrowsecurity + * is true for this relation. 
*/ void RelationBuildRowSecurity(Relation relation) { MemoryContext rscxt; MemoryContext oldcxt = CurrentMemoryContext; - RowSecurityDesc *volatile rsdesc = NULL; + RowSecurityDesc *rsdesc; + Relation catalog; + ScanKeyData skey; + SysScanDesc sscan; + HeapTuple tuple; /* * Create a memory context to hold everything associated with this * relation's row security policy. This makes it easy to clean up during - * a relcache flush. + * a relcache flush. However, to cover the possibility of an error + * partway through, we don't make the context long-lived till we're done. */ - rscxt = AllocSetContextCreate(CacheMemoryContext, + rscxt = AllocSetContextCreate(CurrentMemoryContext, "row security descriptor", ALLOCSET_SMALL_SIZES); + MemoryContextCopyAndSetIdentifier(rscxt, + RelationGetRelationName(relation)); + + rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc)); + rsdesc->rscxt = rscxt; /* - * Since rscxt lives under CacheMemoryContext, it is long-lived. Use a - * PG_TRY block to ensure it'll get freed if we fail partway through. + * Now scan pg_policy for RLS policies associated with this relation. + * Because we use the index on (polrelid, polname), we should consistently + * visit the rel's policies in name order, at least when system indexes + * aren't disabled. This simplifies equalRSDesc(). 
*/ - PG_TRY(); - { - Relation catalog; - ScanKeyData skey; - SysScanDesc sscan; - HeapTuple tuple; - - MemoryContextCopyAndSetIdentifier(rscxt, - RelationGetRelationName(relation)); + catalog = table_open(PolicyRelationId, AccessShareLock); - rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc)); - rsdesc->rscxt = rscxt; + ScanKeyInit(&skey, + Anum_pg_policy_polrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(relation))); - catalog = table_open(PolicyRelationId, AccessShareLock); + sscan = systable_beginscan(catalog, PolicyPolrelidPolnameIndexId, true, + NULL, 1, &skey); - ScanKeyInit(&skey, - Anum_pg_policy_polrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(RelationGetRelid(relation))); + while (HeapTupleIsValid(tuple = systable_getnext(sscan))) + { + Form_pg_policy policy_form = (Form_pg_policy) GETSTRUCT(tuple); + RowSecurityPolicy *policy; + Datum datum; + bool isnull; + char *str_value; - sscan = systable_beginscan(catalog, PolicyPolrelidPolnameIndexId, true, - NULL, 1, &skey); + policy = MemoryContextAllocZero(rscxt, sizeof(RowSecurityPolicy)); /* - * Loop through the row level security policies for this relation, if - * any. + * Note: we must be sure that pass-by-reference data gets copied into + * rscxt. We avoid making that context current over wider spans than + * we have to, though. */ - while (HeapTupleIsValid(tuple = systable_getnext(sscan))) - { - Datum value_datum; - char cmd_value; - bool permissive_value; - Datum roles_datum; - char *qual_value; - Expr *qual_expr; - char *with_check_value; - Expr *with_check_qual; - char *policy_name_value; - bool isnull; - RowSecurityPolicy *policy; - - /* - * Note: all the pass-by-reference data we collect here is either - * still stored in the tuple, or constructed in the caller's - * short-lived memory context. We must copy it into rscxt - * explicitly below. 
- */ - - /* Get policy command */ - value_datum = heap_getattr(tuple, Anum_pg_policy_polcmd, - RelationGetDescr(catalog), &isnull); - Assert(!isnull); - cmd_value = DatumGetChar(value_datum); - - /* Get policy permissive or restrictive */ - value_datum = heap_getattr(tuple, Anum_pg_policy_polpermissive, - RelationGetDescr(catalog), &isnull); - Assert(!isnull); - permissive_value = DatumGetBool(value_datum); - - /* Get policy name */ - value_datum = heap_getattr(tuple, Anum_pg_policy_polname, - RelationGetDescr(catalog), &isnull); - Assert(!isnull); - policy_name_value = NameStr(*(DatumGetName(value_datum))); - - /* Get policy roles */ - roles_datum = heap_getattr(tuple, Anum_pg_policy_polroles, - RelationGetDescr(catalog), &isnull); - /* shouldn't be null, but initdb doesn't mark it so, so check */ - if (isnull) - elog(ERROR, "unexpected null value in pg_policy.polroles"); - - /* Get policy qual */ - value_datum = heap_getattr(tuple, Anum_pg_policy_polqual, - RelationGetDescr(catalog), &isnull); - if (!isnull) - { - qual_value = TextDatumGetCString(value_datum); - qual_expr = (Expr *) stringToNode(qual_value); - } - else - qual_expr = NULL; - /* Get WITH CHECK qual */ - value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck, - RelationGetDescr(catalog), &isnull); - if (!isnull) - { - with_check_value = TextDatumGetCString(value_datum); - with_check_qual = (Expr *) stringToNode(with_check_value); - } - else - with_check_qual = NULL; + /* Get policy command */ + policy->polcmd = policy_form->polcmd; - /* Now copy everything into the cache context */ - MemoryContextSwitchTo(rscxt); + /* Get policy, permissive or restrictive */ + policy->permissive = policy_form->polpermissive; - policy = palloc0(sizeof(RowSecurityPolicy)); - policy->policy_name = pstrdup(policy_name_value); - policy->polcmd = cmd_value; - policy->permissive = permissive_value; - policy->roles = DatumGetArrayTypePCopy(roles_datum); - policy->qual = copyObject(qual_expr); - 
policy->with_check_qual = copyObject(with_check_qual); - policy->hassublinks = checkExprHasSubLink((Node *) qual_expr) || - checkExprHasSubLink((Node *) with_check_qual); + /* Get policy name */ + policy->policy_name = + MemoryContextStrdup(rscxt, NameStr(policy_form->polname)); - rsdesc->policies = lcons(policy, rsdesc->policies); + /* Get policy roles */ + datum = heap_getattr(tuple, Anum_pg_policy_polroles, + RelationGetDescr(catalog), &isnull); + /* shouldn't be null, but let's check for luck */ + if (isnull) + elog(ERROR, "unexpected null value in pg_policy.polroles"); + MemoryContextSwitchTo(rscxt); + policy->roles = DatumGetArrayTypePCopy(datum); + MemoryContextSwitchTo(oldcxt); + /* Get policy qual */ + datum = heap_getattr(tuple, Anum_pg_policy_polqual, + RelationGetDescr(catalog), &isnull); + if (!isnull) + { + str_value = TextDatumGetCString(datum); + MemoryContextSwitchTo(rscxt); + policy->qual = (Expr *) stringToNode(str_value); MemoryContextSwitchTo(oldcxt); + pfree(str_value); + } + else + policy->qual = NULL; - /* clean up some (not all) of the junk ... */ - if (qual_expr != NULL) - pfree(qual_expr); - if (with_check_qual != NULL) - pfree(with_check_qual); + /* Get WITH CHECK qual */ + datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck, + RelationGetDescr(catalog), &isnull); + if (!isnull) + { + str_value = TextDatumGetCString(datum); + MemoryContextSwitchTo(rscxt); + policy->with_check_qual = (Expr *) stringToNode(str_value); + MemoryContextSwitchTo(oldcxt); + pfree(str_value); } + else + policy->with_check_qual = NULL; - systable_endscan(sscan); - table_close(catalog, AccessShareLock); - } - PG_CATCH(); - { - /* Delete rscxt, first making sure it isn't active */ + /* We want to cache whether there are SubLinks in these expressions */ + policy->hassublinks = checkExprHasSubLink((Node *) policy->qual) || + checkExprHasSubLink((Node *) policy->with_check_qual); + + /* + * Add this object to list. 
For historical reasons, the list is built + * in reverse order. + */ + MemoryContextSwitchTo(rscxt); + rsdesc->policies = lcons(policy, rsdesc->policies); MemoryContextSwitchTo(oldcxt); - MemoryContextDelete(rscxt); - PG_RE_THROW(); } - PG_END_TRY(); - /* Success --- attach the policy descriptor to the relcache entry */ + systable_endscan(sscan); + table_close(catalog, AccessShareLock); + + /* + * Success. Reparent the descriptor's memory context under + * CacheMemoryContext so that it will live indefinitely, then attach the + * policy descriptor to the relcache entry. + */ + MemoryContextSetParent(rscxt, CacheMemoryContext); + relation->rd_rsdesc = rsdesc; } diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 6aab73bfd447b..632b34af61005 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -1668,7 +1668,7 @@ process_owned_by(Relation seqrel, List *owned_by, bool for_identity) /* Separate relname and attr name */ relname = list_truncate(list_copy(owned_by), nnames - 1); - attrname = strVal(lfirst(list_tail(owned_by))); + attrname = strVal(llast(owned_by)); /* Open and lock rel to ensure it won't go away meanwhile */ rel = makeRangeVarFromNameList(relname); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 3e57c7f9e1dbe..a29c14bf1cf0a 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -543,7 +543,8 @@ static void ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partPa static void CreateInheritance(Relation child_rel, Relation parent_rel); static void RemoveInheritance(Relation child_rel, Relation parent_rel); static ObjectAddress ATExecAttachPartition(List **wqueue, Relation rel, - PartitionCmd *cmd); + PartitionCmd *cmd, + AlterTableUtilityContext *context); static void AttachPartitionEnsureIndexes(Relation rel, Relation attachrel); static void QueuePartitionConstraintValidation(List **wqueue, Relation scanrel, 
List *partConstraint, @@ -1007,7 +1008,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId, * Check first that the new partition's bound is valid and does not * overlap with any of existing partitions of the parent. */ - check_new_partition_bound(relname, parent, bound); + check_new_partition_bound(relname, parent, bound, pstate); /* * If the default partition exists, its partition constraints will @@ -1786,6 +1787,11 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, /* * To fire triggers, we'll need an EState as well as a ResultRelInfo for * each relation. We don't need to call ExecOpenIndices, though. + * + * We put the ResultRelInfos in the es_opened_result_relations list, even + * though we don't have a range table and don't populate the + * es_result_relations array. That's a big bogus, but it's enough to make + * ExecGetTriggerResultRel() find them. */ estate = CreateExecutorState(); resultRelInfos = (ResultRelInfo *) @@ -1800,10 +1806,10 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, 0, /* dummy rangetable index */ NULL, 0); + estate->es_opened_result_relations = + lappend(estate->es_opened_result_relations, resultRelInfo); resultRelInfo++; } - estate->es_result_relations = resultRelInfos; - estate->es_num_result_relations = list_length(rels); /* * Process all BEFORE STATEMENT TRUNCATE triggers before we begin @@ -1814,7 +1820,6 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, resultRelInfo = resultRelInfos; foreach(cell, rels) { - estate->es_result_relation_info = resultRelInfo; ExecBSTruncateTriggers(estate, resultRelInfo); resultRelInfo++; } @@ -1944,7 +1949,6 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged, resultRelInfo = resultRelInfos; foreach(cell, rels) { - estate->es_result_relation_info = resultRelInfo; ExecASTruncateTriggers(estate, resultRelInfo); resultRelInfo++; } @@ -4317,6 +4321,8 @@ ATPrepCmd(List **wqueue, 
Relation rel, AlterTableCmd *cmd, case AT_DisableTrigAll: case AT_DisableTrigUser: ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE); + if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode, context); pass = AT_PASS_MISC; break; case AT_EnableRule: /* ENABLE/DISABLE RULE variants */ @@ -4718,7 +4724,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, cur_pass, context); Assert(cmd != NULL); if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) - ATExecAttachPartition(wqueue, rel, (PartitionCmd *) cmd->def); + ATExecAttachPartition(wqueue, rel, (PartitionCmd *) cmd->def, + context); else ATExecAttachPartitionIdx(wqueue, rel, ((PartitionCmd *) cmd->def)->name); @@ -5681,14 +5688,10 @@ ATSimpleRecursion(List **wqueue, Relation rel, AlterTableUtilityContext *context) { /* - * Propagate to children if desired. Only plain tables, foreign tables - * and partitioned tables have children, so no need to search for other - * relkinds. + * Propagate to children, if desired and if there are (or might be) any + * children. */ - if (recurse && - (rel->rd_rel->relkind == RELKIND_RELATION || - rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE || - rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)) + if (recurse && rel->rd_rel->relhassubclass) { Oid relid = RelationGetRelid(rel); ListCell *child; @@ -5734,7 +5737,7 @@ ATCheckPartitionsNotInUse(Relation rel, LOCKMODE lockmode) inh = find_all_inheritors(RelationGetRelid(rel), lockmode, NULL); /* first element is the parent rel; must ignore it */ - for_each_cell(cell, inh, list_second_cell(inh)) + for_each_from(cell, inh, 1) { Relation childrel; @@ -6698,6 +6701,41 @@ ATPrepSetNotNull(List **wqueue, Relation rel, if (recursing) return; + /* + * If the target column is already marked NOT NULL, we can skip recursing + * to children, because their columns should already be marked NOT NULL as + * well. 
But there's no point in checking here unless the relation has + * some children; else we can just wait till execution to check. (If it + * does have children, however, this can save taking per-child locks + * unnecessarily. This greatly improves concurrency in some parallel + * restore scenarios.) + * + * Unfortunately, we can only apply this optimization to partitioned + * tables, because traditional inheritance doesn't enforce that child + * columns be NOT NULL when their parent is. (That's a bug that should + * get fixed someday.) + */ + if (rel->rd_rel->relhassubclass && + rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) + { + HeapTuple tuple; + bool attnotnull; + + tuple = SearchSysCacheAttName(RelationGetRelid(rel), cmd->name); + + /* Might as well throw the error now, if name is bad */ + if (!HeapTupleIsValid(tuple)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + cmd->name, RelationGetRelationName(rel)))); + + attnotnull = ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull; + ReleaseSysCache(tuple); + if (attnotnull) + return; + } + /* * If we have ALTER TABLE ONLY ... SET NOT NULL on a partitioned table, * apply ALTER TABLE ... CHECK NOT NULL to every child. Otherwise, use @@ -6855,7 +6893,7 @@ NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr) if (ConstraintImpliedByRelConstraint(rel, list_make1(nnulltest), NIL)) { ereport(DEBUG1, - (errmsg("existing constraints on column \"%s\".\"%s\" are sufficient to prove that it does not contain nulls", + (errmsg("existing constraints on column \"%s.%s\" are sufficient to prove that it does not contain nulls", RelationGetRelationName(rel), NameStr(attr->attname)))); return true; } @@ -16249,7 +16287,8 @@ QueuePartitionConstraintValidation(List **wqueue, Relation scanrel, * Return the address of the newly attached partition. 
*/ static ObjectAddress -ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) +ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd, + AlterTableUtilityContext *context) { Relation attachrel, catalog; @@ -16264,6 +16303,9 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) const char *trigger_name; Oid defaultPartOid; List *partBoundConstraint; + ParseState *pstate = make_parsestate(NULL); + + pstate->p_sourcetext = context->queryString; /* * We must lock the default partition if one exists, because attaching a @@ -16429,7 +16471,7 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd) * error. */ check_new_partition_bound(RelationGetRelationName(attachrel), rel, - cmd->bound); + cmd->bound, pstate); /* OK to create inheritance. Rest of the checks performed there */ CreateInheritance(attachrel, rel); diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 672fccff5bd15..28b98d10ae85b 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -35,6 +35,7 @@ #include "commands/defrem.h" #include "commands/trigger.h" #include "executor/executor.h" +#include "executor/execPartition.h" #include "miscadmin.h" #include "nodes/bitmapset.h" #include "nodes/makefuncs.h" @@ -1530,27 +1531,6 @@ EnableDisableTrigger(Relation rel, const char *tgname, heap_freetuple(newtup); - /* - * When altering FOR EACH ROW triggers on a partitioned table, do - * the same on the partitions as well. 
- */ - if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE && - (TRIGGER_FOR_ROW(oldtrig->tgtype))) - { - PartitionDesc partdesc = RelationGetPartitionDesc(rel); - int i; - - for (i = 0; i < partdesc->nparts; i++) - { - Relation part; - - part = relation_open(partdesc->oids[i], lockmode); - EnableDisableTrigger(part, NameStr(oldtrig->tgname), - fires_when, skip_system, lockmode); - table_close(part, NoLock); /* keep lock till commit */ - } - } - changed = true; } @@ -4227,7 +4207,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, if (local_estate) { - ExecCleanUpTriggerState(estate); + ExecCloseResultRelations(estate); ExecResetTupleTable(estate->es_tupleTable, false); FreeExecutorState(estate); } @@ -4292,9 +4272,10 @@ GetAfterTriggersTableData(Oid relid, CmdType cmdType) * If there are no triggers in 'trigdesc' that request relevant transition * tables, then return NULL. * - * The resulting object can be passed to the ExecAR* functions. The caller - * should set tcs_map or tcs_original_insert_tuple as appropriate when dealing - * with child tables. + * The resulting object can be passed to the ExecAR* functions. When + * dealing with child tables, the caller can set tcs_original_insert_tuple + * to avoid having to reconstruct the original tuple in the root table's + * format. 
* * Note that we copy the flags from a parent table into this struct (rather * than subsequently using the relation's TriggerDesc directly) so that we can @@ -5389,7 +5370,7 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, if (row_trigger && transition_capture != NULL) { TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple; - TupleConversionMap *map = transition_capture->tcs_map; + TupleConversionMap *map = relinfo->ri_ChildToRootMap; bool delete_old_table = transition_capture->tcs_delete_old_table; bool update_old_table = transition_capture->tcs_update_old_table; bool update_new_table = transition_capture->tcs_update_new_table; diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index 236413f62aaff..868f8b0858f87 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -1104,23 +1104,6 @@ ExecInitExprRec(Expr *node, ExprState *state, break; } - case T_AlternativeSubPlan: - { - AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; - AlternativeSubPlanState *asstate; - - if (!state->parent) - elog(ERROR, "AlternativeSubPlan found with no parent plan"); - - asstate = ExecInitAlternativeSubPlan(asplan, state->parent); - - scratch.opcode = EEOP_ALTERNATIVE_SUBPLAN; - scratch.d.alternative_subplan.asstate = asstate; - - ExprEvalPushStep(state, &scratch); - break; - } - case T_FieldSelect: { FieldSelect *fselect = (FieldSelect *) node; diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index b812bbaceef8c..26c2b4963215e 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -431,7 +431,6 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) &&CASE_EEOP_GROUPING_FUNC, &&CASE_EEOP_WINDOW_FUNC, &&CASE_EEOP_SUBPLAN, - &&CASE_EEOP_ALTERNATIVE_SUBPLAN, &&CASE_EEOP_AGG_STRICT_DESERIALIZE, &&CASE_EEOP_AGG_DESERIALIZE, &&CASE_EEOP_AGG_STRICT_INPUT_CHECK_ARGS, @@ -1536,14 
+1535,6 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) EEO_NEXT(); } - EEO_CASE(EEOP_ALTERNATIVE_SUBPLAN) - { - /* too complex for an inline implementation */ - ExecEvalAlternativeSubPlan(state, op, econtext); - - EEO_NEXT(); - } - /* evaluate a strict aggregate deserialization function */ EEO_CASE(EEOP_AGG_STRICT_DESERIALIZE) { @@ -3868,20 +3859,6 @@ ExecEvalSubPlan(ExprState *state, ExprEvalStep *op, ExprContext *econtext) *op->resvalue = ExecSubPlan(sstate, econtext, op->resnull); } -/* - * Hand off evaluation of an alternative subplan to nodeSubplan.c - */ -void -ExecEvalAlternativeSubPlan(ExprState *state, ExprEvalStep *op, ExprContext *econtext) -{ - AlternativeSubPlanState *asstate = op->d.alternative_subplan.asstate; - - /* could potentially be nested, so make sure there's enough stack */ - check_stack_depth(); - - *op->resvalue = ExecAlternativeSubPlan(asstate, econtext, op->resnull); -} - /* * Evaluate a wholerow Var expression. * diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index 1862af621be0d..c6b5bcba7b47f 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -270,7 +270,8 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo) * ---------------------------------------------------------------- */ List * -ExecInsertIndexTuples(TupleTableSlot *slot, +ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, EState *estate, bool noDupErr, bool *specConflict, @@ -278,7 +279,6 @@ ExecInsertIndexTuples(TupleTableSlot *slot, { ItemPointer tupleid = &slot->tts_tid; List *result = NIL; - ResultRelInfo *resultRelInfo; int i; int numIndices; RelationPtr relationDescs; @@ -293,7 +293,6 @@ ExecInsertIndexTuples(TupleTableSlot *slot, /* * Get information from the result relation info structure. 
*/ - resultRelInfo = estate->es_result_relation_info; numIndices = resultRelInfo->ri_NumIndices; relationDescs = resultRelInfo->ri_IndexRelationDescs; indexInfoArray = resultRelInfo->ri_IndexRelationInfo; @@ -479,11 +478,10 @@ ExecInsertIndexTuples(TupleTableSlot *slot, * ---------------------------------------------------------------- */ bool -ExecCheckIndexConstraints(TupleTableSlot *slot, +ExecCheckIndexConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate, ItemPointer conflictTid, List *arbiterIndexes) { - ResultRelInfo *resultRelInfo; int i; int numIndices; RelationPtr relationDescs; @@ -501,7 +499,6 @@ ExecCheckIndexConstraints(TupleTableSlot *slot, /* * Get information from the result relation info structure. */ - resultRelInfo = estate->es_result_relation_info; numIndices = resultRelInfo->ri_NumIndices; relationDescs = resultRelInfo->ri_IndexRelationDescs; indexInfoArray = resultRelInfo->ri_IndexRelationInfo; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 4fdffad6f35df..7179f589f9491 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -827,87 +827,6 @@ InitPlan(QueryDesc *queryDesc, int eflags) estate->es_plannedstmt = plannedstmt; - /* - * Initialize ResultRelInfo data structures, and open the result rels. 
- */ - if (plannedstmt->resultRelations) - { - List *resultRelations = plannedstmt->resultRelations; - int numResultRelations = list_length(resultRelations); - ResultRelInfo *resultRelInfos; - ResultRelInfo *resultRelInfo; - - resultRelInfos = (ResultRelInfo *) - palloc(numResultRelations * sizeof(ResultRelInfo)); - resultRelInfo = resultRelInfos; - foreach(l, resultRelations) - { - Index resultRelationIndex = lfirst_int(l); - Relation resultRelation; - - resultRelation = ExecGetRangeTableRelation(estate, - resultRelationIndex); - InitResultRelInfo(resultRelInfo, - resultRelation, - resultRelationIndex, - NULL, - estate->es_instrument); - resultRelInfo++; - } - estate->es_result_relations = resultRelInfos; - estate->es_num_result_relations = numResultRelations; - - /* es_result_relation_info is NULL except when within ModifyTable */ - estate->es_result_relation_info = NULL; - - /* - * In the partitioned result relation case, also build ResultRelInfos - * for all the partitioned table roots, because we will need them to - * fire statement-level triggers, if any. 
- */ - if (plannedstmt->rootResultRelations) - { - int num_roots = list_length(plannedstmt->rootResultRelations); - - resultRelInfos = (ResultRelInfo *) - palloc(num_roots * sizeof(ResultRelInfo)); - resultRelInfo = resultRelInfos; - foreach(l, plannedstmt->rootResultRelations) - { - Index resultRelIndex = lfirst_int(l); - Relation resultRelDesc; - - resultRelDesc = ExecGetRangeTableRelation(estate, - resultRelIndex); - InitResultRelInfo(resultRelInfo, - resultRelDesc, - resultRelIndex, - NULL, - estate->es_instrument); - resultRelInfo++; - } - - estate->es_root_result_relations = resultRelInfos; - estate->es_num_root_result_relations = num_roots; - } - else - { - estate->es_root_result_relations = NULL; - estate->es_num_root_result_relations = 0; - } - } - else - { - /* - * if no result relation, then set state appropriately - */ - estate->es_result_relations = NULL; - estate->es_num_result_relations = 0; - estate->es_result_relation_info = NULL; - estate->es_root_result_relations = NULL; - estate->es_num_root_result_relations = 0; - } - /* * Next, build the ExecRowMark array from the PlanRowMark(s), if any. */ @@ -1280,8 +1199,6 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation partition_root, int instrument_options) { - List *partition_check = NIL; - MemSet(resultRelInfo, 0, sizeof(ResultRelInfo)); resultRelInfo->type = T_ResultRelInfo; resultRelInfo->ri_RangeTableIndex = resultRelationIndex; @@ -1325,25 +1242,11 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, resultRelInfo->ri_ReturningSlot = NULL; resultRelInfo->ri_TrigOldSlot = NULL; resultRelInfo->ri_TrigNewSlot = NULL; - - /* - * Partition constraint, which also includes the partition constraint of - * all the ancestors that are partitions. Note that it will be checked - * even in the case of tuple-routing where this table is the target leaf - * partition, if there any BR triggers defined on the table. 
Although - * tuple-routing implicitly preserves the partition constraint of the - * target partition for a given row, the BR triggers may change the row - * such that the constraint is no longer satisfied, which we must fail for - * by checking it explicitly. - * - * If this is a partitioned table, the partition constraint (if any) of a - * given row will be checked just before performing tuple-routing. - */ - partition_check = RelationGetPartitionQual(resultRelationDesc); - - resultRelInfo->ri_PartitionCheck = partition_check; resultRelInfo->ri_PartitionRoot = partition_root; - resultRelInfo->ri_PartitionInfo = NULL; /* may be set later */ + resultRelInfo->ri_RootToPartitionMap = NULL; /* set by + * ExecInitRoutingInfo */ + resultRelInfo->ri_PartitionTupleSlot = NULL; /* ditto */ + resultRelInfo->ri_ChildToRootMap = NULL; resultRelInfo->ri_CopyMultiInsertBuffer = NULL; } @@ -1353,8 +1256,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, * * Most of the time, triggers are fired on one of the result relations of the * query, and so we can just return a member of the es_result_relations array, - * or the es_root_result_relations array (if any), or the - * es_tuple_routing_result_relations list (if any). (Note: in self-join + * or the es_tuple_routing_result_relations list (if any). (Note: in self-join * situations there might be multiple members with the same OID; if so it * doesn't matter which one we pick.) 
* @@ -1371,30 +1273,16 @@ ResultRelInfo * ExecGetTriggerResultRel(EState *estate, Oid relid) { ResultRelInfo *rInfo; - int nr; ListCell *l; Relation rel; MemoryContext oldcontext; /* First, search through the query result relations */ - rInfo = estate->es_result_relations; - nr = estate->es_num_result_relations; - while (nr > 0) + foreach(l, estate->es_opened_result_relations) { + rInfo = lfirst(l); if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) return rInfo; - rInfo++; - nr--; - } - /* Second, search through the root result relations, if any */ - rInfo = estate->es_root_result_relations; - nr = estate->es_num_root_result_relations; - while (nr > 0) - { - if (RelationGetRelid(rInfo->ri_RelationDesc) == relid) - return rInfo; - rInfo++; - nr--; } /* @@ -1447,35 +1335,6 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) return rInfo; } -/* - * Close any relations that have been opened by ExecGetTriggerResultRel(). - */ -void -ExecCleanUpTriggerState(EState *estate) -{ - ListCell *l; - - foreach(l, estate->es_trig_target_relations) - { - ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l); - - /* - * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we - * might be issuing a duplicate close against a Relation opened by - * ExecGetRangeTableRelation. - */ - Assert(resultRelInfo->ri_RangeTableIndex == 0); - - /* - * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for - * these rels, we needn't call ExecCloseIndices either. 
- */ - Assert(resultRelInfo->ri_NumIndices == 0); - - table_close(resultRelInfo->ri_RelationDesc, NoLock); - } -} - /* ---------------------------------------------------------------- * ExecPostprocessPlan * @@ -1531,9 +1390,6 @@ ExecPostprocessPlan(EState *estate) static void ExecEndPlan(PlanState *planstate, EState *estate) { - ResultRelInfo *resultRelInfo; - Index num_relations; - Index i; ListCell *l; /* @@ -1560,29 +1416,69 @@ ExecEndPlan(PlanState *planstate, EState *estate) ExecResetTupleTable(estate->es_tupleTable, false); /* - * close indexes of result relation(s) if any. (Rels themselves get - * closed next.) + * Close any Relations that have been opened for range table entries or + * result relations. + */ + ExecCloseResultRelations(estate); + ExecCloseRangeTableRelations(estate); +} + +/* + * Close any relations that have been opened for ResultRelInfos. + */ +void +ExecCloseResultRelations(EState *estate) +{ + ListCell *l; + + /* + * close indexes of result relation(s) if any. (Rels themselves are + * closed in ExecCloseRangeTableRelations()) */ - resultRelInfo = estate->es_result_relations; - for (i = estate->es_num_result_relations; i > 0; i--) + foreach(l, estate->es_opened_result_relations) { + ResultRelInfo *resultRelInfo = lfirst(l); + ExecCloseIndices(resultRelInfo); - resultRelInfo++; } - /* - * close whatever rangetable Relations have been opened. We do not - * release any locks we might hold on those rels. - */ - num_relations = estate->es_range_table_size; - for (i = 0; i < num_relations; i++) + /* Close any relations that have been opened by ExecGetTriggerResultRel(). */ + foreach(l, estate->es_trig_target_relations) + { + ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l); + + /* + * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we + * might be issuing a duplicate close against a Relation opened by + * ExecGetRangeTableRelation. 
+ */ + Assert(resultRelInfo->ri_RangeTableIndex == 0); + + /* + * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for + * these rels, we needn't call ExecCloseIndices either. + */ + Assert(resultRelInfo->ri_NumIndices == 0); + + table_close(resultRelInfo->ri_RelationDesc, NoLock); + } +} + +/* + * Close all relations opened by ExecGetRangeTableRelation(). + * + * We do not release any locks we might hold on those rels. + */ +void +ExecCloseRangeTableRelations(EState *estate) +{ + int i; + + for (i = 0; i < estate->es_range_table_size; i++) { if (estate->es_relations[i]) table_close(estate->es_relations[i], NoLock); } - - /* likewise close any trigger target relations */ - ExecCleanUpTriggerState(estate); } /* ---------------------------------------------------------------- @@ -1776,7 +1672,7 @@ ExecRelCheck(ResultRelInfo *resultRelInfo, * ExecPartitionCheck --- check that tuple meets the partition constraint. * * Returns true if it meets the partition constraint. If the constraint - * fails and we're asked to emit to error, do so and don't return; otherwise + * fails and we're asked to emit an error, do so and don't return; otherwise * return false. */ bool @@ -1788,14 +1684,22 @@ ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, /* * If first time through, build expression state tree for the partition - * check expression. Keep it in the per-query memory context so they'll - * survive throughout the query. + * check expression. (In the corner case where the partition check + * expression is empty, ie there's a default partition and nothing else, + * we'll be fooled into executing this code each time through. But it's + * pretty darn cheap in that case, so we don't worry about it.) */ if (resultRelInfo->ri_PartitionCheckExpr == NULL) { - List *qual = resultRelInfo->ri_PartitionCheck; + /* + * Ensure that the qual tree and prepared expression are in the + * query-lifespan context. 
+ */ + MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt); + List *qual = RelationGetPartitionQual(resultRelInfo->ri_RelationDesc); resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate); + MemoryContextSwitchTo(oldcxt); } /* @@ -1904,9 +1808,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo, Bitmapset *insertedCols; Bitmapset *updatedCols; - Assert(constr || resultRelInfo->ri_PartitionCheck); + Assert(constr); /* we should not be called otherwise */ - if (constr && constr->has_not_null) + if (constr->has_not_null) { int natts = tupdesc->natts; int attrChk; @@ -1967,7 +1871,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo, } } - if (constr && constr->num_check > 0) + if (constr->num_check > 0) { const char *failed; @@ -2769,17 +2673,9 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) /* * Child EPQ EStates share the parent's copy of unchanging state such as - * the snapshot, rangetable, result-rel info, and external Param info. - * They need their own copies of local state, including a tuple table, - * es_param_exec_vals, etc. - * - * The ResultRelInfo array management is trickier than it looks. We - * create fresh arrays for the child but copy all the content from the - * parent. This is because it's okay for the child to share any - * per-relation state the parent has already created --- but if the child - * sets up any ResultRelInfo fields, such as its own junkfilter, that - * state must *not* propagate back to the parent. (For one thing, the - * pointed-to data is in a memory context that won't last long enough.) + * the snapshot, rangetable, and external Param info. They need their own + * copies of local state, including a tuple table, es_param_exec_vals, + * result-rel info, etc. 
*/ rcestate->es_direction = ForwardScanDirection; rcestate->es_snapshot = parentestate->es_snapshot; @@ -2792,31 +2688,12 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) rcestate->es_plannedstmt = parentestate->es_plannedstmt; rcestate->es_junkFilter = parentestate->es_junkFilter; rcestate->es_output_cid = parentestate->es_output_cid; - if (parentestate->es_num_result_relations > 0) - { - int numResultRelations = parentestate->es_num_result_relations; - int numRootResultRels = parentestate->es_num_root_result_relations; - ResultRelInfo *resultRelInfos; - - resultRelInfos = (ResultRelInfo *) - palloc(numResultRelations * sizeof(ResultRelInfo)); - memcpy(resultRelInfos, parentestate->es_result_relations, - numResultRelations * sizeof(ResultRelInfo)); - rcestate->es_result_relations = resultRelInfos; - rcestate->es_num_result_relations = numResultRelations; - - /* Also transfer partitioned root result relations. */ - if (numRootResultRels > 0) - { - resultRelInfos = (ResultRelInfo *) - palloc(numRootResultRels * sizeof(ResultRelInfo)); - memcpy(resultRelInfos, parentestate->es_root_result_relations, - numRootResultRels * sizeof(ResultRelInfo)); - rcestate->es_root_result_relations = resultRelInfos; - rcestate->es_num_root_result_relations = numRootResultRels; - } - } - /* es_result_relation_info must NOT be copied */ + + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. + */ + rcestate->es_result_relations = NULL; /* es_trig_target_relations must NOT be copied */ rcestate->es_top_eflags = parentestate->es_top_eflags; rcestate->es_instrument = parentestate->es_instrument; @@ -2925,8 +2802,9 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) * This is a cut-down version of ExecutorEnd(); basically we want to do most * of the normal cleanup, but *not* close result relations (which we are * just sharing from the outer query). 
We do, however, have to close any - * trigger target relations that got opened, since those are not shared. - * (There probably shouldn't be any of the latter, but just in case...) + * result and trigger target relations that got opened, since those are not + * shared. (There probably shouldn't be any of the latter, but just in + * case...) */ void EvalPlanQualEnd(EPQState *epqstate) @@ -2968,8 +2846,8 @@ EvalPlanQualEnd(EPQState *epqstate) /* throw away the per-estate tuple table, some node may have used it */ ExecResetTupleTable(estate->es_tupleTable, false); - /* close any trigger target relations attached to this EState */ - ExecCleanUpTriggerState(estate); + /* Close any result and trigger target relations attached to this EState */ + ExecCloseResultRelations(estate); MemoryContextSwitchTo(oldcontext); diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index 382e78fb7fed2..befde526910ae 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -184,7 +184,6 @@ ExecSerializePlan(Plan *plan, EState *estate) pstmt->planTree = plan; pstmt->rtable = estate->es_range_table; pstmt->resultRelations = NIL; - pstmt->rootResultRelations = NIL; pstmt->appendRelations = NIL; /* diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c index bd2ea2580475f..86594bd0565b6 100644 --- a/src/backend/executor/execPartition.c +++ b/src/backend/executor/execPartition.c @@ -261,7 +261,7 @@ ExecSetupPartitionTupleRouting(EState *estate, ModifyTableState *mtstate, * If the partition's ResultRelInfo does not yet exist in 'proute' then we set * one up or reuse one from mtstate's resultRelInfo array. When reusing a * ResultRelInfo from the mtstate we verify that the relation is a valid - * target for INSERTs and then set up a PartitionRoutingInfo for it. + * target for INSERTs and initialize tuple routing information. * * rootResultRelInfo is the relation named in the query. 
* @@ -299,7 +299,7 @@ ExecFindPartition(ModifyTableState *mtstate, * First check the root table's partition constraint, if any. No point in * routing the tuple if it doesn't belong in the root table itself. */ - if (rootResultRelInfo->ri_PartitionCheck) + if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition) ExecPartitionCheck(rootResultRelInfo, slot, estate, true); /* start with the root partitioned table */ @@ -307,6 +307,7 @@ ExecFindPartition(ModifyTableState *mtstate, while (dispatch != NULL) { int partidx = -1; + bool is_leaf; CHECK_FOR_INTERRUPTS(); @@ -346,8 +347,10 @@ ExecFindPartition(ModifyTableState *mtstate, errtable(rel))); } - if (partdesc->is_leaf[partidx]) + is_leaf = partdesc->is_leaf[partidx]; + if (is_leaf) { + /* * We've reached the leaf -- hurray, we're done. Look to see if * we've already got a ResultRelInfo for this partition. @@ -382,7 +385,10 @@ ExecFindPartition(ModifyTableState *mtstate, /* Verify this ResultRelInfo allows INSERTs */ CheckValidResultRel(rri, CMD_INSERT); - /* Set up the PartitionRoutingInfo for it */ + /* + * Initialize information needed to insert this and + * subsequent tuples routed to this partition. + */ ExecInitRoutingInfo(mtstate, estate, proute, dispatch, rri, partidx); } @@ -464,8 +470,6 @@ ExecFindPartition(ModifyTableState *mtstate, */ if (partidx == partdesc->boundinfo->default_index) { - PartitionRoutingInfo *partrouteinfo = rri->ri_PartitionInfo; - /* * The tuple must match the partition's layout for the constraint * expression to be evaluated successfully. If the partition is @@ -478,13 +482,13 @@ ExecFindPartition(ModifyTableState *mtstate, * So if we have to convert, do it from the root slot; if not, use * the root slot as-is. 
*/ - if (partrouteinfo) + if (is_leaf) { - TupleConversionMap *map = partrouteinfo->pi_RootToPartitionMap; + TupleConversionMap *map = rri->ri_RootToPartitionMap; if (map) slot = execute_attr_map_slot(map->attrMap, rootslot, - partrouteinfo->pi_PartitionTupleSlot); + rri->ri_PartitionTupleSlot); else slot = rootslot; } @@ -788,7 +792,7 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, { TupleConversionMap *map; - map = leaf_part_rri->ri_PartitionInfo->pi_RootToPartitionMap; + map = leaf_part_rri->ri_RootToPartitionMap; Assert(node->onConflictSet != NIL); Assert(rootResultRelInfo->ri_onConflict != NULL); @@ -907,6 +911,15 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate, } } + /* + * Also, if transition capture is required, store a map to convert tuples + * from partition's rowtype to the root partition table's. + */ + if (mtstate->mt_transition_capture || mtstate->mt_oc_transition_capture) + leaf_part_rri->ri_ChildToRootMap = + convert_tuples_by_name(RelationGetDescr(leaf_part_rri->ri_RelationDesc), + RelationGetDescr(leaf_part_rri->ri_PartitionRoot)); + /* * Since we've just initialized this ResultRelInfo, it's not in any list * attached to the estate as yet. Add it, so that it can be found later. @@ -940,18 +953,15 @@ ExecInitRoutingInfo(ModifyTableState *mtstate, int partidx) { MemoryContext oldcxt; - PartitionRoutingInfo *partrouteinfo; int rri_index; oldcxt = MemoryContextSwitchTo(proute->memcxt); - partrouteinfo = palloc(sizeof(PartitionRoutingInfo)); - /* * Set up a tuple conversion map to convert a tuple routed to the * partition from the parent's type to the partition's. 
*/ - partrouteinfo->pi_RootToPartitionMap = + partRelInfo->ri_RootToPartitionMap = convert_tuples_by_name(RelationGetDescr(partRelInfo->ri_PartitionRoot), RelationGetDescr(partRelInfo->ri_RelationDesc)); @@ -961,7 +971,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate, * for various operations that are applied to tuples after routing, such * as checking constraints. */ - if (partrouteinfo->pi_RootToPartitionMap != NULL) + if (partRelInfo->ri_RootToPartitionMap != NULL) { Relation partrel = partRelInfo->ri_RelationDesc; @@ -970,25 +980,11 @@ ExecInitRoutingInfo(ModifyTableState *mtstate, * partition's TupleDesc; TupleDesc reference will be released at the * end of the command. */ - partrouteinfo->pi_PartitionTupleSlot = + partRelInfo->ri_PartitionTupleSlot = table_slot_create(partrel, &estate->es_tupleTable); } else - partrouteinfo->pi_PartitionTupleSlot = NULL; - - /* - * Also, if transition capture is required, store a map to convert tuples - * from partition's rowtype to the root partition table's. 
- */ - if (mtstate && - (mtstate->mt_transition_capture || mtstate->mt_oc_transition_capture)) - { - partrouteinfo->pi_PartitionToRootMap = - convert_tuples_by_name(RelationGetDescr(partRelInfo->ri_RelationDesc), - RelationGetDescr(partRelInfo->ri_PartitionRoot)); - } - else - partrouteinfo->pi_PartitionToRootMap = NULL; + partRelInfo->ri_PartitionTupleSlot = NULL; /* * If the partition is a foreign table, let the FDW init itself for @@ -998,7 +994,6 @@ ExecInitRoutingInfo(ModifyTableState *mtstate, partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL) partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo); - partRelInfo->ri_PartitionInfo = partrouteinfo; partRelInfo->ri_CopyMultiInsertBuffer = NULL; /* diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index 8f474faed066a..01d26881e770c 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -404,10 +404,10 @@ RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, * Caller is responsible for opening the indexes. */ void -ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) +ExecSimpleRelationInsert(ResultRelInfo *resultRelInfo, + EState *estate, TupleTableSlot *slot) { bool skip_tuple = false; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; Relation rel = resultRelInfo->ri_RelationDesc; /* For now we support only tables. 
*/ @@ -430,19 +430,21 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) /* Compute stored generated columns */ if (rel->rd_att->constr && rel->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, slot, CMD_INSERT); + ExecComputeStoredGenerated(resultRelInfo, estate, slot, + CMD_INSERT); /* Check the constraints of the tuple */ if (rel->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); - if (resultRelInfo->ri_PartitionCheck) + if (rel->rd_rel->relispartition) ExecPartitionCheck(resultRelInfo, slot, estate, true); /* OK, store the tuple and create index entries for it */ simple_table_tuple_insert(resultRelInfo->ri_RelationDesc, slot); if (resultRelInfo->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, + recheckIndexes = ExecInsertIndexTuples(resultRelInfo, + slot, estate, false, NULL, NIL); /* AFTER ROW INSERT Triggers */ @@ -466,11 +468,11 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) * Caller is responsible for opening the indexes. 
*/ void -ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, +ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo, + EState *estate, EPQState *epqstate, TupleTableSlot *searchslot, TupleTableSlot *slot) { bool skip_tuple = false; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; Relation rel = resultRelInfo->ri_RelationDesc; ItemPointer tid = &(searchslot->tts_tid); @@ -496,19 +498,21 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, /* Compute stored generated columns */ if (rel->rd_att->constr && rel->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, slot, CMD_UPDATE); + ExecComputeStoredGenerated(resultRelInfo, estate, slot, + CMD_UPDATE); /* Check the constraints of the tuple */ if (rel->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); - if (resultRelInfo->ri_PartitionCheck) + if (rel->rd_rel->relispartition) ExecPartitionCheck(resultRelInfo, slot, estate, true); simple_table_tuple_update(rel, tid, slot, estate->es_snapshot, &update_indexes); if (resultRelInfo->ri_NumIndices > 0 && update_indexes) - recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, + recheckIndexes = ExecInsertIndexTuples(resultRelInfo, + slot, estate, false, NULL, NIL); /* AFTER ROW UPDATE Triggers */ @@ -527,11 +531,11 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, * Caller is responsible for opening the indexes. 
*/ void -ExecSimpleRelationDelete(EState *estate, EPQState *epqstate, +ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo, + EState *estate, EPQState *epqstate, TupleTableSlot *searchslot) { bool skip_tuple = false; - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; Relation rel = resultRelInfo->ri_RelationDesc; ItemPointer tid = &searchslot->tts_tid; diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index d0e65b86473d6..071a0007ebcd8 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -124,14 +124,8 @@ CreateExecutorState(void) estate->es_output_cid = (CommandId) 0; estate->es_result_relations = NULL; - estate->es_num_result_relations = 0; - estate->es_result_relation_info = NULL; - - estate->es_root_result_relations = NULL; - estate->es_num_root_result_relations = 0; - + estate->es_opened_result_relations = NIL; estate->es_tuple_routing_result_relations = NIL; - estate->es_trig_target_relations = NIL; estate->es_param_list_info = NULL; @@ -711,16 +705,7 @@ ExecCreateScanSlotFromOuterPlan(EState *estate, bool ExecRelationIsTargetRelation(EState *estate, Index scanrelid) { - ResultRelInfo *resultRelInfos; - int i; - - resultRelInfos = estate->es_result_relations; - for (i = 0; i < estate->es_num_result_relations; i++) - { - if (resultRelInfos[i].ri_RangeTableIndex == scanrelid) - return true; - } - return false; + return list_member_int(estate->es_plannedstmt->resultRelations, scanrelid); } /* ---------------------------------------------------------------- @@ -779,9 +764,10 @@ ExecInitRangeTable(EState *estate, List *rangeTable) palloc0(estate->es_range_table_size * sizeof(Relation)); /* - * es_rowmarks is also parallel to the es_range_table, but it's allocated - * only if needed. + * es_result_relations and es_rowmarks are also parallel to + * es_range_table, but are allocated only if needed. 
*/ + estate->es_result_relations = NULL; estate->es_rowmarks = NULL; } @@ -835,6 +821,40 @@ ExecGetRangeTableRelation(EState *estate, Index rti) return rel; } +/* + * ExecInitResultRelation + * Open relation given by the passed-in RT index and fill its + * ResultRelInfo node + * + * Here, we also save the ResultRelInfo in estate->es_result_relations array + * such that it can be accessed later using the RT index. + */ +void +ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, + Index rti) +{ + Relation resultRelationDesc; + + resultRelationDesc = ExecGetRangeTableRelation(estate, rti); + InitResultRelInfo(resultRelInfo, + resultRelationDesc, + rti, + NULL, + estate->es_instrument); + + if (estate->es_result_relations == NULL) + estate->es_result_relations = (ResultRelInfo **) + palloc0(estate->es_range_table_size * sizeof(ResultRelInfo *)); + estate->es_result_relations[rti - 1] = resultRelInfo; + + /* + * Saving in the list allows to avoid needlessly traversing the whole + * array when only a few of its entries are possibly non-NULL. 
+ */ + estate->es_opened_result_relations = + lappend(estate->es_opened_result_relations, resultRelInfo); +} + /* * UpdateChangedParamSet * Add changed parameters to a plan node's chgParam set diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index f940f48c6dae1..459a33375b14d 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -259,7 +259,8 @@ prepare_sql_fn_parse_info(HeapTuple procedureTuple, if (isNull) proargmodes = PointerGetDatum(NULL); /* just to be sure */ - n_arg_names = get_func_input_arg_names(proargnames, proargmodes, + n_arg_names = get_func_input_arg_names(procedureStruct->prokind, + proargnames, proargmodes, &pinfo->argnames); /* Paranoia: ignore the result if too few array entries */ @@ -608,7 +609,6 @@ init_sql_fcache(FunctionCallInfo fcinfo, Oid collation, bool lazyEvalOK) SQLFunctionCachePtr fcache; List *raw_parsetree_list; List *queryTree_list; - List *flat_query_list; List *resulttlist; ListCell *lc; Datum tmp; @@ -688,13 +688,7 @@ init_sql_fcache(FunctionCallInfo fcinfo, Oid collation, bool lazyEvalOK) /* * Parse and rewrite the queries in the function text. Use sublists to - * keep track of the original query boundaries. But we also build a - * "flat" list of the rewritten queries to pass to check_sql_fn_retval. - * This is because the last canSetTag query determines the result type - * independently of query boundaries --- and it might not be in the last - * sublist, for example if the last query rewrites to DO INSTEAD NOTHING. - * (It might not be unreasonable to throw an error in such a case, but - * this is the historical behavior and it doesn't seem worth changing.) + * keep track of the original query boundaries. * * Note: since parsing and planning is done in fcontext, we will generate * a lot of cruft that lives as long as the fcache does. 
This is annoying @@ -704,7 +698,6 @@ init_sql_fcache(FunctionCallInfo fcinfo, Oid collation, bool lazyEvalOK) raw_parsetree_list = pg_parse_query(fcache->src); queryTree_list = NIL; - flat_query_list = NIL; foreach(lc, raw_parsetree_list) { RawStmt *parsetree = lfirst_node(RawStmt, lc); @@ -716,10 +709,12 @@ init_sql_fcache(FunctionCallInfo fcinfo, Oid collation, bool lazyEvalOK) fcache->pinfo, NULL); queryTree_list = lappend(queryTree_list, queryTree_sublist); - flat_query_list = list_concat(flat_query_list, queryTree_sublist); } - check_sql_fn_statements(flat_query_list); + /* + * Check that there are no statements we don't want to allow. + */ + check_sql_fn_statements(queryTree_list); /* * Check that the function returns the type it claims to. Although in @@ -739,7 +734,7 @@ init_sql_fcache(FunctionCallInfo fcinfo, Oid collation, bool lazyEvalOK) * the rowtype column into multiple columns, since we have no way to * notify the caller that it should do that.) */ - fcache->returnsTuple = check_sql_fn_retval(flat_query_list, + fcache->returnsTuple = check_sql_fn_retval(queryTree_list, rettype, rettupdesc, false, @@ -1509,51 +1504,63 @@ ShutdownSQLFunction(Datum arg) * is not acceptable. */ void -check_sql_fn_statements(List *queryTreeList) +check_sql_fn_statements(List *queryTreeLists) { ListCell *lc; - foreach(lc, queryTreeList) + /* We are given a list of sublists of Queries */ + foreach(lc, queryTreeLists) { - Query *query = lfirst_node(Query, lc); + List *sublist = lfirst_node(List, lc); + ListCell *lc2; - /* - * Disallow procedures with output arguments. The current - * implementation would just throw the output values away, unless the - * statement is the last one. Per SQL standard, we should assign the - * output values by name. By disallowing this here, we preserve an - * opportunity for future improvement. 
- */ - if (query->commandType == CMD_UTILITY && - IsA(query->utilityStmt, CallStmt)) + foreach(lc2, sublist) { - CallStmt *stmt = castNode(CallStmt, query->utilityStmt); - HeapTuple tuple; - int numargs; - Oid *argtypes; - char **argnames; - char *argmodes; - int i; - - tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(stmt->funcexpr->funcid)); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for function %u", stmt->funcexpr->funcid); - numargs = get_func_arg_info(tuple, &argtypes, &argnames, &argmodes); - ReleaseSysCache(tuple); - - for (i = 0; i < numargs; i++) + Query *query = lfirst_node(Query, lc2); + + /* + * Disallow procedures with output arguments. The current + * implementation would just throw the output values away, unless + * the statement is the last one. Per SQL standard, we should + * assign the output values by name. By disallowing this here, we + * preserve an opportunity for future improvement. + */ + if (query->commandType == CMD_UTILITY && + IsA(query->utilityStmt, CallStmt)) { - if (argmodes && (argmodes[i] == PROARGMODE_INOUT || argmodes[i] == PROARGMODE_OUT)) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("calling procedures with output arguments is not supported in SQL functions"))); + CallStmt *stmt = castNode(CallStmt, query->utilityStmt); + HeapTuple tuple; + int numargs; + Oid *argtypes; + char **argnames; + char *argmodes; + int i; + + tuple = SearchSysCache1(PROCOID, + ObjectIdGetDatum(stmt->funcexpr->funcid)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for function %u", + stmt->funcexpr->funcid); + numargs = get_func_arg_info(tuple, + &argtypes, &argnames, &argmodes); + ReleaseSysCache(tuple); + + for (i = 0; i < numargs; i++) + { + if (argmodes && (argmodes[i] == PROARGMODE_INOUT || + argmodes[i] == PROARGMODE_OUT)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("calling procedures with output arguments is not supported in SQL functions"))); + } } 
} } } /* - * check_sql_fn_retval() -- check return value of a list of sql parse trees. + * check_sql_fn_retval() + * Check return value of a list of lists of sql parse trees. * * The return value of a sql function is the value returned by the last * canSetTag query in the function. We do some ad-hoc type checking and @@ -1591,7 +1598,7 @@ check_sql_fn_statements(List *queryTreeList) * function is defined to return VOID then *resultTargetList is set to NIL. */ bool -check_sql_fn_retval(List *queryTreeList, +check_sql_fn_retval(List *queryTreeLists, Oid rettype, TupleDesc rettupdesc, bool insertDroppedCols, List **resultTargetList) @@ -1618,20 +1625,30 @@ check_sql_fn_retval(List *queryTreeList, return false; /* - * Find the last canSetTag query in the list. This isn't necessarily the - * last parsetree, because rule rewriting can insert queries after what - * the user wrote. + * Find the last canSetTag query in the function body (which is presented + * to us as a list of sublists of Query nodes). This isn't necessarily + * the last parsetree, because rule rewriting can insert queries after + * what the user wrote. Note that it might not even be in the last + * sublist, for example if the last query rewrites to DO INSTEAD NOTHING. + * (It might not be unreasonable to throw an error in such a case, but + * this is the historical behavior and it doesn't seem worth changing.) 
*/ parse = NULL; parse_cell = NULL; - foreach(lc, queryTreeList) + foreach(lc, queryTreeLists) { - Query *q = lfirst_node(Query, lc); + List *sublist = lfirst_node(List, lc); + ListCell *lc2; - if (q->canSetTag) + foreach(lc2, sublist) { - parse = q; - parse_cell = lc; + Query *q = lfirst_node(Query, lc2); + + if (q->canSetTag) + { + parse = q; + parse_cell = lc2; + } } } diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 9776263ae75ac..75e5bbf209d53 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -2639,8 +2639,6 @@ agg_refill_hash_table(AggState *aggstate) */ hashagg_recompile_expressions(aggstate, true, true); - LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum, - HASHAGG_READ_BUFFER_SIZE); for (;;) { TupleTableSlot *spillslot = aggstate->hash_spill_rslot; @@ -2706,8 +2704,8 @@ agg_refill_hash_table(AggState *aggstate) if (spill_initialized) { - hash_agg_update_metrics(aggstate, true, spill.npartitions); hashagg_spill_finish(aggstate, &spill, batch->setno); + hash_agg_update_metrics(aggstate, true, spill.npartitions); } else hash_agg_update_metrics(aggstate, true, 0); @@ -2882,7 +2880,7 @@ hashagg_tapeinfo_init(AggState *aggstate) HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo)); int init_tapes = 16; /* expanded dynamically */ - tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1); + tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, true, NULL, NULL, -1); tapeinfo->ntapes = init_tapes; tapeinfo->nfreetapes = init_tapes; tapeinfo->freetapes_alloc = init_tapes; @@ -2923,6 +2921,7 @@ hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions, static void hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum) { + /* rewinding frees the buffer while not in use */ LogicalTapeRewindForWrite(tapeinfo->tapeset, tapenum); if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes) { @@ -3152,6 +3151,7 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill 
*spill, int setno) for (i = 0; i < spill->npartitions; i++) { + LogicalTapeSet *tapeset = aggstate->hash_tapeinfo->tapeset; int tapenum = spill->partitions[i]; HashAggBatch *new_batch; double cardinality; @@ -3163,9 +3163,13 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno) cardinality = estimateHyperLogLog(&spill->hll_card[i]); freeHyperLogLog(&spill->hll_card[i]); - new_batch = hashagg_batch_new(aggstate->hash_tapeinfo->tapeset, - tapenum, setno, spill->ntuples[i], - cardinality, used_bits); + /* rewinding frees the buffer while not in use */ + LogicalTapeRewindForRead(tapeset, tapenum, + HASHAGG_READ_BUFFER_SIZE); + + new_batch = hashagg_batch_new(tapeset, tapenum, setno, + spill->ntuples[i], cardinality, + used_bits); aggstate->hash_batches = lcons(new_batch, aggstate->hash_batches); aggstate->hash_batches_used++; } diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index 513471ab9b907..0b20f94035edd 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -215,6 +215,13 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) scanstate->fdwroutine = fdwroutine; scanstate->fdw_state = NULL; + /* + * For the FDW's convenience, look up the modification target relation's. + * ResultRelInfo. + */ + if (node->resultRelation > 0) + scanstate->resultRelInfo = estate->es_result_relations[node->resultRelation - 1]; + /* Initialize any outer plan. 
*/ if (outerPlan(node)) outerPlanState(scanstate) = diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 20a4c474cc47f..a33423c896e37 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -70,11 +70,8 @@ static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, ResultRelInfo *targetRelInfo, - TupleTableSlot *slot); -static ResultRelInfo *getTargetResultRelInfo(ModifyTableState *node); -static void ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate); -static TupleConversionMap *tupconv_map_for_subplan(ModifyTableState *node, - int whichplan); + TupleTableSlot *slot, + ResultRelInfo **partRelInfo); /* * Verify that the tuples to be produced by INSERT or UPDATE match the @@ -246,9 +243,10 @@ ExecCheckTIDVisible(EState *estate, * Compute stored generated columns for a tuple */ void -ExecComputeStoredGenerated(EState *estate, TupleTableSlot *slot, CmdType cmdtype) +ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, + EState *estate, TupleTableSlot *slot, + CmdType cmdtype) { - ResultRelInfo *resultRelInfo = estate->es_result_relation_info; Relation rel = resultRelInfo->ri_RelationDesc; TupleDesc tupdesc = RelationGetDescr(rel); int natts = tupdesc->natts; @@ -366,32 +364,48 @@ ExecComputeStoredGenerated(EState *estate, TupleTableSlot *slot, CmdType cmdtype * ExecInsert * * For INSERT, we have to insert the tuple into the target relation - * and insert appropriate tuples into the index relations. + * (or partition thereof) and insert appropriate tuples into the index + * relations. * * Returns RETURNING result if any, otherwise NULL. + * + * This may change the currently active tuple conversion map in + * mtstate->mt_transition_capture, so the callers must take care to + * save the previous value to avoid losing track of it. 
* ---------------------------------------------------------------- */ static TupleTableSlot * ExecInsert(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, TupleTableSlot *slot, TupleTableSlot *planSlot, EState *estate, bool canSetTag) { - ResultRelInfo *resultRelInfo; Relation resultRelationDesc; List *recheckIndexes = NIL; TupleTableSlot *result = NULL; TransitionCaptureState *ar_insert_trig_tcs; ModifyTable *node = (ModifyTable *) mtstate->ps.plan; OnConflictAction onconflict = node->onConflictAction; - - ExecMaterializeSlot(slot); + PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; /* - * get information on the (current) result relation + * If the input result relation is a partitioned table, find the leaf + * partition to insert the tuple into. */ - resultRelInfo = estate->es_result_relation_info; + if (proute) + { + ResultRelInfo *partRelInfo; + + slot = ExecPrepareTupleRouting(mtstate, estate, proute, + resultRelInfo, slot, + &partRelInfo); + resultRelInfo = partRelInfo; + } + + ExecMaterializeSlot(slot); + resultRelationDesc = resultRelInfo->ri_RelationDesc; /* @@ -424,7 +438,8 @@ ExecInsert(ModifyTableState *mtstate, */ if (resultRelationDesc->rd_att->constr && resultRelationDesc->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, slot, CMD_INSERT); + ExecComputeStoredGenerated(resultRelInfo, estate, slot, + CMD_INSERT); /* * insert into foreign table: let the FDW do it @@ -459,7 +474,8 @@ ExecInsert(ModifyTableState *mtstate, */ if (resultRelationDesc->rd_att->constr && resultRelationDesc->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, slot, CMD_INSERT); + ExecComputeStoredGenerated(resultRelInfo, estate, slot, + CMD_INSERT); /* * Check any RLS WITH CHECK policies. @@ -491,7 +507,7 @@ ExecInsert(ModifyTableState *mtstate, * one; except that if we got here via tuple-routing, we don't need to * if there's no BR trigger defined on the partition. 
*/ - if (resultRelInfo->ri_PartitionCheck && + if (resultRelationDesc->rd_rel->relispartition && (resultRelInfo->ri_PartitionRoot == NULL || (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_insert_before_row))) @@ -521,8 +537,8 @@ ExecInsert(ModifyTableState *mtstate, */ vlock: specConflict = false; - if (!ExecCheckIndexConstraints(slot, estate, &conflictTid, - arbiterIndexes)) + if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate, + &conflictTid, arbiterIndexes)) { /* committed conflict tuple found */ if (onconflict == ONCONFLICT_UPDATE) @@ -582,7 +598,8 @@ ExecInsert(ModifyTableState *mtstate, specToken); /* insert index entries for tuple */ - recheckIndexes = ExecInsertIndexTuples(slot, estate, true, + recheckIndexes = ExecInsertIndexTuples(resultRelInfo, + slot, estate, true, &specConflict, arbiterIndexes); @@ -621,8 +638,9 @@ ExecInsert(ModifyTableState *mtstate, /* insert index entries for tuple */ if (resultRelInfo->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, - NIL); + recheckIndexes = ExecInsertIndexTuples(resultRelInfo, + slot, estate, false, + NULL, NIL); } } @@ -707,6 +725,7 @@ ExecInsert(ModifyTableState *mtstate, */ static TupleTableSlot * ExecDelete(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *planSlot, @@ -718,8 +737,7 @@ ExecDelete(ModifyTableState *mtstate, bool *tupleDeleted, TupleTableSlot **epqreturnslot) { - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; + Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; TM_Result result; TM_FailureData tmfd; TupleTableSlot *slot = NULL; @@ -728,12 +746,6 @@ ExecDelete(ModifyTableState *mtstate, if (tupleDeleted) *tupleDeleted = false; - /* - * get information on the (current) result relation - */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; - /* BEFORE ROW DELETE Triggers */ if 
(resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_delete_before_row) @@ -1043,6 +1055,133 @@ ldelete:; return NULL; } +/* + * ExecCrossPartitionUpdate --- Move an updated tuple to another partition. + * + * This works by first deleting the old tuple from the current partition, + * followed by inserting the new tuple into the root parent table, that is, + * mtstate->rootResultRelInfo. It will be re-routed from there to the + * correct partition. + * + * Returns true if the tuple has been successfully moved, or if it's found + * that the tuple was concurrently deleted so there's nothing more to do + * for the caller. + * + * False is returned if the tuple we're trying to move is found to have been + * concurrently updated. In that case, the caller must to check if the + * updated tuple that's returned in *retry_slot still needs to be re-routed, + * and call this function again or perform a regular update accordingly. + */ +static bool +ExecCrossPartitionUpdate(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, + ItemPointer tupleid, HeapTuple oldtuple, + TupleTableSlot *slot, TupleTableSlot *planSlot, + EPQState *epqstate, bool canSetTag, + TupleTableSlot **retry_slot, + TupleTableSlot **inserted_tuple) +{ + EState *estate = mtstate->ps.state; + PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; + TupleConversionMap *tupconv_map; + bool tuple_deleted; + TupleTableSlot *epqslot = NULL; + + *inserted_tuple = NULL; + *retry_slot = NULL; + + /* + * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row + * to migrate to a different partition. Maybe this can be implemented + * some day, but it seems a fringe feature with little redeeming value. 
+ */ + if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("invalid ON UPDATE specification"), + errdetail("The result tuple would appear in a different partition than the original tuple."))); + + /* + * When an UPDATE is run on a leaf partition, we will not have partition + * tuple routing set up. In that case, fail with partition constraint + * violation error. + */ + if (proute == NULL) + ExecPartitionCheckEmitError(resultRelInfo, slot, estate); + + /* + * Row movement, part 1. Delete the tuple, but skip RETURNING processing. + * We want to return rows from INSERT. + */ + ExecDelete(mtstate, resultRelInfo, tupleid, oldtuple, planSlot, + epqstate, estate, + false, /* processReturning */ + false, /* canSetTag */ + true, /* changingPart */ + &tuple_deleted, &epqslot); + + /* + * For some reason if DELETE didn't happen (e.g. trigger prevented it, or + * it was already deleted by self, or it was concurrently deleted by + * another transaction), then we should skip the insert as well; + * otherwise, an UPDATE could cause an increase in the total number of + * rows across all partitions, which is clearly wrong. + * + * For a normal UPDATE, the case where the tuple has been the subject of a + * concurrent UPDATE or DELETE would be handled by the EvalPlanQual + * machinery, but for an UPDATE that we've translated into a DELETE from + * this partition and an INSERT into some other partition, that's not + * available, because CTID chains can't span relation boundaries. We + * mimic the semantics to a limited extent by skipping the INSERT if the + * DELETE fails to find a tuple. This ensures that two concurrent + * attempts to UPDATE the same tuple at the same time can't turn one tuple + * into two, and that an UPDATE of a just-deleted tuple can't resurrect + * it. + */ + if (!tuple_deleted) + { + /* + * epqslot will be typically NULL. 
But when ExecDelete() finds that + * another transaction has concurrently updated the same row, it + * re-fetches the row, skips the delete, and epqslot is set to the + * re-fetched tuple slot. In that case, we need to do all the checks + * again. + */ + if (TupIsNull(epqslot)) + return true; + else + { + *retry_slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot); + return false; + } + } + + /* + * resultRelInfo is one of the per-subplan resultRelInfos. So we should + * convert the tuple into root's tuple descriptor if needed, since + * ExecInsert() starts the search from root. + */ + tupconv_map = resultRelInfo->ri_ChildToRootMap; + if (tupconv_map != NULL) + slot = execute_attr_map_slot(tupconv_map->attrMap, + slot, + mtstate->mt_root_tuple_slot); + + /* Tuple routing starts from the root table. */ + *inserted_tuple = ExecInsert(mtstate, mtstate->rootResultRelInfo, slot, + planSlot, estate, canSetTag); + + /* + * Reset the transition state that may possibly have been written by + * INSERT. + */ + if (mtstate->mt_transition_capture) + mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; + + /* We're done moving. 
*/ + return true; +} + /* ---------------------------------------------------------------- * ExecUpdate * @@ -1067,6 +1206,7 @@ ldelete:; */ static TupleTableSlot * ExecUpdate(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, @@ -1075,12 +1215,10 @@ ExecUpdate(ModifyTableState *mtstate, EState *estate, bool canSetTag) { - ResultRelInfo *resultRelInfo; - Relation resultRelationDesc; + Relation resultRelationDesc = resultRelInfo->ri_RelationDesc; TM_Result result; TM_FailureData tmfd; List *recheckIndexes = NIL; - TupleConversionMap *saved_tcs_map = NULL; /* * abort the operation if not running transactions @@ -1090,12 +1228,6 @@ ExecUpdate(ModifyTableState *mtstate, ExecMaterializeSlot(slot); - /* - * get information on the (current) result relation - */ - resultRelInfo = estate->es_result_relation_info; - resultRelationDesc = resultRelInfo->ri_RelationDesc; - /* BEFORE ROW UPDATE Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_update_before_row) @@ -1120,7 +1252,8 @@ ExecUpdate(ModifyTableState *mtstate, */ if (resultRelationDesc->rd_att->constr && resultRelationDesc->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, slot, CMD_UPDATE); + ExecComputeStoredGenerated(resultRelInfo, estate, slot, + CMD_UPDATE); /* * update in foreign table: let the FDW do it @@ -1157,7 +1290,8 @@ ExecUpdate(ModifyTableState *mtstate, */ if (resultRelationDesc->rd_att->constr && resultRelationDesc->rd_att->constr->has_generated_stored) - ExecComputeStoredGenerated(estate, slot, CMD_UPDATE); + ExecComputeStoredGenerated(resultRelInfo, estate, slot, + CMD_UPDATE); /* * Check any RLS UPDATE WITH CHECK policies @@ -1181,7 +1315,7 @@ lreplace:; * row. So skip the WCO checks if the partition constraint fails. 
*/ partition_constraint_failed = - resultRelInfo->ri_PartitionCheck && + resultRelationDesc->rd_rel->relispartition && !ExecPartitionCheck(resultRelInfo, slot, estate, false); if (!partition_constraint_failed && @@ -1201,125 +1335,28 @@ lreplace:; */ if (partition_constraint_failed) { - bool tuple_deleted; - TupleTableSlot *ret_slot; - TupleTableSlot *epqslot = NULL; - PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing; - int map_index; - TupleConversionMap *tupconv_map; - - /* - * Disallow an INSERT ON CONFLICT DO UPDATE that causes the - * original row to migrate to a different partition. Maybe this - * can be implemented some day, but it seems a fringe feature with - * little redeeming value. - */ - if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("invalid ON UPDATE specification"), - errdetail("The result tuple would appear in a different partition than the original tuple."))); + TupleTableSlot *inserted_tuple, + *retry_slot; + bool retry; /* - * When an UPDATE is run on a leaf partition, we will not have - * partition tuple routing set up. In that case, fail with - * partition constraint violation error. + * ExecCrossPartitionUpdate will first DELETE the row from the + * partition it's currently in and then insert it back into the + * root table, which will re-route it to the correct partition. + * The first part may have to be repeated if it is detected that + * the tuple we're trying to move has been concurrently updated. */ - if (proute == NULL) - ExecPartitionCheckEmitError(resultRelInfo, slot, estate); - - /* - * Row movement, part 1. Delete the tuple, but skip RETURNING - * processing. We want to return rows from INSERT. - */ - ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate, - estate, false, false /* canSetTag */ , - true /* changingPart */ , &tuple_deleted, &epqslot); - - /* - * For some reason if DELETE didn't happen (e.g. 
trigger prevented - * it, or it was already deleted by self, or it was concurrently - * deleted by another transaction), then we should skip the insert - * as well; otherwise, an UPDATE could cause an increase in the - * total number of rows across all partitions, which is clearly - * wrong. - * - * For a normal UPDATE, the case where the tuple has been the - * subject of a concurrent UPDATE or DELETE would be handled by - * the EvalPlanQual machinery, but for an UPDATE that we've - * translated into a DELETE from this partition and an INSERT into - * some other partition, that's not available, because CTID chains - * can't span relation boundaries. We mimic the semantics to a - * limited extent by skipping the INSERT if the DELETE fails to - * find a tuple. This ensures that two concurrent attempts to - * UPDATE the same tuple at the same time can't turn one tuple - * into two, and that an UPDATE of a just-deleted tuple can't - * resurrect it. - */ - if (!tuple_deleted) - { - /* - * epqslot will be typically NULL. But when ExecDelete() - * finds that another transaction has concurrently updated the - * same row, it re-fetches the row, skips the delete, and - * epqslot is set to the re-fetched tuple slot. In that case, - * we need to do all the checks again. - */ - if (TupIsNull(epqslot)) - return NULL; - else - { - slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot); - goto lreplace; - } - } - - /* - * Updates set the transition capture map only when a new subplan - * is chosen. But for inserts, it is set for each row. So after - * INSERT, we need to revert back to the map created for UPDATE; - * otherwise the next UPDATE will incorrectly use the one created - * for INSERT. So first save the one created for UPDATE. - */ - if (mtstate->mt_transition_capture) - saved_tcs_map = mtstate->mt_transition_capture->tcs_map; - - /* - * resultRelInfo is one of the per-subplan resultRelInfos. 
So we - * should convert the tuple into root's tuple descriptor, since - * ExecInsert() starts the search from root. The tuple conversion - * map list is in the order of mtstate->resultRelInfo[], so to - * retrieve the one for this resultRel, we need to know the - * position of the resultRel in mtstate->resultRelInfo[]. - */ - map_index = resultRelInfo - mtstate->resultRelInfo; - Assert(map_index >= 0 && map_index < mtstate->mt_nplans); - tupconv_map = tupconv_map_for_subplan(mtstate, map_index); - if (tupconv_map != NULL) - slot = execute_attr_map_slot(tupconv_map->attrMap, - slot, - mtstate->mt_root_tuple_slot); - - /* - * Prepare for tuple routing, making it look like we're inserting - * into the root. - */ - Assert(mtstate->rootResultRelInfo != NULL); - slot = ExecPrepareTupleRouting(mtstate, estate, proute, - mtstate->rootResultRelInfo, slot); - - ret_slot = ExecInsert(mtstate, slot, planSlot, - estate, canSetTag); - - /* Revert ExecPrepareTupleRouting's node change. */ - estate->es_result_relation_info = resultRelInfo; - if (mtstate->mt_transition_capture) + retry = !ExecCrossPartitionUpdate(mtstate, resultRelInfo, tupleid, + oldtuple, slot, planSlot, + epqstate, canSetTag, + &retry_slot, &inserted_tuple); + if (retry) { - mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; - mtstate->mt_transition_capture->tcs_map = saved_tcs_map; + slot = retry_slot; + goto lreplace; } - return ret_slot; + return inserted_tuple; } /* @@ -1476,7 +1513,9 @@ lreplace:; /* insert index entries for tuple if necessary */ if (resultRelInfo->ri_NumIndices > 0 && update_indexes) - recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, NIL); + recheckIndexes = ExecInsertIndexTuples(resultRelInfo, + slot, estate, false, + NULL, NIL); } if (canSetTag) @@ -1715,7 +1754,7 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, */ /* Execute UPDATE with projection */ - *returning = ExecUpdate(mtstate, conflictTid, NULL, + *returning = ExecUpdate(mtstate, resultRelInfo, 
conflictTid, NULL, resultRelInfo->ri_onConflict->oc_ProjSlot, planSlot, &mtstate->mt_epqstate, mtstate->ps.state, @@ -1738,15 +1777,7 @@ static void fireBSTriggers(ModifyTableState *node) { ModifyTable *plan = (ModifyTable *) node->ps.plan; - ResultRelInfo *resultRelInfo = node->resultRelInfo; - - /* - * If the node modifies a partitioned table, we must fire its triggers. - * Note that in that case, node->resultRelInfo points to the first leaf - * partition, not the root table. - */ - if (node->rootResultRelInfo != NULL) - resultRelInfo = node->rootResultRelInfo; + ResultRelInfo *resultRelInfo = node->rootResultRelInfo; switch (node->operation) { @@ -1768,28 +1799,6 @@ fireBSTriggers(ModifyTableState *node) } } -/* - * Return the target rel ResultRelInfo. - * - * This relation is the same as : - * - the relation for which we will fire AFTER STATEMENT triggers. - * - the relation into whose tuple format all captured transition tuples must - * be converted. - * - the root partitioned table. - */ -static ResultRelInfo * -getTargetResultRelInfo(ModifyTableState *node) -{ - /* - * Note that if the node modifies a partitioned table, node->resultRelInfo - * points to the first leaf partition, not the root table. 
- */ - if (node->rootResultRelInfo != NULL) - return node->rootResultRelInfo; - else - return node->resultRelInfo; -} - /* * Process AFTER EACH STATEMENT triggers */ @@ -1797,7 +1806,7 @@ static void fireASTriggers(ModifyTableState *node) { ModifyTable *plan = (ModifyTable *) node->ps.plan; - ResultRelInfo *resultRelInfo = getTargetResultRelInfo(node); + ResultRelInfo *resultRelInfo = node->rootResultRelInfo; switch (node->operation) { @@ -1831,7 +1840,7 @@ static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) { ModifyTable *plan = (ModifyTable *) mtstate->ps.plan; - ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate); + ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo; /* Check for transition tables on the directly targeted relation. */ mtstate->mt_transition_capture = @@ -1844,50 +1853,27 @@ ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc, RelationGetRelid(targetRelInfo->ri_RelationDesc), CMD_UPDATE); - - /* - * If we found that we need to collect transition tuples then we may also - * need tuple conversion maps for any children that have TupleDescs that - * aren't compatible with the tuplestores. (We can share these maps - * between the regular and ON CONFLICT cases.) - */ - if (mtstate->mt_transition_capture != NULL || - mtstate->mt_oc_transition_capture != NULL) - { - ExecSetupChildParentMapForSubplan(mtstate); - - /* - * Install the conversion map for the first plan for UPDATE and DELETE - * operations. It will be advanced each time we switch to the next - * plan. (INSERT operations set it every time, so we need not update - * mtstate->mt_oc_transition_capture here.) 
- */ - if (mtstate->mt_transition_capture && mtstate->operation != CMD_INSERT) - mtstate->mt_transition_capture->tcs_map = - tupconv_map_for_subplan(mtstate, 0); - } } /* * ExecPrepareTupleRouting --- prepare for routing one tuple * * Determine the partition in which the tuple in slot is to be inserted, - * and modify mtstate and estate to prepare for it. + * and return its ResultRelInfo in *partRelInfo. The return value is + * a slot holding the tuple of the partition rowtype. * - * Caller must revert the estate changes after executing the insertion! - * In mtstate, transition capture changes may also need to be reverted. - * - * Returns a slot holding the tuple of the partition rowtype. + * This also sets the transition table information in mtstate based on the + * selected partition. */ static TupleTableSlot * ExecPrepareTupleRouting(ModifyTableState *mtstate, EState *estate, PartitionTupleRouting *proute, ResultRelInfo *targetRelInfo, - TupleTableSlot *slot) + TupleTableSlot *slot, + ResultRelInfo **partRelInfo) { ResultRelInfo *partrel; - PartitionRoutingInfo *partrouteinfo; TupleConversionMap *map; /* @@ -1898,113 +1884,40 @@ ExecPrepareTupleRouting(ModifyTableState *mtstate, * UPDATE to another partition becomes a DELETE+INSERT. */ partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate); - partrouteinfo = partrel->ri_PartitionInfo; - Assert(partrouteinfo != NULL); - - /* - * Make it look like we are inserting into the partition. - */ - estate->es_result_relation_info = partrel; /* * If we're capturing transition tuples, we might need to convert from the - * partition rowtype to root partitioned table's rowtype. + * partition rowtype to root partitioned table's rowtype. But if there + * are no BEFORE triggers on the partition that could change the tuple, we + * can just remember the original unconverted tuple to avoid a needless + * round trip conversion. 
*/ if (mtstate->mt_transition_capture != NULL) { - if (partrel->ri_TrigDesc && - partrel->ri_TrigDesc->trig_insert_before_row) - { - /* - * If there are any BEFORE triggers on the partition, we'll have - * to be ready to convert their result back to tuplestore format. - */ - mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; - mtstate->mt_transition_capture->tcs_map = - partrouteinfo->pi_PartitionToRootMap; - } - else - { - /* - * Otherwise, just remember the original unconverted tuple, to - * avoid a needless round trip conversion. - */ - mtstate->mt_transition_capture->tcs_original_insert_tuple = slot; - mtstate->mt_transition_capture->tcs_map = NULL; - } - } - if (mtstate->mt_oc_transition_capture != NULL) - { - mtstate->mt_oc_transition_capture->tcs_map = - partrouteinfo->pi_PartitionToRootMap; + bool has_before_insert_row_trig; + + has_before_insert_row_trig = (partrel->ri_TrigDesc && + partrel->ri_TrigDesc->trig_insert_before_row); + + mtstate->mt_transition_capture->tcs_original_insert_tuple = + !has_before_insert_row_trig ? slot : NULL; } /* * Convert the tuple, if necessary. */ - map = partrouteinfo->pi_RootToPartitionMap; + map = partrel->ri_RootToPartitionMap; if (map != NULL) { - TupleTableSlot *new_slot = partrouteinfo->pi_PartitionTupleSlot; + TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot; slot = execute_attr_map_slot(map->attrMap, slot, new_slot); } + *partRelInfo = partrel; return slot; } -/* - * Initialize the child-to-root tuple conversion map array for UPDATE subplans. - * - * This map array is required to convert the tuple from the subplan result rel - * to the target table descriptor. This requirement arises for two independent - * scenarios: - * 1. For update-tuple-routing. - * 2. For capturing tuples in transition tables. 
- */ -static void -ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate) -{ - ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate); - ResultRelInfo *resultRelInfos = mtstate->resultRelInfo; - TupleDesc outdesc; - int numResultRelInfos = mtstate->mt_nplans; - int i; - - /* - * Build array of conversion maps from each child's TupleDesc to the one - * used in the target relation. The map pointers may be NULL when no - * conversion is necessary, which is hopefully a common case. - */ - - /* Get tuple descriptor of the target rel. */ - outdesc = RelationGetDescr(targetRelInfo->ri_RelationDesc); - - mtstate->mt_per_subplan_tupconv_maps = (TupleConversionMap **) - palloc(sizeof(TupleConversionMap *) * numResultRelInfos); - - for (i = 0; i < numResultRelInfos; ++i) - { - mtstate->mt_per_subplan_tupconv_maps[i] = - convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc), - outdesc); - } -} - -/* - * For a given subplan index, get the tuple conversion map. - */ -static TupleConversionMap * -tupconv_map_for_subplan(ModifyTableState *mtstate, int whichplan) -{ - /* If nobody else set the per-subplan array of maps, do so ourselves. 
*/ - if (mtstate->mt_per_subplan_tupconv_maps == NULL) - ExecSetupChildParentMapForSubplan(mtstate); - - Assert(whichplan >= 0 && whichplan < mtstate->mt_nplans); - return mtstate->mt_per_subplan_tupconv_maps[whichplan]; -} - /* ---------------------------------------------------------------- * ExecModifyTable * @@ -2016,10 +1929,8 @@ static TupleTableSlot * ExecModifyTable(PlanState *pstate) { ModifyTableState *node = castNode(ModifyTableState, pstate); - PartitionTupleRouting *proute = node->mt_partition_tuple_routing; EState *estate = node->ps.state; CmdType operation = node->operation; - ResultRelInfo *saved_resultRelInfo; ResultRelInfo *resultRelInfo; PlanState *subplanstate; JunkFilter *junkfilter; @@ -2067,17 +1978,6 @@ ExecModifyTable(PlanState *pstate) subplanstate = node->mt_plans[node->mt_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; - /* - * es_result_relation_info must point to the currently active result - * relation while we are within this ModifyTable node. Even though - * ModifyTable nodes can't be nested statically, they can be nested - * dynamically (since our subplan could include a reference to a modifying - * CTE). So we have to save and restore the caller's value. - */ - saved_resultRelInfo = estate->es_result_relation_info; - - estate->es_result_relation_info = resultRelInfo; - /* * Fetch rows from subplan(s), and execute the required table modification * for each row. @@ -2111,20 +2011,8 @@ ExecModifyTable(PlanState *pstate) resultRelInfo++; subplanstate = node->mt_plans[node->mt_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; - estate->es_result_relation_info = resultRelInfo; EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan, node->mt_arowmarks[node->mt_whichplan]); - /* Prepare to convert transition tuples from this child. 
*/ - if (node->mt_transition_capture != NULL) - { - node->mt_transition_capture->tcs_map = - tupconv_map_for_subplan(node, node->mt_whichplan); - } - if (node->mt_oc_transition_capture != NULL) - { - node->mt_oc_transition_capture->tcs_map = - tupconv_map_for_subplan(node, node->mt_whichplan); - } continue; } else @@ -2156,7 +2044,6 @@ ExecModifyTable(PlanState *pstate) */ slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); - estate->es_result_relation_info = saved_resultRelInfo; return slot; } @@ -2239,25 +2126,21 @@ ExecModifyTable(PlanState *pstate) switch (operation) { case CMD_INSERT: - /* Prepare for tuple routing if needed. */ - if (proute) - slot = ExecPrepareTupleRouting(node, estate, proute, - resultRelInfo, slot); - slot = ExecInsert(node, slot, planSlot, + slot = ExecInsert(node, resultRelInfo, slot, planSlot, estate, node->canSetTag); - /* Revert ExecPrepareTupleRouting's state change. */ - if (proute) - estate->es_result_relation_info = resultRelInfo; break; case CMD_UPDATE: - slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot, + planSlot, &node->mt_epqstate, estate, + node->canSetTag); break; case CMD_DELETE: - slot = ExecDelete(node, tupleid, oldtuple, planSlot, - &node->mt_epqstate, estate, - true, node->canSetTag, - false /* changingPart */ , NULL, NULL); + slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple, + planSlot, &node->mt_epqstate, estate, + true, /* processReturning */ + node->canSetTag, + false, /* changingPart */ + NULL, NULL); break; default: elog(ERROR, "unknown operation"); @@ -2269,15 +2152,9 @@ ExecModifyTable(PlanState *pstate) * the work on next call. 
*/ if (slot) - { - estate->es_result_relation_info = saved_resultRelInfo; return slot; - } } - /* Restore es_result_relation_info before exiting */ - estate->es_result_relation_info = saved_resultRelInfo; - /* * We're done, but fire AFTER STATEMENT triggers before exiting. */ @@ -2298,10 +2175,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ModifyTableState *mtstate; CmdType operation = node->operation; int nplans = list_length(node->plans); - ResultRelInfo *saved_resultRelInfo; ResultRelInfo *resultRelInfo; Plan *subplan; - ListCell *l; + ListCell *l, + *l1; int i; Relation rel; bool update_tuple_routing_needed = node->partColsUpdated; @@ -2322,13 +2199,36 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) mtstate->mt_done = false; mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans); - mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex; + mtstate->resultRelInfo = (ResultRelInfo *) + palloc(nplans * sizeof(ResultRelInfo)); mtstate->mt_scans = (TupleTableSlot **) palloc0(sizeof(TupleTableSlot *) * nplans); - /* If modifying a partitioned table, initialize the root table info */ - if (node->rootResultRelIndex >= 0) - mtstate->rootResultRelInfo = estate->es_root_result_relations + - node->rootResultRelIndex; + /*---------- + * Resolve the target relation. This is the same as: + * + * - the relation for which we will fire FOR STATEMENT triggers, + * - the relation into whose tuple format all captured transition tuples + * must be converted, and + * - the root partitioned table used for tuple routing. + * + * If it's a partitioned table, the root partition doesn't appear + * elsewhere in the plan and its RT index is given explicitly in + * node->rootRelation. Otherwise (i.e. table inheritance) the target + * relation is the first relation in the node->resultRelations list. 
+ *---------- + */ + if (node->rootRelation > 0) + { + mtstate->rootResultRelInfo = makeNode(ResultRelInfo); + ExecInitResultRelation(estate, mtstate->rootResultRelInfo, + node->rootRelation); + } + else + { + mtstate->rootResultRelInfo = mtstate->resultRelInfo; + ExecInitResultRelation(estate, mtstate->resultRelInfo, + linitial_int(node->resultRelations)); + } mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans); mtstate->mt_nplans = nplans; @@ -2337,23 +2237,33 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam); mtstate->fireBSTriggers = true; + /* + * Build state for collecting transition tuples. This requires having a + * valid trigger query context, so skip it in explain-only mode. + */ + if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY)) + ExecSetupTransitionCaptureState(mtstate, estate); + /* * call ExecInitNode on each of the plans to be executed and save the * results into the array "mt_plans". This is also a convenient place to * verify that the proposed target relations are valid and open their - * indexes for insertion of new index entries. Note we *must* set - * estate->es_result_relation_info correctly while we initialize each - * sub-plan; external modules such as FDWs may depend on that (see - * contrib/postgres_fdw/postgres_fdw.c: postgresBeginDirectModify() as one - * example). + * indexes for insertion of new index entries. */ - saved_resultRelInfo = estate->es_result_relation_info; - resultRelInfo = mtstate->resultRelInfo; i = 0; - foreach(l, node->plans) + forboth(l, node->resultRelations, l1, node->plans) { - subplan = (Plan *) lfirst(l); + Index resultRelation = lfirst_int(l); + + subplan = (Plan *) lfirst(l1); + + /* + * This opens result relation and fills ResultRelInfo. (root relation + * was initialized already.) 
+ */ + if (resultRelInfo != mtstate->rootResultRelInfo) + ExecInitResultRelation(estate, resultRelInfo, resultRelation); /* Initialize the usesFdwDirectModify flag */ resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i, @@ -2390,7 +2300,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) update_tuple_routing_needed = true; /* Now init the plan for this result rel */ - estate->es_result_relation_info = resultRelInfo; mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags); mtstate->mt_scans[i] = ExecInitExtraTupleSlot(mtstate->ps.state, ExecGetResultType(mtstate->mt_plans[i]), @@ -2410,14 +2319,29 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) eflags); } + /* + * If needed, initialize a map to convert tuples in the child format + * to the format of the table mentioned in the query (root relation). + * It's needed for update tuple routing, because the routing starts + * from the root relation. It's also needed for capturing transition + * tuples, because the transition tuple store can only store tuples in + * the root table format. + * + * For INSERT, the map is only initialized for a given partition when + * the partition itself is first initialized by ExecFindPartition(). 
+ */ + if (update_tuple_routing_needed || + (mtstate->mt_transition_capture && + mtstate->operation != CMD_INSERT)) + resultRelInfo->ri_ChildToRootMap = + convert_tuples_by_name(RelationGetDescr(resultRelInfo->ri_RelationDesc), + RelationGetDescr(mtstate->rootResultRelInfo->ri_RelationDesc)); resultRelInfo++; i++; } - estate->es_result_relation_info = saved_resultRelInfo; - /* Get the target relation */ - rel = (getTargetResultRelInfo(mtstate))->ri_RelationDesc; + rel = mtstate->rootResultRelInfo->ri_RelationDesc; /* * If it's not a partitioned table after all, UPDATE tuple routing should @@ -2436,26 +2360,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) ExecSetupPartitionTupleRouting(estate, mtstate, rel); /* - * Build state for collecting transition tuples. This requires having a - * valid trigger query context, so skip it in explain-only mode. - */ - if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY)) - ExecSetupTransitionCaptureState(mtstate, estate); - - /* - * Construct mapping from each of the per-subplan partition attnos to the - * root attno. This is required when during update row movement the tuple - * descriptor of a source partition does not match the root partitioned - * table descriptor. In such a case we need to convert tuples to the root - * tuple descriptor, because the search for destination partition starts - * from the root. We'll also need a slot to store these converted tuples. - * We can skip this setup if it's not a partition key update. + * For update row movement we'll need a dedicated slot to store the tuples + * that have been converted from partition format to the root table + * format. */ if (update_tuple_routing_needed) - { - ExecSetupChildParentMapForSubplan(mtstate); mtstate->mt_root_tuple_slot = table_slot_create(rel, NULL); - } /* * Initialize any WITH CHECK OPTION constraints if needed. 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 9a7962518ee69..9a706df5f061d 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -1303,83 +1303,3 @@ ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent) parent->chgParam = bms_add_member(parent->chgParam, paramid); } } - - -/* - * ExecInitAlternativeSubPlan - * - * Initialize for execution of one of a set of alternative subplans. - */ -AlternativeSubPlanState * -ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent) -{ - AlternativeSubPlanState *asstate = makeNode(AlternativeSubPlanState); - double num_calls; - SubPlan *subplan1; - SubPlan *subplan2; - Cost cost1; - Cost cost2; - ListCell *lc; - - asstate->subplan = asplan; - - /* - * Initialize subplans. (Can we get away with only initializing the one - * we're going to use?) - */ - foreach(lc, asplan->subplans) - { - SubPlan *sp = lfirst_node(SubPlan, lc); - SubPlanState *sps = ExecInitSubPlan(sp, parent); - - asstate->subplans = lappend(asstate->subplans, sps); - parent->subPlan = lappend(parent->subPlan, sps); - } - - /* - * Select the one to be used. For this, we need an estimate of the number - * of executions of the subplan. We use the number of output rows - * expected from the parent plan node. This is a good estimate if we are - * in the parent's targetlist, and an underestimate (but probably not by - * more than a factor of 2) if we are in the qual. - */ - num_calls = parent->plan->plan_rows; - - /* - * The planner saved enough info so that we don't have to work very hard - * to estimate the total cost, given the number-of-calls estimate. 
- */ - Assert(list_length(asplan->subplans) == 2); - subplan1 = (SubPlan *) linitial(asplan->subplans); - subplan2 = (SubPlan *) lsecond(asplan->subplans); - - cost1 = subplan1->startup_cost + num_calls * subplan1->per_call_cost; - cost2 = subplan2->startup_cost + num_calls * subplan2->per_call_cost; - - if (cost1 < cost2) - asstate->active = 0; - else - asstate->active = 1; - - return asstate; -} - -/* - * ExecAlternativeSubPlan - * - * Execute one of a set of alternative subplans. - * - * Note: in future we might consider changing to different subplans on the - * fly, in case the original rowcount estimate turns out to be way off. - */ -Datum -ExecAlternativeSubPlan(AlternativeSubPlanState *node, - ExprContext *econtext, - bool *isNull) -{ - /* Just pass control to the active subplan */ - SubPlanState *activesp = list_nth_node(SubPlanState, - node->subplans, node->active); - - return ExecSubPlan(activesp, econtext, isNull); -} diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index 43bed78a52995..4c026d37d0dd3 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -324,26 +324,53 @@ llvm_pg_func(LLVMModuleRef mod, const char *funcname) } /* - * Copy attributes from one function to another. + * Copy attributes from one function to another, for a specific index (an + * index can reference return value, function and parameter attributes). */ -void -llvm_copy_attributes(LLVMValueRef v_from, LLVMValueRef v_to) +static void +llvm_copy_attributes_at_index(LLVMValueRef v_from, LLVMValueRef v_to, uint32 index) { int num_attributes; - int attno; LLVMAttributeRef *attrs; - num_attributes = - LLVMGetAttributeCountAtIndex(v_from, LLVMAttributeFunctionIndex); + num_attributes = LLVMGetAttributeCountAtIndexPG(v_from, index); + + /* + * Not just for efficiency: LLVM <= 3.9 crashes when + * LLVMGetAttributesAtIndex() is called for an index with 0 attributes. 
+ */ + if (num_attributes == 0) + return; attrs = palloc(sizeof(LLVMAttributeRef) * num_attributes); - LLVMGetAttributesAtIndex(v_from, LLVMAttributeFunctionIndex, attrs); + LLVMGetAttributesAtIndex(v_from, index, attrs); - for (attno = 0; attno < num_attributes; attno++) - { - LLVMAddAttributeAtIndex(v_to, LLVMAttributeFunctionIndex, - attrs[attno]); - } + for (int attno = 0; attno < num_attributes; attno++) + LLVMAddAttributeAtIndex(v_to, index, attrs[attno]); + + pfree(attrs); +} + +/* + * Copy all attributes from one function to another. I.e. function, return and + * parameters will be copied. + */ +void +llvm_copy_attributes(LLVMValueRef v_from, LLVMValueRef v_to) +{ + uint32 param_count; + + /* copy function attributes */ + llvm_copy_attributes_at_index(v_from, v_to, LLVMAttributeFunctionIndex); + + /* and the return value attributes */ + llvm_copy_attributes_at_index(v_from, v_to, LLVMAttributeReturnIndex); + + /* and each function parameter's attribute */ + param_count = LLVMCountParams(v_from); + + for (int paramidx = 1; paramidx <= param_count; paramidx++) + llvm_copy_attributes_at_index(v_from, v_to, paramidx); } /* diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c index cca5c117a0eef..eb1dea658cb25 100644 --- a/src/backend/jit/llvm/llvmjit_expr.c +++ b/src/backend/jit/llvm/llvmjit_expr.c @@ -1918,12 +1918,6 @@ llvm_compile_expr(ExprState *state) LLVMBuildBr(b, opblocks[opno + 1]); break; - case EEOP_ALTERNATIVE_SUBPLAN: - build_EvalXFunc(b, mod, "ExecEvalAlternativeSubPlan", - v_state, op, v_econtext); - LLVMBuildBr(b, opblocks[opno + 1]); - break; - case EEOP_AGG_STRICT_DESERIALIZE: case EEOP_AGG_DESERIALIZE: { diff --git a/src/backend/jit/llvm/llvmjit_types.c b/src/backend/jit/llvm/llvmjit_types.c index 0a93d5f6658cf..1ed3cafa2f23b 100644 --- a/src/backend/jit/llvm/llvmjit_types.c +++ b/src/backend/jit/llvm/llvmjit_types.c @@ -102,7 +102,6 @@ void *referenced_functions[] = ExecAggTransReparent, 
ExecEvalAggOrderedTransDatum, ExecEvalAggOrderedTransTuple, - ExecEvalAlternativeSubPlan, ExecEvalArrayCoerce, ExecEvalArrayExpr, ExecEvalConstraintCheck, diff --git a/src/backend/jit/llvm/llvmjit_wrap.cpp b/src/backend/jit/llvm/llvmjit_wrap.cpp index e8a7380325a8b..37c006a1ff50f 100644 --- a/src/backend/jit/llvm/llvmjit_wrap.cpp +++ b/src/backend/jit/llvm/llvmjit_wrap.cpp @@ -16,6 +16,13 @@ extern "C" #include "postgres.h" } +#include + +/* Avoid macro clash with LLVM's C++ headers */ +#undef Min + +#include +#include #include #include @@ -44,3 +51,28 @@ char *LLVMGetHostCPUFeatures(void) { return strdup(Features.getString().c_str()); } #endif + +/* + * Like LLVM's LLVMGetAttributeCountAtIndex(), works around a bug in LLVM 3.9. + * + * In LLVM <= 3.9, LLVMGetAttributeCountAtIndex() segfaults if there are no + * attributes at an index (fixed in LLVM commit ce9bb1097dc2). + */ +unsigned +LLVMGetAttributeCountAtIndexPG(LLVMValueRef F, uint32 Idx) +{ + /* + * This is more expensive, so only do when using a problematic LLVM + * version. + */ +#if LLVM_VERSION_MAJOR < 4 + if (!llvm::unwrap(F)->getAttributes().hasAttributes(Idx)) + return 0; +#endif + + /* + * There is no nice public API to determine the count nicely, so just + * always fall back to LLVM's C API. 
+ */ + return LLVMGetAttributeCountAtIndex(F, Idx); +} diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 36565df4fc1e4..d132c5cb48bd7 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -1521,7 +1521,7 @@ pg_SSPI_recvauth(Port *port) (errmsg("could not load library \"%s\": error code %lu", "SECUR32.DLL", GetLastError()))); - _QuerySecurityContextToken = (QUERY_SECURITY_CONTEXT_TOKEN_FN) + _QuerySecurityContextToken = (QUERY_SECURITY_CONTEXT_TOKEN_FN) (pg_funcptr_t) GetProcAddress(secur32, "QuerySecurityContextToken"); if (_QuerySecurityContextToken == NULL) { @@ -2522,7 +2522,7 @@ InitializeLDAPConnection(Port *port, LDAP **ldap) ldap_unbind(*ldap); return STATUS_ERROR; } - _ldap_start_tls_sA = (__ldap_start_tls_sA) GetProcAddress(ldaphandle, "ldap_start_tls_sA"); + _ldap_start_tls_sA = (__ldap_start_tls_sA) (pg_funcptr_t) GetProcAddress(ldaphandle, "ldap_start_tls_sA"); if (_ldap_start_tls_sA == NULL) { ereport(LOG, diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c index 64427f185bb76..5a73302b7b9ea 100644 --- a/src/backend/libpq/be-secure-gssapi.c +++ b/src/backend/libpq/be-secure-gssapi.c @@ -209,7 +209,7 @@ be_gssapi_write(Port *port, void *ptr, size_t len) PqGSSSendConsumed += input.length; /* 4 network-order bytes of length, then payload */ - netlen = htonl(output.length); + netlen = pg_hton32(output.length); memcpy(PqGSSSendBuffer + PqGSSSendLength, &netlen, sizeof(uint32)); PqGSSSendLength += sizeof(uint32); @@ -323,7 +323,7 @@ be_gssapi_read(Port *port, void *ptr, size_t len) } /* Decode the packet length and check for overlength packet */ - input.length = ntohl(*(uint32 *) PqGSSRecvBuffer); + input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer); if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) ereport(FATAL, @@ -509,7 +509,7 @@ secure_open_gssapi(Port *port) /* * Get the length for this packet from the length header. 
*/ - input.length = ntohl(*(uint32 *) PqGSSRecvBuffer); + input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer); /* Done with the length, reset our buffer */ PqGSSRecvLength = 0; @@ -567,7 +567,7 @@ secure_open_gssapi(Port *port) */ if (output.length > 0) { - uint32 netlen = htonl(output.length); + uint32 netlen = pg_hton32(output.length); if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32)) ereport(FATAL, diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 9f106653f3f88..4c86fb608748b 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -1730,29 +1730,25 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, *err_msg = "clientcert can only be configured for \"hostssl\" rows"; return false; } - if (strcmp(val, "1") == 0 - || strcmp(val, "verify-ca") == 0) - { - hbaline->clientcert = clientCertCA; - } - else if (strcmp(val, "verify-full") == 0) + + if (strcmp(val, "verify-full") == 0) { hbaline->clientcert = clientCertFull; } - else if (strcmp(val, "0") == 0 - || strcmp(val, "no-verify") == 0) + else if (strcmp(val, "verify-ca") == 0) { if (hbaline->auth_method == uaCert) { ereport(elevel, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("clientcert can not be set to \"no-verify\" when using \"cert\" authentication"), + errmsg("clientcert only accepts \"verify-full\" when using \"cert\" authentication"), errcontext("line %d of configuration file \"%s\"", line_num, HbaFileName))); - *err_msg = "clientcert can not be set to \"no-verify\" when using \"cert\" authentication"; + *err_msg = "clientcert can only be set to \"verify-full\" when using \"cert\" authentication"; return false; } - hbaline->clientcert = clientCertOff; + + hbaline->clientcert = clientCertCA; } else { diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 0409a40b82a83..2b4d7654cc715 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -91,7 +91,6 @@ _copyPlannedStmt(const PlannedStmt *from) 
COPY_NODE_FIELD(planTree); COPY_NODE_FIELD(rtable); COPY_NODE_FIELD(resultRelations); - COPY_NODE_FIELD(rootResultRelations); COPY_NODE_FIELD(appendRelations); COPY_NODE_FIELD(subplans); COPY_BITMAPSET_FIELD(rewindPlanIDs); @@ -207,8 +206,6 @@ _copyModifyTable(const ModifyTable *from) COPY_SCALAR_FIELD(rootRelation); COPY_SCALAR_FIELD(partColsUpdated); COPY_NODE_FIELD(resultRelations); - COPY_SCALAR_FIELD(resultRelIndex); - COPY_SCALAR_FIELD(rootResultRelIndex); COPY_NODE_FIELD(plans); COPY_NODE_FIELD(withCheckOptionLists); COPY_NODE_FIELD(returningLists); @@ -761,6 +758,7 @@ _copyForeignScan(const ForeignScan *from) COPY_NODE_FIELD(fdw_recheck_quals); COPY_BITMAPSET_FIELD(fs_relids); COPY_SCALAR_FIELD(fsSystemCol); + COPY_SCALAR_FIELD(resultRelation); return newnode; } diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index 80fa8c84e49ae..efa44342c4b8a 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -327,7 +327,7 @@ lappend(List *list, void *datum) else new_tail_cell(list); - lfirst(list_tail(list)) = datum; + llast(list) = datum; check_list_invariants(list); return list; } @@ -345,7 +345,7 @@ lappend_int(List *list, int datum) else new_tail_cell(list); - lfirst_int(list_tail(list)) = datum; + llast_int(list) = datum; check_list_invariants(list); return list; } @@ -363,7 +363,7 @@ lappend_oid(List *list, Oid datum) else new_tail_cell(list); - lfirst_oid(list_tail(list)) = datum; + llast_oid(list) = datum; check_list_invariants(list); return list; } @@ -459,7 +459,7 @@ lcons(void *datum, List *list) else new_head_cell(list); - lfirst(list_head(list)) = datum; + linitial(list) = datum; check_list_invariants(list); return list; } @@ -477,7 +477,7 @@ lcons_int(int datum, List *list) else new_head_cell(list); - lfirst_int(list_head(list)) = datum; + linitial_int(list) = datum; check_list_invariants(list); return list; } @@ -495,7 +495,7 @@ lcons_oid(Oid datum, List *list) else new_head_cell(list); - lfirst_oid(list_head(list)) = 
datum; + linitial_oid(list) = datum; check_list_invariants(list); return list; } diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 9ce8f43385ec8..1dc873ed255f8 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -441,7 +441,7 @@ exprTypmod(const Node *expr) typmod = exprTypmod((Node *) linitial(cexpr->args)); if (typmod < 0) return -1; /* no point in trying harder */ - for_each_cell(arg, cexpr->args, list_second_cell(cexpr->args)) + for_each_from(arg, cexpr->args, 1) { Node *e = (Node *) lfirst(arg); @@ -469,7 +469,7 @@ exprTypmod(const Node *expr) typmod = exprTypmod((Node *) linitial(mexpr->args)); if (typmod < 0) return -1; /* no point in trying harder */ - for_each_cell(arg, mexpr->args, list_second_cell(mexpr->args)) + for_each_from(arg, mexpr->args, 1) { Node *e = (Node *) lfirst(arg); diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index e2f177515dac3..08a049232e0a7 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -309,7 +309,6 @@ _outPlannedStmt(StringInfo str, const PlannedStmt *node) WRITE_NODE_FIELD(planTree); WRITE_NODE_FIELD(rtable); WRITE_NODE_FIELD(resultRelations); - WRITE_NODE_FIELD(rootResultRelations); WRITE_NODE_FIELD(appendRelations); WRITE_NODE_FIELD(subplans); WRITE_BITMAPSET_FIELD(rewindPlanIDs); @@ -408,8 +407,6 @@ _outModifyTable(StringInfo str, const ModifyTable *node) WRITE_UINT_FIELD(rootRelation); WRITE_BOOL_FIELD(partColsUpdated); WRITE_NODE_FIELD(resultRelations); - WRITE_INT_FIELD(resultRelIndex); - WRITE_INT_FIELD(rootResultRelIndex); WRITE_NODE_FIELD(plans); WRITE_NODE_FIELD(withCheckOptionLists); WRITE_NODE_FIELD(returningLists); @@ -698,6 +695,7 @@ _outForeignScan(StringInfo str, const ForeignScan *node) WRITE_NODE_FIELD(fdw_recheck_quals); WRITE_BITMAPSET_FIELD(fs_relids); WRITE_BOOL_FIELD(fsSystemCol); + WRITE_INT_FIELD(resultRelation); } static void @@ -2194,7 +2192,6 @@ _outPlannerGlobal(StringInfo str, const 
PlannerGlobal *node) WRITE_NODE_FIELD(finalrtable); WRITE_NODE_FIELD(finalrowmarks); WRITE_NODE_FIELD(resultRelations); - WRITE_NODE_FIELD(rootResultRelations); WRITE_NODE_FIELD(appendRelations); WRITE_NODE_FIELD(relationOids); WRITE_NODE_FIELD(invalItems); @@ -2254,6 +2251,7 @@ _outPlannerInfo(StringInfo str, const PlannerInfo *node) WRITE_BOOL_FIELD(hasLateralRTEs); WRITE_BOOL_FIELD(hasHavingQual); WRITE_BOOL_FIELD(hasPseudoConstantQuals); + WRITE_BOOL_FIELD(hasAlternativeSubPlans); WRITE_BOOL_FIELD(hasRecursion); WRITE_INT_FIELD(wt_param_id); WRITE_BITMAPSET_FIELD(curOuterRels); diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index bce0c7e72b2c5..c05f04a259c1b 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -414,9 +414,9 @@ ParamsErrorCallback(void *arg) return; if (data->portalName && data->portalName[0] != '\0') - errcontext("extended query \"%s\" with parameters: %s", + errcontext("portal \"%s\" with parameters: %s", data->portalName, data->params->paramValuesStr); else - errcontext("extended query with parameters: %s", + errcontext("unnamed portal with parameters: %s", data->params->paramValuesStr); } diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c index 42476724d88f0..970a2d438402a 100644 --- a/src/backend/nodes/print.c +++ b/src/backend/nodes/print.c @@ -394,7 +394,6 @@ print_expr(const Node *expr, const List *rtable) } else { - /* we print prefix and postfix ops the same... */ printf("%s ", ((opname != NULL) ? 
opname : "(invalid operator)")); print_expr(get_leftop((const Expr *) e), rtable); } diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 42050ab71955a..ab7b535caaeaa 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -1542,7 +1542,6 @@ _readPlannedStmt(void) READ_NODE_FIELD(planTree); READ_NODE_FIELD(rtable); READ_NODE_FIELD(resultRelations); - READ_NODE_FIELD(rootResultRelations); READ_NODE_FIELD(appendRelations); READ_NODE_FIELD(subplans); READ_BITMAPSET_FIELD(rewindPlanIDs); @@ -1639,8 +1638,6 @@ _readModifyTable(void) READ_UINT_FIELD(rootRelation); READ_BOOL_FIELD(partColsUpdated); READ_NODE_FIELD(resultRelations); - READ_INT_FIELD(resultRelIndex); - READ_INT_FIELD(rootResultRelIndex); READ_NODE_FIELD(plans); READ_NODE_FIELD(withCheckOptionLists); READ_NODE_FIELD(returningLists); @@ -2017,6 +2014,7 @@ _readForeignScan(void) READ_NODE_FIELD(fdw_recheck_quals); READ_BITMAPSET_FIELD(fs_relids); READ_BOOL_FIELD(fsSystemCol); + READ_INT_FIELD(resultRelation); READ_DONE(); } diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index cd3716d494f03..733f7ea543289 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -107,6 +107,13 @@ */ #define APPEND_CPU_COST_MULTIPLIER 0.5 +/* + * Maximum value for row estimates. We cap row estimates to this to help + * ensure that costs based on these estimates remain within the range of what + * double can represent. add_path() wouldn't act sanely given infinite or NaN + * cost values. + */ +#define MAXIMUM_ROWCOUNT 1e100 double seq_page_cost = DEFAULT_SEQ_PAGE_COST; double random_page_cost = DEFAULT_RANDOM_PAGE_COST; @@ -189,11 +196,14 @@ double clamp_row_est(double nrows) { /* - * Force estimate to be at least one row, to make explain output look - * better and to avoid possible divide-by-zero when interpolating costs. - * Make it an integer, too. + * Avoid infinite and NaN row estimates. 
Costs derived from such values + * are going to be useless. Also force the estimate to be at least one + * row, to make explain output look better and to avoid possible + * divide-by-zero when interpolating costs. Make it an integer, too. */ - if (nrows <= 1.0) + if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows)) + nrows = MAXIMUM_ROWCOUNT; + else if (nrows <= 1.0) nrows = 1.0; else nrows = rint(nrows); @@ -2737,12 +2747,11 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path, QualCost restrict_qual_cost; double ntuples; - /* Protect some assumptions below that rowcounts aren't zero or NaN */ - if (outer_path_rows <= 0 || isnan(outer_path_rows)) + /* Protect some assumptions below that rowcounts aren't zero */ + if (outer_path_rows <= 0) outer_path_rows = 1; - if (inner_path_rows <= 0 || isnan(inner_path_rows)) + if (inner_path_rows <= 0) inner_path_rows = 1; - /* Mark the path with the correct row estimate */ if (path->path.param_info) path->path.rows = path->path.param_info->ppi_rows; @@ -2952,10 +2961,10 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace, innerendsel; Path sort_path; /* dummy for result of cost_sort */ - /* Protect some assumptions below that rowcounts aren't zero or NaN */ - if (outer_path_rows <= 0 || isnan(outer_path_rows)) + /* Protect some assumptions below that rowcounts aren't zero */ + if (outer_path_rows <= 0) outer_path_rows = 1; - if (inner_path_rows <= 0 || isnan(inner_path_rows)) + if (inner_path_rows <= 0) inner_path_rows = 1; /* @@ -3185,8 +3194,8 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, rescannedtuples; double rescanratio; - /* Protect some assumptions below that rowcounts aren't zero or NaN */ - if (inner_path_rows <= 0 || isnan(inner_path_rows)) + /* Protect some assumptions below that rowcounts aren't zero */ + if (inner_path_rows <= 0) inner_path_rows = 1; /* Mark the path with the correct row estimate */ diff --git a/src/backend/optimizer/path/equivclass.c 
b/src/backend/optimizer/path/equivclass.c index b68a5a0ec7171..690b753369e8c 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -137,6 +137,7 @@ process_equivalence(PlannerInfo *root, EquivalenceMember *em1, *em2; ListCell *lc1; + int ec2_idx; /* Should not already be marked as having generated an eclass */ Assert(restrictinfo->left_ec == NULL); @@ -258,6 +259,7 @@ process_equivalence(PlannerInfo *root, */ ec1 = ec2 = NULL; em1 = em2 = NULL; + ec2_idx = -1; foreach(lc1, root->eq_classes) { EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc1); @@ -311,6 +313,7 @@ process_equivalence(PlannerInfo *root, equal(item2, cur_em->em_expr)) { ec2 = cur_ec; + ec2_idx = foreach_current_index(lc1); em2 = cur_em; if (ec1) break; @@ -371,7 +374,7 @@ process_equivalence(PlannerInfo *root, ec1->ec_max_security = Max(ec1->ec_max_security, ec2->ec_max_security); ec2->ec_merged = ec1; - root->eq_classes = list_delete_ptr(root->eq_classes, ec2); + root->eq_classes = list_delete_nth_cell(root->eq_classes, ec2_idx); /* just to avoid debugging confusion w/ dangling pointers: */ ec2->ec_members = NIL; ec2->ec_sources = NIL; @@ -634,12 +637,6 @@ get_eclass_for_sort_expr(PlannerInfo *root, */ expr = canonicalize_ec_expression(expr, opcintype, collation); - /* - * Get the precise set of nullable relids appearing in the expression. - */ - expr_relids = pull_varnos((Node *) expr); - nullable_relids = bms_intersect(nullable_relids, expr_relids); - /* * Scan through the existing EquivalenceClasses for a match */ @@ -716,6 +713,12 @@ get_eclass_for_sort_expr(PlannerInfo *root, if (newec->ec_has_volatile && sortref == 0) /* should not happen */ elog(ERROR, "volatile EquivalenceClass has no sortref"); + /* + * Get the precise set of nullable relids appearing in the expression. 
+ */ + expr_relids = pull_varnos((Node *) expr); + nullable_relids = bms_intersect(nullable_relids, expr_relids); + newem = add_eq_member(newec, copyObject(expr), expr_relids, nullable_relids, false, opcintype); @@ -1171,9 +1174,9 @@ generate_join_implied_equalities(PlannerInfo *root, } /* - * Get all eclasses in common between inner_rel's relids and outer_relids + * Get all eclasses that mention both inner and outer sides of the join */ - matching_ecs = get_common_eclass_indexes(root, inner_rel->relids, + matching_ecs = get_common_eclass_indexes(root, nominal_inner_relids, outer_relids); i = -1; @@ -1964,6 +1967,7 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) bool matchleft; bool matchright; ListCell *lc2; + int coal_idx = -1; /* Ignore EC unless it contains pseudoconstants */ if (!cur_ec->ec_has_const) @@ -2008,6 +2012,7 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) if (equal(leftvar, cfirst) && equal(rightvar, csecond)) { + coal_idx = foreach_current_index(lc2); match = true; break; } @@ -2072,7 +2077,7 @@ reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) */ if (matchleft && matchright) { - cur_ec->ec_members = list_delete_ptr(cur_ec->ec_members, coal_em); + cur_ec->ec_members = list_delete_nth_cell(cur_ec->ec_members, coal_idx); return true; } @@ -2380,6 +2385,7 @@ add_child_join_rel_equivalences(PlannerInfo *root, Relids top_parent_relids = child_joinrel->top_parent_relids; Relids child_relids = child_joinrel->relids; Bitmapset *matching_ecs; + MemoryContext oldcontext; int i; Assert(IS_JOIN_REL(child_joinrel) && IS_JOIN_REL(parent_joinrel)); @@ -2387,6 +2393,16 @@ add_child_join_rel_equivalences(PlannerInfo *root, /* We need consider only ECs that mention the parent joinrel */ matching_ecs = get_eclass_indexes_for_relids(root, top_parent_relids); + /* + * If we're being called during GEQO join planning, we still have to + * create any new EC members in the main planner context, to avoid 
having + * a corrupt EC data structure after the GEQO context is reset. This is + * problematic since we'll leak memory across repeated GEQO cycles. For + * now, though, bloat is better than crash. If it becomes a real issue + * we'll have to do something to avoid generating duplicate EC members. + */ + oldcontext = MemoryContextSwitchTo(root->planner_cxt); + i = -1; while ((i = bms_next_member(matching_ecs, i)) >= 0) { @@ -2486,6 +2502,8 @@ add_child_join_rel_equivalences(PlannerInfo *root, } } } + + MemoryContextSwitchTo(oldcontext); } diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index db54a6ba2ea92..4a35903b29f74 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -1005,8 +1005,8 @@ sort_inner_and_outer(PlannerInfo *root, /* Make a pathkey list with this guy first */ if (l != list_head(all_pathkeys)) outerkeys = lcons(front_pathkey, - list_delete_ptr(list_copy(all_pathkeys), - front_pathkey)); + list_delete_nth_cell(list_copy(all_pathkeys), + foreach_current_index(l))); else outerkeys = all_pathkeys; /* no work at first one... */ diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 99278eed93194..94280a730c4d9 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -2261,7 +2261,7 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path) { bool is_first_sort = ((RollupData *) linitial(rollups))->is_hashed; - for_each_cell(lc, rollups, list_second_cell(rollups)) + for_each_from(lc, rollups, 1) { RollupData *rollup = lfirst(lc); AttrNumber *new_grpColIdx; @@ -5530,7 +5530,11 @@ make_foreignscan(List *qptlist, plan->lefttree = outer_plan; plan->righttree = NULL; node->scan.scanrelid = scanrelid; + + /* these may be overridden by the FDW's PlanDirectModify callback. 
*/ node->operation = CMD_SELECT; + node->resultRelation = 0; + /* fs_server will be filled in by create_foreignscan_plan */ node->fs_server = InvalidOid; node->fdw_exprs = fdw_exprs; @@ -6808,8 +6812,6 @@ make_modifytable(PlannerInfo *root, node->rootRelation = rootRelation; node->partColsUpdated = partColsUpdated; node->resultRelations = resultRelations; - node->resultRelIndex = -1; /* will be set correctly in setrefs.c */ - node->rootResultRelIndex = -1; /* will be set correctly in setrefs.c */ node->plans = subplans; if (!onconflict) { diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 139c5e3dc245f..986d7a52e32ca 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -305,7 +305,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, glob->finalrtable = NIL; glob->finalrowmarks = NIL; glob->resultRelations = NIL; - glob->rootResultRelations = NIL; glob->appendRelations = NIL; glob->relationOids = NIL; glob->invalItems = NIL; @@ -493,7 +492,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, Assert(glob->finalrtable == NIL); Assert(glob->finalrowmarks == NIL); Assert(glob->resultRelations == NIL); - Assert(glob->rootResultRelations == NIL); Assert(glob->appendRelations == NIL); top_plan = set_plan_references(root, top_plan); /* ... 
and the subplans (both regular subplans and initplans) */ @@ -520,7 +518,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, result->planTree = top_plan; result->rtable = glob->finalrtable; result->resultRelations = glob->resultRelations; - result->rootResultRelations = glob->rootResultRelations; result->appendRelations = glob->appendRelations; result->subplans = glob->subplans; result->rewindPlanIDs = glob->rewindPlanIDs; @@ -629,6 +626,8 @@ subquery_planner(PlannerGlobal *glob, Query *parse, root->minmax_aggs = NIL; root->qual_security_level = 0; root->inhTargetKind = INHKIND_NONE; + root->hasPseudoConstantQuals = false; + root->hasAlternativeSubPlans = false; root->hasRecursion = hasRecursion; if (hasRecursion) root->wt_param_id = assign_special_exec_param(root); @@ -759,9 +758,6 @@ subquery_planner(PlannerGlobal *glob, Query *parse, */ root->hasHavingQual = (parse->havingQual != NULL); - /* Clear this flag; might get set in distribute_qual_to_rels */ - root->hasPseudoConstantQuals = false; - /* * Do expression preprocessing on targetlist and quals, as well as other * random expressions in the querytree. Note that we do not need to @@ -4431,7 +4427,7 @@ consider_groupingsets_paths(PlannerInfo *root, * below, must use the same condition. */ i = 0; - for_each_cell(lc, gd->rollups, list_second_cell(gd->rollups)) + for_each_from(lc, gd->rollups, 1) { RollupData *rollup = lfirst_node(RollupData, lc); @@ -4465,7 +4461,7 @@ consider_groupingsets_paths(PlannerInfo *root, rollups = list_make1(linitial(gd->rollups)); i = 0; - for_each_cell(lc, gd->rollups, list_second_cell(gd->rollups)) + for_each_from(lc, gd->rollups, 1) { RollupData *rollup = lfirst_node(RollupData, lc); @@ -4582,14 +4578,17 @@ create_window_paths(PlannerInfo *root, /* * Consider computing window functions starting from the existing * cheapest-total path (which will likely require a sort) as well as any - * existing paths that satisfy root->window_pathkeys (which won't). 
+ * existing paths that satisfy or partially satisfy root->window_pathkeys. */ foreach(lc, input_rel->pathlist) { Path *path = (Path *) lfirst(lc); + int presorted_keys; if (path == input_rel->cheapest_total_path || - pathkeys_contained_in(root->window_pathkeys, path->pathkeys)) + pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys, + &presorted_keys) || + presorted_keys > 0) create_one_window_path(root, window_rel, path, @@ -4664,18 +4663,42 @@ create_one_window_path(PlannerInfo *root, { WindowClause *wc = lfirst_node(WindowClause, l); List *window_pathkeys; + int presorted_keys; + bool is_sorted; window_pathkeys = make_pathkeys_for_window(root, wc, root->processed_tlist); + is_sorted = pathkeys_count_contained_in(window_pathkeys, + path->pathkeys, + &presorted_keys); + /* Sort if necessary */ - if (!pathkeys_contained_in(window_pathkeys, path->pathkeys)) + if (!is_sorted) { - path = (Path *) create_sort_path(root, window_rel, - path, - window_pathkeys, - -1.0); + /* + * No presorted keys or incremental sort disabled, just perform a + * complete sort. + */ + if (presorted_keys == 0 || !enable_incremental_sort) + path = (Path *) create_sort_path(root, window_rel, + path, + window_pathkeys, + -1.0); + else + { + /* + * Since we have presorted keys and incremental sort is + * enabled, just use incremental sort. 
+ */ + path = (Path *) create_incremental_sort_path(root, + window_rel, + path, + window_pathkeys, + presorted_keys, + -1.0); + } } if (lnext(activeWindows, l)) diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index baefe0e946173..8b43371425940 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -49,6 +49,7 @@ typedef struct { PlannerInfo *root; int rtoffset; + double num_exec; } fix_scan_expr_context; typedef struct @@ -58,6 +59,7 @@ typedef struct indexed_tlist *inner_itlist; Index acceptable_rel; int rtoffset; + double num_exec; } fix_join_expr_context; typedef struct @@ -66,8 +68,28 @@ typedef struct indexed_tlist *subplan_itlist; Index newvarno; int rtoffset; + double num_exec; } fix_upper_expr_context; +/* + * Selecting the best alternative in an AlternativeSubPlan expression requires + * estimating how many times that expression will be evaluated. For an + * expression in a plan node's targetlist, the plan's estimated number of + * output rows is clearly what to use, but for an expression in a qual it's + * far less clear. Since AlternativeSubPlans aren't heavily used, we don't + * want to expend a lot of cycles making such estimates. What we use is twice + * the number of output rows. That's not entirely unfounded: we know that + * clause_selectivity() would fall back to a default selectivity estimate + * of 0.5 for any SubPlan, so if the qual containing the SubPlan is the last + * to be applied (which it likely would be, thanks to order_qual_clauses()), + * this matches what we could have estimated in a far more laborious fashion. + * Obviously there are many other scenarios, but it's probably not worth the + * trouble to try to improve on this estimate, especially not when we don't + * have a better estimate for the selectivity of the SubPlan qual itself. 
+ */ +#define NUM_EXEC_TLIST(parentplan) ((parentplan)->plan_rows) +#define NUM_EXEC_QUAL(parentplan) ((parentplan)->plan_rows * 2.0) + /* * Check if a Const node is a regclass value. We accept plain OID too, * since a regclass Const will get folded to that type if it's an argument @@ -79,8 +101,8 @@ typedef struct (((con)->consttype == REGCLASSOID || (con)->consttype == OIDOID) && \ !(con)->constisnull) -#define fix_scan_list(root, lst, rtoffset) \ - ((List *) fix_scan_expr(root, (Node *) (lst), rtoffset)) +#define fix_scan_list(root, lst, rtoffset, num_exec) \ + ((List *) fix_scan_expr(root, (Node *) (lst), rtoffset, num_exec)) static void add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing); static void flatten_unplanned_rtes(PlannerGlobal *glob, RangeTblEntry *rte); @@ -109,7 +131,8 @@ static Plan *set_mergeappend_references(PlannerInfo *root, int rtoffset); static void set_hash_references(PlannerInfo *root, Plan *plan, int rtoffset); static Relids offset_relid_set(Relids relids, int rtoffset); -static Node *fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset); +static Node *fix_scan_expr(PlannerInfo *root, Node *node, + int rtoffset, double num_exec); static Node *fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context); static bool fix_scan_expr_walker(Node *node, fix_scan_expr_context *context); static void set_join_references(PlannerInfo *root, Join *join, int rtoffset); @@ -133,14 +156,15 @@ static List *fix_join_expr(PlannerInfo *root, List *clauses, indexed_tlist *outer_itlist, indexed_tlist *inner_itlist, - Index acceptable_rel, int rtoffset); + Index acceptable_rel, + int rtoffset, double num_exec); static Node *fix_join_expr_mutator(Node *node, fix_join_expr_context *context); static Node *fix_upper_expr(PlannerInfo *root, Node *node, indexed_tlist *subplan_itlist, Index newvarno, - int rtoffset); + int rtoffset, double num_exec); static Node *fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context); static List 
*set_returning_clause_references(PlannerInfo *root, @@ -177,17 +201,20 @@ static List *set_returning_clause_references(PlannerInfo *root, * 5. PARAM_MULTIEXPR Params are replaced by regular PARAM_EXEC Params, * now that we have finished planning all MULTIEXPR subplans. * - * 6. We compute regproc OIDs for operators (ie, we look up the function + * 6. AlternativeSubPlan expressions are replaced by just one of their + * alternatives, using an estimate of how many times they'll be executed. + * + * 7. We compute regproc OIDs for operators (ie, we look up the function * that implements each op). * - * 7. We create lists of specific objects that the plan depends on. + * 8. We create lists of specific objects that the plan depends on. * This will be used by plancache.c to drive invalidation of cached plans. * Relation dependencies are represented by OIDs, and everything else by * PlanInvalItems (this distinction is motivated by the shared-inval APIs). * Currently, relations, user-defined functions, and domains are the only * types of objects that are explicitly tracked this way. * - * 8. We assign every plan node in the tree a unique ID. + * 9. We assign every plan node in the tree a unique ID. 
* * We also perform one final optimization step, which is to delete * SubqueryScan, Append, and MergeAppend plan nodes that aren't doing @@ -490,9 +517,11 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scanrelid += rtoffset; splan->plan.targetlist = - fix_scan_list(root, splan->plan.targetlist, rtoffset); + fix_scan_list(root, splan->plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->plan.qual = - fix_scan_list(root, splan->plan.qual, rtoffset); + fix_scan_list(root, splan->plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_SampleScan: @@ -501,11 +530,14 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->tablesample = (TableSampleClause *) - fix_scan_expr(root, (Node *) splan->tablesample, rtoffset); + fix_scan_expr(root, (Node *) splan->tablesample, + rtoffset, 1); } break; case T_IndexScan: @@ -514,17 +546,23 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->indexqual = - fix_scan_list(root, splan->indexqual, rtoffset); + fix_scan_list(root, splan->indexqual, + rtoffset, 1); splan->indexqualorig = - fix_scan_list(root, splan->indexqualorig, rtoffset); + fix_scan_list(root, splan->indexqualorig, + rtoffset, NUM_EXEC_QUAL(plan)); splan->indexorderby = - 
fix_scan_list(root, splan->indexorderby, rtoffset); + fix_scan_list(root, splan->indexorderby, + rtoffset, 1); splan->indexorderbyorig = - fix_scan_list(root, splan->indexorderbyorig, rtoffset); + fix_scan_list(root, splan->indexorderbyorig, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_IndexOnlyScan: @@ -543,9 +581,10 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) Assert(splan->scan.plan.targetlist == NIL); Assert(splan->scan.plan.qual == NIL); splan->indexqual = - fix_scan_list(root, splan->indexqual, rtoffset); + fix_scan_list(root, splan->indexqual, rtoffset, 1); splan->indexqualorig = - fix_scan_list(root, splan->indexqualorig, rtoffset); + fix_scan_list(root, splan->indexqualorig, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_BitmapHeapScan: @@ -554,11 +593,14 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->bitmapqualorig = - fix_scan_list(root, splan->bitmapqualorig, rtoffset); + fix_scan_list(root, splan->bitmapqualorig, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_TidScan: @@ -567,11 +609,14 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->tidquals = - fix_scan_list(root, splan->tidquals, rtoffset); + fix_scan_list(root, splan->tidquals, + rtoffset, 1); } 
break; case T_SubqueryScan: @@ -585,11 +630,13 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->functions = - fix_scan_list(root, splan->functions, rtoffset); + fix_scan_list(root, splan->functions, rtoffset, 1); } break; case T_TableFuncScan: @@ -598,11 +645,14 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->tablefunc = (TableFunc *) - fix_scan_expr(root, (Node *) splan->tablefunc, rtoffset); + fix_scan_expr(root, (Node *) splan->tablefunc, + rtoffset, 1); } break; case T_ValuesScan: @@ -611,11 +661,14 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); splan->values_lists = - fix_scan_list(root, splan->values_lists, rtoffset); + fix_scan_list(root, splan->values_lists, + rtoffset, 1); } break; case T_CteScan: @@ -624,9 +677,11 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += 
rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_NamedTuplestoreScan: @@ -635,9 +690,11 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_WorkTableScan: @@ -646,9 +703,11 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) splan->scan.scanrelid += rtoffset; splan->scan.plan.targetlist = - fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, splan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->scan.plan.qual = - fix_scan_list(root, splan->scan.plan.qual, rtoffset); + fix_scan_list(root, splan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); } break; case T_ForeignScan: @@ -732,9 +791,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) Assert(splan->plan.qual == NIL); splan->limitOffset = - fix_scan_expr(root, splan->limitOffset, rtoffset); + fix_scan_expr(root, splan->limitOffset, rtoffset, 1); splan->limitCount = - fix_scan_expr(root, splan->limitCount, rtoffset); + fix_scan_expr(root, splan->limitCount, rtoffset, 1); } break; case T_Agg: @@ -775,9 +834,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) * variable refs, so fix_scan_expr works for them. 
*/ wplan->startOffset = - fix_scan_expr(root, wplan->startOffset, rtoffset); + fix_scan_expr(root, wplan->startOffset, rtoffset, 1); wplan->endOffset = - fix_scan_expr(root, wplan->endOffset, rtoffset); + fix_scan_expr(root, wplan->endOffset, rtoffset, 1); } break; case T_Result: @@ -793,13 +852,15 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) else { splan->plan.targetlist = - fix_scan_list(root, splan->plan.targetlist, rtoffset); + fix_scan_list(root, splan->plan.targetlist, + rtoffset, NUM_EXEC_TLIST(plan)); splan->plan.qual = - fix_scan_list(root, splan->plan.qual, rtoffset); + fix_scan_list(root, splan->plan.qual, + rtoffset, NUM_EXEC_QUAL(plan)); } /* resconstantqual can't contain any subplan variable refs */ splan->resconstantqual = - fix_scan_expr(root, splan->resconstantqual, rtoffset); + fix_scan_expr(root, splan->resconstantqual, rtoffset, 1); } break; case T_ProjectSet: @@ -813,7 +874,8 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) Assert(splan->plan.qual == NIL); splan->withCheckOptionLists = - fix_scan_list(root, splan->withCheckOptionLists, rtoffset); + fix_scan_list(root, splan->withCheckOptionLists, + rtoffset, 1); if (splan->returningLists) { @@ -874,18 +936,18 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) fix_join_expr(root, splan->onConflictSet, NULL, itlist, linitial_int(splan->resultRelations), - rtoffset); + rtoffset, NUM_EXEC_QUAL(plan)); splan->onConflictWhere = (Node *) fix_join_expr(root, (List *) splan->onConflictWhere, NULL, itlist, linitial_int(splan->resultRelations), - rtoffset); + rtoffset, NUM_EXEC_QUAL(plan)); pfree(itlist); splan->exclRelTlist = - fix_scan_list(root, splan->exclRelTlist, rtoffset); + fix_scan_list(root, splan->exclRelTlist, rtoffset, 1); } splan->nominalRelation += rtoffset; @@ -913,26 +975,15 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) /* * Append this ModifyTable node's final result relation RT - * index(es) to the global list for the plan, and 
set its - * resultRelIndex to reflect their starting position in the - * global list. + * index(es) to the global list for the plan. */ - splan->resultRelIndex = list_length(root->glob->resultRelations); root->glob->resultRelations = list_concat(root->glob->resultRelations, splan->resultRelations); - - /* - * If the main target relation is a partitioned table, also - * add the partition root's RT index to rootResultRelations, - * and remember its index in that list in rootResultRelIndex. - */ if (splan->rootRelation) { - splan->rootResultRelIndex = - list_length(root->glob->rootResultRelations); - root->glob->rootResultRelations = - lappend_int(root->glob->rootResultRelations, + root->glob->resultRelations = + lappend_int(root->glob->resultRelations, splan->rootRelation); } } @@ -1026,19 +1077,24 @@ set_indexonlyscan_references(PlannerInfo *root, (Node *) plan->scan.plan.targetlist, index_itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_TLIST((Plan *) plan)); plan->scan.plan.qual = (List *) fix_upper_expr(root, (Node *) plan->scan.plan.qual, index_itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) plan)); /* indexqual is already transformed to reference index columns */ - plan->indexqual = fix_scan_list(root, plan->indexqual, rtoffset); + plan->indexqual = fix_scan_list(root, plan->indexqual, + rtoffset, 1); /* indexorderby is already transformed to reference index columns */ - plan->indexorderby = fix_scan_list(root, plan->indexorderby, rtoffset); + plan->indexorderby = fix_scan_list(root, plan->indexorderby, + rtoffset, 1); /* indextlist must NOT be transformed to reference index columns */ - plan->indextlist = fix_scan_list(root, plan->indextlist, rtoffset); + plan->indextlist = fix_scan_list(root, plan->indextlist, + rtoffset, NUM_EXEC_TLIST((Plan *) plan)); pfree(index_itlist); @@ -1084,9 +1140,11 @@ set_subqueryscan_references(PlannerInfo *root, */ plan->scan.scanrelid += rtoffset; plan->scan.plan.targetlist = - fix_scan_list(root, 
plan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, plan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST((Plan *) plan)); plan->scan.plan.qual = - fix_scan_list(root, plan->scan.plan.qual, rtoffset); + fix_scan_list(root, plan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL((Plan *) plan)); result = (Plan *) plan; } @@ -1202,29 +1260,34 @@ set_foreignscan_references(PlannerInfo *root, (Node *) fscan->scan.plan.targetlist, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_TLIST((Plan *) fscan)); fscan->scan.plan.qual = (List *) fix_upper_expr(root, (Node *) fscan->scan.plan.qual, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) fscan)); fscan->fdw_exprs = (List *) fix_upper_expr(root, (Node *) fscan->fdw_exprs, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) fscan)); fscan->fdw_recheck_quals = (List *) fix_upper_expr(root, (Node *) fscan->fdw_recheck_quals, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) fscan)); pfree(itlist); /* fdw_scan_tlist itself just needs fix_scan_list() adjustments */ fscan->fdw_scan_tlist = - fix_scan_list(root, fscan->fdw_scan_tlist, rtoffset); + fix_scan_list(root, fscan->fdw_scan_tlist, + rtoffset, NUM_EXEC_TLIST((Plan *) fscan)); } else { @@ -1233,16 +1296,24 @@ set_foreignscan_references(PlannerInfo *root, * way */ fscan->scan.plan.targetlist = - fix_scan_list(root, fscan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, fscan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST((Plan *) fscan)); fscan->scan.plan.qual = - fix_scan_list(root, fscan->scan.plan.qual, rtoffset); + fix_scan_list(root, fscan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL((Plan *) fscan)); fscan->fdw_exprs = - fix_scan_list(root, fscan->fdw_exprs, rtoffset); + fix_scan_list(root, fscan->fdw_exprs, + rtoffset, NUM_EXEC_QUAL((Plan *) fscan)); fscan->fdw_recheck_quals = - fix_scan_list(root, fscan->fdw_recheck_quals, rtoffset); + fix_scan_list(root, fscan->fdw_recheck_quals, + rtoffset, 
NUM_EXEC_QUAL((Plan *) fscan)); } fscan->fs_relids = offset_relid_set(fscan->fs_relids, rtoffset); + + /* Adjust resultRelation if it's valid */ + if (fscan->resultRelation > 0) + fscan->resultRelation += rtoffset; } /* @@ -1270,33 +1341,40 @@ set_customscan_references(PlannerInfo *root, (Node *) cscan->scan.plan.targetlist, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_TLIST((Plan *) cscan)); cscan->scan.plan.qual = (List *) fix_upper_expr(root, (Node *) cscan->scan.plan.qual, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) cscan)); cscan->custom_exprs = (List *) fix_upper_expr(root, (Node *) cscan->custom_exprs, itlist, INDEX_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) cscan)); pfree(itlist); /* custom_scan_tlist itself just needs fix_scan_list() adjustments */ cscan->custom_scan_tlist = - fix_scan_list(root, cscan->custom_scan_tlist, rtoffset); + fix_scan_list(root, cscan->custom_scan_tlist, + rtoffset, NUM_EXEC_TLIST((Plan *) cscan)); } else { /* Adjust tlist, qual, custom_exprs in the standard way */ cscan->scan.plan.targetlist = - fix_scan_list(root, cscan->scan.plan.targetlist, rtoffset); + fix_scan_list(root, cscan->scan.plan.targetlist, + rtoffset, NUM_EXEC_TLIST((Plan *) cscan)); cscan->scan.plan.qual = - fix_scan_list(root, cscan->scan.plan.qual, rtoffset); + fix_scan_list(root, cscan->scan.plan.qual, + rtoffset, NUM_EXEC_QUAL((Plan *) cscan)); cscan->custom_exprs = - fix_scan_list(root, cscan->custom_exprs, rtoffset); + fix_scan_list(root, cscan->custom_exprs, + rtoffset, NUM_EXEC_QUAL((Plan *) cscan)); } /* Adjust child plan-nodes recursively, if needed */ @@ -1458,7 +1536,8 @@ set_hash_references(PlannerInfo *root, Plan *plan, int rtoffset) (Node *) hplan->hashkeys, outer_itlist, OUTER_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL(plan)); /* Hash doesn't project */ set_dummy_tlist_references(plan, rtoffset); @@ -1623,6 +1702,69 @@ fix_param_node(PlannerInfo *root, Param *p) return (Node *) copyObject(p); } +/* 
+ * fix_alternative_subplan + * Do set_plan_references processing on an AlternativeSubPlan + * + * Choose one of the alternative implementations and return just that one, + * discarding the rest of the AlternativeSubPlan structure. + * Note: caller must still recurse into the result! + * + * We don't make any attempt to fix up cost estimates in the parent plan + * node or higher-level nodes. However, we do remove the rejected subplan(s) + * from root->glob->subplans, to minimize cycles expended on them later. + */ +static Node * +fix_alternative_subplan(PlannerInfo *root, AlternativeSubPlan *asplan, + double num_exec) +{ + SubPlan *bestplan = NULL; + Cost bestcost = 0; + ListCell *lc; + + /* + * Compute the estimated cost of each subplan assuming num_exec + * executions, and keep the cheapest one. Replace discarded subplans with + * NULL pointers in the global subplans list. In event of exact equality + * of estimates, we prefer the later plan; this is a bit arbitrary, but in + * current usage it biases us to break ties against fast-start subplans. 
+ */ + Assert(asplan->subplans != NIL); + + foreach(lc, asplan->subplans) + { + SubPlan *curplan = (SubPlan *) lfirst(lc); + Cost curcost; + + curcost = curplan->startup_cost + num_exec * curplan->per_call_cost; + if (bestplan == NULL) + { + bestplan = curplan; + bestcost = curcost; + } + else if (curcost <= bestcost) + { + /* drop old bestplan */ + ListCell *lc2 = list_nth_cell(root->glob->subplans, + bestplan->plan_id - 1); + + lfirst(lc2) = NULL; + bestplan = curplan; + bestcost = curcost; + } + else + { + /* drop curplan */ + ListCell *lc2 = list_nth_cell(root->glob->subplans, + curplan->plan_id - 1); + + lfirst(lc2) = NULL; + } + } + + return (Node *) bestplan; +} + /* * fix_scan_expr * Do set_plan_references processing on a scan-level expression @@ -1630,21 +1772,24 @@ fix_param_node(PlannerInfo *root, Param *p) * This consists of incrementing all Vars' varnos by rtoffset, * replacing PARAM_MULTIEXPR Params, expanding PlaceHolderVars, * replacing Aggref nodes that should be replaced by initplan output Params, + * choosing the best implementation for AlternativeSubPlans, * looking up operator opcode info for OpExpr and related nodes, * and adding OIDs from regclass Const nodes into root->glob->relationOids. 
*/ static Node * -fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset) +fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset, double num_exec) { fix_scan_expr_context context; context.root = root; context.rtoffset = rtoffset; + context.num_exec = num_exec; if (rtoffset != 0 || root->multiexpr_params != NIL || root->glob->lastPHId != 0 || - root->minmax_aggs != NIL) + root->minmax_aggs != NIL || + root->hasAlternativeSubPlans) { return fix_scan_expr_mutator(node, &context); } @@ -1655,7 +1800,8 @@ fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset) * are no MULTIEXPR subqueries then we don't need to replace * PARAM_MULTIEXPR Params, and if there are no placeholders anywhere * we won't need to remove them, and if there are no minmax Aggrefs we - * won't need to replace them. Then it's OK to just scribble on the + * won't need to replace them, and if there are no AlternativeSubPlans + * we won't need to remove them. Then it's OK to just scribble on the * input node tree instead of copying (since the only change, filling * in any unset opfuncid fields, is harmless). This saves just enough * cycles to be noticeable on trivial queries. 
@@ -1729,6 +1875,11 @@ fix_scan_expr_mutator(Node *node, fix_scan_expr_context *context) return fix_scan_expr_mutator((Node *) phv->phexpr, context); } + if (IsA(node, AlternativeSubPlan)) + return fix_scan_expr_mutator(fix_alternative_subplan(context->root, + (AlternativeSubPlan *) node, + context->num_exec), + context); fix_expr_common(context->root, node); return expression_tree_mutator(node, fix_scan_expr_mutator, (void *) context); @@ -1740,6 +1891,7 @@ fix_scan_expr_walker(Node *node, fix_scan_expr_context *context) if (node == NULL) return false; Assert(!IsA(node, PlaceHolderVar)); + Assert(!IsA(node, AlternativeSubPlan)); fix_expr_common(context->root, node); return expression_tree_walker(node, fix_scan_expr_walker, (void *) context); @@ -1776,7 +1928,8 @@ set_join_references(PlannerInfo *root, Join *join, int rtoffset) outer_itlist, inner_itlist, (Index) 0, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) join)); /* Now do join-type-specific stuff */ if (IsA(join, NestLoop)) @@ -1792,7 +1945,8 @@ set_join_references(PlannerInfo *root, Join *join, int rtoffset) (Node *) nlp->paramval, outer_itlist, OUTER_VAR, - rtoffset); + rtoffset, + NUM_EXEC_TLIST(outer_plan)); /* Check we replaced any PlaceHolderVar with simple Var */ if (!(IsA(nlp->paramval, Var) && nlp->paramval->varno == OUTER_VAR)) @@ -1808,7 +1962,8 @@ set_join_references(PlannerInfo *root, Join *join, int rtoffset) outer_itlist, inner_itlist, (Index) 0, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) join)); } else if (IsA(join, HashJoin)) { @@ -1819,7 +1974,8 @@ set_join_references(PlannerInfo *root, Join *join, int rtoffset) outer_itlist, inner_itlist, (Index) 0, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) join)); /* * HashJoin's hashkeys are used to look for matching tuples from its @@ -1829,7 +1985,8 @@ set_join_references(PlannerInfo *root, Join *join, int rtoffset) (Node *) hj->hashkeys, outer_itlist, OUTER_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) join)); } /* @@ 
-1867,13 +2024,15 @@ set_join_references(PlannerInfo *root, Join *join, int rtoffset) outer_itlist, inner_itlist, (Index) 0, - rtoffset); + rtoffset, + NUM_EXEC_TLIST((Plan *) join)); join->plan.qual = fix_join_expr(root, join->plan.qual, outer_itlist, inner_itlist, (Index) 0, - rtoffset); + rtoffset, + NUM_EXEC_QUAL((Plan *) join)); pfree(outer_itlist); pfree(inner_itlist); @@ -1926,14 +2085,16 @@ set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset) (Node *) tle->expr, subplan_itlist, OUTER_VAR, - rtoffset); + rtoffset, + NUM_EXEC_TLIST(plan)); } else newexpr = fix_upper_expr(root, (Node *) tle->expr, subplan_itlist, OUTER_VAR, - rtoffset); + rtoffset, + NUM_EXEC_TLIST(plan)); tle = flatCopyTargetEntry(tle); tle->expr = (Expr *) newexpr; output_targetlist = lappend(output_targetlist, tle); @@ -1945,7 +2106,8 @@ set_upper_references(PlannerInfo *root, Plan *plan, int rtoffset) (Node *) plan->qual, subplan_itlist, OUTER_VAR, - rtoffset); + rtoffset, + NUM_EXEC_QUAL(plan)); pfree(subplan_itlist); } @@ -2389,6 +2551,7 @@ search_indexed_tlist_for_sortgroupref(Expr *node, * 'acceptable_rel' is either zero or the rangetable index of a relation * whose Vars may appear in the clause without provoking an error * 'rtoffset': how much to increment varnos by + * 'num_exec': estimated number of executions of expression * * Returns the new expression tree. The original clause structure is * not modified. 
@@ -2399,7 +2562,8 @@ fix_join_expr(PlannerInfo *root, indexed_tlist *outer_itlist, indexed_tlist *inner_itlist, Index acceptable_rel, - int rtoffset) + int rtoffset, + double num_exec) { fix_join_expr_context context; @@ -2408,6 +2572,7 @@ fix_join_expr(PlannerInfo *root, context.inner_itlist = inner_itlist; context.acceptable_rel = acceptable_rel; context.rtoffset = rtoffset; + context.num_exec = num_exec; return (List *) fix_join_expr_mutator((Node *) clauses, &context); } @@ -2502,6 +2667,11 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context) /* Special cases (apply only AFTER failing to match to lower tlist) */ if (IsA(node, Param)) return fix_param_node(context->root, (Param *) node); + if (IsA(node, AlternativeSubPlan)) + return fix_join_expr_mutator(fix_alternative_subplan(context->root, + (AlternativeSubPlan *) node, + context->num_exec), + context); fix_expr_common(context->root, node); return expression_tree_mutator(node, fix_join_expr_mutator, @@ -2533,6 +2703,7 @@ fix_join_expr_mutator(Node *node, fix_join_expr_context *context) * 'subplan_itlist': indexed target list for subplan (or index) * 'newvarno': varno to use for Vars referencing tlist elements * 'rtoffset': how much to increment varnos by + * 'num_exec': estimated number of executions of expression * * The resulting tree is a copy of the original in which all Var nodes have * varno = newvarno, varattno = resno of corresponding targetlist element. 
@@ -2543,7 +2714,8 @@ fix_upper_expr(PlannerInfo *root, Node *node, indexed_tlist *subplan_itlist, Index newvarno, - int rtoffset) + int rtoffset, + double num_exec) { fix_upper_expr_context context; @@ -2551,6 +2723,7 @@ fix_upper_expr(PlannerInfo *root, context.subplan_itlist = subplan_itlist; context.newvarno = newvarno; context.rtoffset = rtoffset; + context.num_exec = num_exec; return fix_upper_expr_mutator(node, &context); } @@ -2623,6 +2796,11 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context) } /* If no match, just fall through to process it normally */ } + if (IsA(node, AlternativeSubPlan)) + return fix_upper_expr_mutator(fix_alternative_subplan(context->root, + (AlternativeSubPlan *) node, + context->num_exec), + context); fix_expr_common(context->root, node); return expression_tree_mutator(node, fix_upper_expr_mutator, @@ -2687,7 +2865,8 @@ set_returning_clause_references(PlannerInfo *root, itlist, NULL, resultRelation, - rtoffset); + rtoffset, + NUM_EXEC_TLIST(topplan)); pfree(itlist); diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index 6eb794669fe35..fcce81926b7d6 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -81,6 +81,7 @@ static Node *convert_testexpr(PlannerInfo *root, static Node *convert_testexpr_mutator(Node *node, convert_testexpr_context *context); static bool subplan_is_hashable(Plan *plan); +static bool subpath_is_hashable(Path *path); static bool testexpr_is_hashable(Node *testexpr, List *param_ids); static bool test_opexpr_is_hashable(OpExpr *testexpr, List *param_ids); static bool hash_ok_operator(OpExpr *expr); @@ -247,7 +248,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, * likely to be better (it depends on the expected number of executions of * the EXISTS qual, and we are much too early in planning the outer query * to be able to guess that). 
So we generate both plans, if possible, and - * leave it to the executor to decide which to use. + * leave it to setrefs.c to decide which to use. */ if (simple_exists && IsA(result, SubPlan)) { @@ -273,20 +274,20 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, plan_params = root->plan_params; root->plan_params = NIL; - /* Select best Path and turn it into a Plan */ + /* Select best Path */ final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL); best_path = final_rel->cheapest_total_path; - plan = create_plan(subroot, best_path); - /* Now we can check if it'll fit in hash_mem */ - /* XXX can we check this at the Path stage? */ - if (subplan_is_hashable(plan)) + if (subpath_is_hashable(best_path)) { SubPlan *hashplan; AlternativeSubPlan *asplan; - /* OK, convert to SubPlan format. */ + /* OK, finish planning the ANY subquery */ + plan = create_plan(subroot, best_path); + + /* ... and convert to SubPlan format */ hashplan = castNode(SubPlan, build_subplan(root, plan, subroot, plan_params, @@ -298,10 +299,11 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, Assert(hashplan->parParam == NIL); Assert(hashplan->useHashTable); - /* Leave it to the executor to decide which plan to use */ + /* Leave it to setrefs.c to decide which plan to use */ asplan = makeNode(AlternativeSubPlan); asplan->subplans = list_make2(result, hashplan); result = (Node *) asplan; + root->hasAlternativeSubPlans = true; } } } @@ -714,6 +716,9 @@ convert_testexpr_mutator(Node *node, /* * subplan_is_hashable: can we implement an ANY subplan by hashing? + * + * This is not responsible for checking whether the combining testexpr + * is suitable for hashing. We only look at the subquery itself. */ static bool subplan_is_hashable(Plan *plan) @@ -735,6 +740,31 @@ subplan_is_hashable(Plan *plan) return true; } +/* + * subpath_is_hashable: can we implement an ANY subplan by hashing? + * + * Identical to subplan_is_hashable, but work from a Path for the subplan. 
+ */ +static bool +subpath_is_hashable(Path *path) +{ + double subquery_size; + int hash_mem = get_hash_mem(); + + /* + * The estimated size of the subquery result must fit in hash_mem. (Note: + * we use heap tuple overhead here even though the tuples will actually be + * stored as MinimalTuples; this provides some fudge factor for hashtable + * overhead.) + */ + subquery_size = path->rows * + (MAXALIGN(path->pathtarget->width) + MAXALIGN(SizeofHeapTupleHeader)); + if (subquery_size > hash_mem * 1024L) + return false; + + return true; +} + /* * testexpr_is_hashable: is an ANY SubLink's test expression hashable? * diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 750586fceb746..e7d814651b184 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -4522,7 +4522,8 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, * needed; that's probably not important, but let's be careful. */ querytree_list = list_make1(querytree); - if (check_sql_fn_retval(querytree_list, result_type, rettupdesc, + if (check_sql_fn_retval(list_make1(querytree_list), + result_type, rettupdesc, false, NULL)) goto fail; /* reject whole-tuple-result cases */ @@ -5040,7 +5041,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) * shows it's returning a whole tuple result; otherwise what it's * returning is a single composite column which is not what we need. */ - if (!check_sql_fn_retval(querytree_list, + if (!check_sql_fn_retval(list_make1(querytree_list), fexpr->funcresulttype, rettupdesc, true, NULL) && (functypclass == TYPEFUNC_COMPOSITE || @@ -5052,7 +5053,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) * check_sql_fn_retval might've inserted a projection step, but that's * fine; just make sure we use the upper Query. 
*/ - querytree = linitial(querytree_list); + querytree = linitial_node(Query, querytree_list); /* * Looks good --- substitute parameters into the query. diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index c1fc866cbf911..5281a2f9983ff 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -3583,15 +3583,18 @@ create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, if (lc == list_head(subpaths)) /* first node? */ pathnode->path.startup_cost = subpath->startup_cost; pathnode->path.total_cost += subpath->total_cost; - pathnode->path.rows += subpath->rows; - total_size += subpath->pathtarget->width * subpath->rows; + if (returningLists != NIL) + { + pathnode->path.rows += subpath->rows; + total_size += subpath->pathtarget->width * subpath->rows; + } } /* * Set width to the average width of the subpath outputs. XXX this is - * totally wrong: we should report zero if no RETURNING, else an average - * of the RETURNING tlist widths. But it's what happened historically, - * and improving it is a task for another day. + * totally wrong: we should return an average of the RETURNING tlist + * widths. But it's what happened historically, and improving it is a task + * for another day. 
*/ if (pathnode->path.rows > 0) total_size /= pathnode->path.rows; diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl index 702c97bba2aa6..e6c6c98fb5ec7 100644 --- a/src/backend/parser/check_keywords.pl +++ b/src/backend/parser/check_keywords.pl @@ -6,8 +6,8 @@ # src/backend/parser/check_keywords.pl # Copyright (c) 2009-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; my $gram_filename = $ARGV[0]; my $kwlist_filename = $ARGV[1]; @@ -21,6 +21,28 @@ sub error return; } +# Check alphabetical order of a set of keyword symbols +# (note these are NOT the actual keyword strings) +sub check_alphabetical_order +{ + my ($listname, $list) = @_; + my $prevkword = ''; + + foreach my $kword (@$list) + { + # Some symbols have a _P suffix. Remove it for the comparison. + my $bare_kword = $kword; + $bare_kword =~ s/_P$//; + if ($bare_kword le $prevkword) + { + error + "'$bare_kword' after '$prevkword' in $listname list is misplaced"; + } + $prevkword = $bare_kword; + } + return; +} + $, = ' '; # set output field separator $\ = "\n"; # set output record separator @@ -33,9 +55,11 @@ sub error open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename"); my $kcat; +my $in_bare_labels; my $comment; my @arr; my %keywords; +my @bare_label_keywords; line: while (my $S = <$gram>) { @@ -51,7 +75,7 @@ sub error $s = '[/][*]', $S =~ s#$s# /* #g; $s = '[*][/]', $S =~ s#$s# */ #g; - if (!($kcat)) + if (!($kcat) && !($in_bare_labels)) { # Is this the beginning of a keyword list? @@ -63,6 +87,10 @@ sub error next line; } } + + # Is this the beginning of the bare_label_keyword list? 
+ $in_bare_labels = 1 if ($S =~ m/^bare_label_keyword:/); + next line; } @@ -97,7 +125,8 @@ sub error { # end of keyword list - $kcat = ''; + undef $kcat; + undef $in_bare_labels; next; } @@ -107,31 +136,21 @@ sub error } # Put this keyword into the right list - push @{ $keywords{$kcat} }, $arr[$fieldIndexer]; + if ($in_bare_labels) + { + push @bare_label_keywords, $arr[$fieldIndexer]; + } + else + { + push @{ $keywords{$kcat} }, $arr[$fieldIndexer]; + } } } close $gram; # Check that each keyword list is in alphabetical order (just for neatnik-ism) -my ($prevkword, $bare_kword); -foreach my $kcat (keys %keyword_categories) -{ - $prevkword = ''; - - foreach my $kword (@{ $keywords{$kcat} }) - { - - # Some keyword have a _P suffix. Remove it for the comparison. - $bare_kword = $kword; - $bare_kword =~ s/_P$//; - if ($bare_kword le $prevkword) - { - error - "'$bare_kword' after '$prevkword' in $kcat list is misplaced"; - } - $prevkword = $bare_kword; - } -} +check_alphabetical_order($_, $keywords{$_}) for (keys %keyword_categories); +check_alphabetical_order('bare_label_keyword', \@bare_label_keywords); # Transform the keyword lists into hashes. # kwhashes is a hash of hashes, keyed by keyword category id, @@ -147,6 +166,7 @@ sub error $kwhashes{$kcat_id} = $hash; } +my %bare_label_keywords = map { $_ => 1 } @bare_label_keywords; # Now read in kwlist.h @@ -160,11 +180,12 @@ sub error { my ($line) = $_; - if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*)\)/) + if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/) { my ($kwstring) = $1; my ($kwname) = $2; my ($kwcat_id) = $3; + my ($collabel) = $4; # Check that the list is in alphabetical order (critical!) 
if ($kwstring le $prevkwstring) @@ -197,7 +218,7 @@ sub error "keyword name '$kwname' doesn't match keyword string '$kwstring'"; } - # Check that the keyword is present in the grammar + # Check that the keyword is present in the right category list %kwhash = %{ $kwhashes{$kwcat_id} }; if (!(%kwhash)) @@ -219,6 +240,29 @@ sub error delete $kwhashes{$kwcat_id}->{$kwname}; } } + + # Check that the keyword's collabel property matches gram.y + if ($collabel eq 'BARE_LABEL') + { + unless ($bare_label_keywords{$kwname}) + { + error + "'$kwname' is marked as BARE_LABEL in kwlist.h, but it is missing from gram.y's bare_label_keyword rule"; + } + } + elsif ($collabel eq 'AS_LABEL') + { + if ($bare_label_keywords{$kwname}) + { + error + "'$kwname' is marked as AS_LABEL in kwlist.h, but it is listed in gram.y's bare_label_keyword rule"; + } + } + else + { + error + "'$collabel' not recognized in kwlist.h. Expected either 'BARE_LABEL' or 'AS_LABEL'"; + } } } close $kwlist; diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index c5154b818cfb1..480d16834687a 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -166,7 +166,7 @@ static RoleSpec *makeRoleSpec(RoleSpecType type, int location); static void check_qualified_name(List *names, core_yyscan_t yyscanner); static List *check_func_name(List *names, core_yyscan_t yyscanner); static List *check_indirection(List *indirection, core_yyscan_t yyscanner); -static List *extractArgTypes(List *parameters); +static List *extractArgTypes(ObjectType objtype, List *parameters); static List *extractAggrArgTypes(List *aggrargs); static List *makeOrderedSetArgs(List *directargs, List *orderedargs, core_yyscan_t yyscanner); @@ -375,8 +375,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type privilege %type privileges privilege_list %type privilege_target -%type function_with_argtypes aggregate_with_argtypes operator_with_argtypes -%type function_with_argtypes_list 
aggregate_with_argtypes_list operator_with_argtypes_list +%type function_with_argtypes aggregate_with_argtypes operator_with_argtypes procedure_with_argtypes function_with_argtypes_common +%type function_with_argtypes_list aggregate_with_argtypes_list operator_with_argtypes_list procedure_with_argtypes_list %type defacl_privilege_target %type DefACLOption %type DefACLOptionList @@ -540,14 +540,16 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type Sconst comment_text notify_payload %type RoleId opt_boolean_or_string %type var_list -%type ColId ColLabel var_name type_function_name param_name +%type ColId ColLabel BareColLabel %type NonReservedWord NonReservedWord_or_Sconst +%type var_name type_function_name param_name %type createdb_opt_name %type var_value zone_value %type auth_ident RoleSpec opt_granted_by %type unreserved_keyword type_func_name_keyword %type col_name_keyword reserved_keyword +%type bare_label_keyword %type TableConstraint TableLikeClause %type TableLikeOptionList TableLikeOption @@ -741,24 +743,16 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %nonassoc '<' '>' '=' LESS_EQUALS GREATER_EQUALS NOT_EQUALS %nonassoc BETWEEN IN_P LIKE ILIKE SIMILAR NOT_LA %nonassoc ESCAPE /* ESCAPE must be just above LIKE/ILIKE/SIMILAR */ -%left POSTFIXOP /* dummy for postfix Op rules */ /* - * To support target_el without AS, we must give IDENT an explicit priority - * between POSTFIXOP and Op. We can safely assign the same priority to - * various unreserved keywords as needed to resolve ambiguities (this can't - * have any bad effects since obviously the keywords will still behave the - * same as if they weren't keywords). 
We need to do this: - * for PARTITION, RANGE, ROWS, GROUPS to support opt_existing_window_name; - * for RANGE, ROWS, GROUPS so that they can follow a_expr without creating - * postfix-operator problems; - * for GENERATED so that it can follow b_expr; - * and for NULL so that it can follow b_expr in ColQualList without creating - * postfix-operator problems. + * To support target_el without AS, it used to be necessary to assign IDENT an + * explicit precedence just less than Op. While that's not really necessary + * since we removed postfix operators, it's still helpful to do so because + * there are some other unreserved keywords that need precedence assignments. + * If those keywords have the same precedence as IDENT then they clearly act + * the same as non-keywords, reducing the risk of unwanted precedence effects. * - * To support CUBE and ROLLUP in GROUP BY without reserving them, we give them - * an explicit priority lower than '(', so that a rule with CUBE '(' will shift - * rather than reducing a conflicting rule that takes CUBE as a function name. - * Using the same precedence as IDENT seems right for the reasons given above. + * We need to do this for PARTITION, RANGE, ROWS, and GROUPS to support + * opt_existing_window_name (see comment there). * * The frame_bound productions UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING * are even messier: since UNBOUNDED is an unreserved keyword (per spec!), @@ -768,9 +762,14 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); * appear to cause UNBOUNDED to be treated differently from other unreserved * keywords anywhere else in the grammar, but it's definitely risky. We can * blame any funny behavior of UNBOUNDED on the SQL standard, though. + * + * To support CUBE and ROLLUP in GROUP BY without reserving them, we give them + * an explicit priority lower than '(', so that a rule with CUBE '(' will shift + * rather than reducing a conflicting rule that takes CUBE as a function name. 
+ * Using the same precedence as IDENT seems right for the reasons given above. */ -%nonassoc UNBOUNDED /* ideally should have same precedence as IDENT */ -%nonassoc IDENT GENERATED NULL_P PARTITION RANGE ROWS GROUPS PRECEDING FOLLOWING CUBE ROLLUP +%nonassoc UNBOUNDED /* ideally would have same precedence as IDENT */ +%nonassoc IDENT PARTITION RANGE ROWS GROUPS PRECEDING FOLLOWING CUBE ROLLUP %left Op OPERATOR /* multi-character ops and user-defined operators */ %left '+' '-' %left '*' '/' '%' @@ -791,8 +790,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); * left-associativity among the JOIN rules themselves. */ %left JOIN CROSS LEFT FULL RIGHT INNER_P NATURAL -/* kluge to keep xml_whitespace_option from causing shift/reduce conflicts */ -%right PRESERVE STRIP_P %% @@ -4626,7 +4623,7 @@ AlterExtensionContentsStmt: n->object = (Node *) lcons(makeString($9), $7); $$ = (Node *)n; } - | ALTER EXTENSION name add_drop PROCEDURE function_with_argtypes + | ALTER EXTENSION name add_drop PROCEDURE procedure_with_argtypes { AlterExtensionContentsStmt *n = makeNode(AlterExtensionContentsStmt); n->extname = $3; @@ -4635,7 +4632,7 @@ AlterExtensionContentsStmt: n->object = (Node *) $6; $$ = (Node *)n; } - | ALTER EXTENSION name add_drop ROUTINE function_with_argtypes + | ALTER EXTENSION name add_drop ROUTINE procedure_with_argtypes { AlterExtensionContentsStmt *n = makeNode(AlterExtensionContentsStmt); n->extname = $3; @@ -6368,7 +6365,7 @@ CommentStmt: n->comment = $8; $$ = (Node *) n; } - | COMMENT ON PROCEDURE function_with_argtypes IS comment_text + | COMMENT ON PROCEDURE procedure_with_argtypes IS comment_text { CommentStmt *n = makeNode(CommentStmt); n->objtype = OBJECT_PROCEDURE; @@ -6376,7 +6373,7 @@ CommentStmt: n->comment = $6; $$ = (Node *) n; } - | COMMENT ON ROUTINE function_with_argtypes IS comment_text + | COMMENT ON ROUTINE procedure_with_argtypes IS comment_text { CommentStmt *n = makeNode(CommentStmt); n->objtype = 
OBJECT_ROUTINE; @@ -6522,7 +6519,7 @@ SecLabelStmt: n->label = $9; $$ = (Node *) n; } - | SECURITY LABEL opt_provider ON PROCEDURE function_with_argtypes + | SECURITY LABEL opt_provider ON PROCEDURE procedure_with_argtypes IS security_label { SecLabelStmt *n = makeNode(SecLabelStmt); @@ -6883,7 +6880,7 @@ privilege_target: n->objs = $2; $$ = n; } - | PROCEDURE function_with_argtypes_list + | PROCEDURE procedure_with_argtypes_list { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; @@ -6891,7 +6888,7 @@ privilege_target: n->objs = $2; $$ = n; } - | ROUTINE function_with_argtypes_list + | ROUTINE procedure_with_argtypes_list { PrivTarget *n = (PrivTarget *) palloc(sizeof(PrivTarget)); n->targtype = ACL_TARGET_OBJECT; @@ -7412,20 +7409,33 @@ function_with_argtypes_list: { $$ = lappend($1, $3); } ; +procedure_with_argtypes_list: + procedure_with_argtypes { $$ = list_make1($1); } + | procedure_with_argtypes_list ',' procedure_with_argtypes + { $$ = lappend($1, $3); } + ; + function_with_argtypes: func_name func_args { ObjectWithArgs *n = makeNode(ObjectWithArgs); n->objname = $1; - n->objargs = extractArgTypes($2); + n->objargs = extractArgTypes(OBJECT_FUNCTION, $2); $$ = n; } + | function_with_argtypes_common + { + $$ = $1; + } + ; + +function_with_argtypes_common: /* * Because of reduce/reduce conflicts, we can't use func_name * below, but we can write it out the long way, which actually * allows more cases. */ - | type_func_name_keyword + type_func_name_keyword { ObjectWithArgs *n = makeNode(ObjectWithArgs); n->objname = list_make1(makeString(pstrdup($1))); @@ -7449,6 +7459,24 @@ function_with_argtypes: } ; +/* + * This is different from function_with_argtypes in the call to + * extractArgTypes(). 
+ */ +procedure_with_argtypes: + func_name func_args + { + ObjectWithArgs *n = makeNode(ObjectWithArgs); + n->objname = $1; + n->objargs = extractArgTypes(OBJECT_PROCEDURE, $2); + $$ = n; + } + | function_with_argtypes_common + { + $$ = $1; + } + ; + /* * func_args_with_defaults is separate because we only want to accept * defaults in CREATE FUNCTION, not in ALTER etc. @@ -7827,7 +7855,7 @@ AlterFunctionStmt: n->actions = $4; $$ = (Node *) n; } - | ALTER PROCEDURE function_with_argtypes alterfunc_opt_list opt_restrict + | ALTER PROCEDURE procedure_with_argtypes alterfunc_opt_list opt_restrict { AlterFunctionStmt *n = makeNode(AlterFunctionStmt); n->objtype = OBJECT_PROCEDURE; @@ -7835,7 +7863,7 @@ AlterFunctionStmt: n->actions = $4; $$ = (Node *) n; } - | ALTER ROUTINE function_with_argtypes alterfunc_opt_list opt_restrict + | ALTER ROUTINE procedure_with_argtypes alterfunc_opt_list opt_restrict { AlterFunctionStmt *n = makeNode(AlterFunctionStmt); n->objtype = OBJECT_ROUTINE; @@ -7891,7 +7919,7 @@ RemoveFuncStmt: n->concurrent = false; $$ = (Node *)n; } - | DROP PROCEDURE function_with_argtypes_list opt_drop_behavior + | DROP PROCEDURE procedure_with_argtypes_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_PROCEDURE; @@ -7901,7 +7929,7 @@ RemoveFuncStmt: n->concurrent = false; $$ = (Node *)n; } - | DROP PROCEDURE IF_P EXISTS function_with_argtypes_list opt_drop_behavior + | DROP PROCEDURE IF_P EXISTS procedure_with_argtypes_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_PROCEDURE; @@ -7911,7 +7939,7 @@ RemoveFuncStmt: n->concurrent = false; $$ = (Node *)n; } - | DROP ROUTINE function_with_argtypes_list opt_drop_behavior + | DROP ROUTINE procedure_with_argtypes_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_ROUTINE; @@ -7921,7 +7949,7 @@ RemoveFuncStmt: n->concurrent = false; $$ = (Node *)n; } - | DROP ROUTINE IF_P EXISTS function_with_argtypes_list 
opt_drop_behavior + | DROP ROUTINE IF_P EXISTS procedure_with_argtypes_list opt_drop_behavior { DropStmt *n = makeNode(DropStmt); n->removeType = OBJECT_ROUTINE; @@ -8396,7 +8424,7 @@ RenameStmt: ALTER AGGREGATE aggregate_with_argtypes RENAME TO name n->missing_ok = true; $$ = (Node *)n; } - | ALTER PROCEDURE function_with_argtypes RENAME TO name + | ALTER PROCEDURE procedure_with_argtypes RENAME TO name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_PROCEDURE; @@ -8414,7 +8442,7 @@ RenameStmt: ALTER AGGREGATE aggregate_with_argtypes RENAME TO name n->missing_ok = false; $$ = (Node *)n; } - | ALTER ROUTINE function_with_argtypes RENAME TO name + | ALTER ROUTINE procedure_with_argtypes RENAME TO name { RenameStmt *n = makeNode(RenameStmt); n->renameType = OBJECT_ROUTINE; @@ -8825,7 +8853,7 @@ AlterObjectDependsStmt: n->remove = $4; $$ = (Node *)n; } - | ALTER PROCEDURE function_with_argtypes opt_no DEPENDS ON EXTENSION name + | ALTER PROCEDURE procedure_with_argtypes opt_no DEPENDS ON EXTENSION name { AlterObjectDependsStmt *n = makeNode(AlterObjectDependsStmt); n->objectType = OBJECT_PROCEDURE; @@ -8834,7 +8862,7 @@ AlterObjectDependsStmt: n->remove = $4; $$ = (Node *)n; } - | ALTER ROUTINE function_with_argtypes opt_no DEPENDS ON EXTENSION name + | ALTER ROUTINE procedure_with_argtypes opt_no DEPENDS ON EXTENSION name { AlterObjectDependsStmt *n = makeNode(AlterObjectDependsStmt); n->objectType = OBJECT_ROUTINE; @@ -8965,7 +8993,7 @@ AlterObjectSchemaStmt: n->missing_ok = false; $$ = (Node *)n; } - | ALTER PROCEDURE function_with_argtypes SET SCHEMA name + | ALTER PROCEDURE procedure_with_argtypes SET SCHEMA name { AlterObjectSchemaStmt *n = makeNode(AlterObjectSchemaStmt); n->objectType = OBJECT_PROCEDURE; @@ -8974,7 +9002,7 @@ AlterObjectSchemaStmt: n->missing_ok = false; $$ = (Node *)n; } - | ALTER ROUTINE function_with_argtypes SET SCHEMA name + | ALTER ROUTINE procedure_with_argtypes SET SCHEMA name { AlterObjectSchemaStmt *n = 
makeNode(AlterObjectSchemaStmt); n->objectType = OBJECT_ROUTINE; @@ -9276,7 +9304,7 @@ AlterOwnerStmt: ALTER AGGREGATE aggregate_with_argtypes OWNER TO RoleSpec n->newowner = $9; $$ = (Node *)n; } - | ALTER PROCEDURE function_with_argtypes OWNER TO RoleSpec + | ALTER PROCEDURE procedure_with_argtypes OWNER TO RoleSpec { AlterOwnerStmt *n = makeNode(AlterOwnerStmt); n->objectType = OBJECT_PROCEDURE; @@ -9284,7 +9312,7 @@ AlterOwnerStmt: ALTER AGGREGATE aggregate_with_argtypes OWNER TO RoleSpec n->newowner = $6; $$ = (Node *)n; } - | ALTER ROUTINE function_with_argtypes OWNER TO RoleSpec + | ALTER ROUTINE procedure_with_argtypes OWNER TO RoleSpec { AlterOwnerStmt *n = makeNode(AlterOwnerStmt); n->objectType = OBJECT_ROUTINE; @@ -12993,8 +13021,6 @@ a_expr: c_expr { $$ = $1; } { $$ = (Node *) makeA_Expr(AEXPR_OP, $2, $1, $3, @2); } | qual_Op a_expr %prec Op { $$ = (Node *) makeA_Expr(AEXPR_OP, $1, NULL, $2, @1); } - | a_expr qual_Op %prec POSTFIXOP - { $$ = (Node *) makeA_Expr(AEXPR_OP, $2, $1, NULL, @2); } | a_expr AND a_expr { $$ = makeAndExpr($1, $3, @2); } @@ -13408,8 +13434,6 @@ b_expr: c_expr { $$ = (Node *) makeA_Expr(AEXPR_OP, $2, $1, $3, @2); } | qual_Op b_expr %prec Op { $$ = (Node *) makeA_Expr(AEXPR_OP, $1, NULL, $2, @1); } - | b_expr qual_Op %prec POSTFIXOP - { $$ = (Node *) makeA_Expr(AEXPR_OP, $2, $1, NULL, @2); } | b_expr IS DISTINCT FROM b_expr %prec IS { $$ = (Node *) makeSimpleA_Expr(AEXPR_DISTINCT, "=", $1, $5, @2); @@ -14663,15 +14687,7 @@ target_el: a_expr AS ColLabel $$->val = (Node *)$1; $$->location = @1; } - /* - * We support omitting AS only for column labels that aren't - * any known keyword. There is an ambiguity against postfix - * operators: is "a ! b" an infix expression, or a postfix - * expression and a column label? We prefer to resolve this - * as an infix expression, which we accomplish by assigning - * IDENT a precedence higher than POSTFIXOP. 
- */ - | a_expr IDENT + | a_expr BareColLabel { $$ = makeNode(ResTarget); $$->name = $2; @@ -14919,6 +14935,13 @@ RoleId: RoleSpec "CURRENT_USER"), parser_errposition(@1))); break; + case ROLESPEC_CURRENT_ROLE: + ereport(ERROR, + (errcode(ERRCODE_RESERVED_NAME), + errmsg("%s cannot be used as a role name here", + "CURRENT_ROLE"), + parser_errposition(@1))); + break; } } ; @@ -14950,6 +14973,10 @@ RoleSpec: NonReservedWord } $$ = n; } + | CURRENT_ROLE + { + $$ = makeRoleSpec(ROLESPEC_CURRENT_ROLE, @1); + } | CURRENT_USER { $$ = makeRoleSpec(ROLESPEC_CURRENT_USER, @1); @@ -15009,6 +15036,13 @@ ColLabel: IDENT { $$ = $1; } | reserved_keyword { $$ = pstrdup($1); } ; +/* Bare column label --- names that can be column labels without writing "AS". + * This classification is orthogonal to the other keyword categories. + */ +BareColLabel: IDENT { $$ = $1; } + | bare_label_keyword { $$ = pstrdup($1); } + ; + /* * Keyword category lists. Generally, every keyword present in @@ -15513,6 +15547,429 @@ reserved_keyword: | WITH ; +/* + * While all keywords can be used as column labels when preceded by AS, + * not all of them can be used as a "bare" column label without AS. + * Those that can be used as a bare label must be listed here, + * in addition to appearing in one of the category lists above. + * + * Always add a new keyword to this list if possible. Mark it BARE_LABEL + * in kwlist.h if it is included here, or AS_LABEL if it is not. 
+ */ +bare_label_keyword: + ABORT_P + | ABSOLUTE_P + | ACCESS + | ACTION + | ADD_P + | ADMIN + | AFTER + | AGGREGATE + | ALL + | ALSO + | ALTER + | ALWAYS + | ANALYSE + | ANALYZE + | AND + | ANY + | ASC + | ASSERTION + | ASSIGNMENT + | ASYMMETRIC + | AT + | ATTACH + | ATTRIBUTE + | AUTHORIZATION + | BACKWARD + | BEFORE + | BEGIN_P + | BETWEEN + | BIGINT + | BINARY + | BIT + | BOOLEAN_P + | BOTH + | BY + | CACHE + | CALL + | CALLED + | CASCADE + | CASCADED + | CASE + | CAST + | CATALOG_P + | CHAIN + | CHARACTERISTICS + | CHECK + | CHECKPOINT + | CLASS + | CLOSE + | CLUSTER + | COALESCE + | COLLATE + | COLLATION + | COLUMN + | COLUMNS + | COMMENT + | COMMENTS + | COMMIT + | COMMITTED + | CONCURRENTLY + | CONFIGURATION + | CONFLICT + | CONNECTION + | CONSTRAINT + | CONSTRAINTS + | CONTENT_P + | CONTINUE_P + | CONVERSION_P + | COPY + | COST + | CROSS + | CSV + | CUBE + | CURRENT_P + | CURRENT_CATALOG + | CURRENT_DATE + | CURRENT_ROLE + | CURRENT_SCHEMA + | CURRENT_TIME + | CURRENT_TIMESTAMP + | CURRENT_USER + | CURSOR + | CYCLE + | DATA_P + | DATABASE + | DEALLOCATE + | DEC + | DECIMAL_P + | DECLARE + | DEFAULT + | DEFAULTS + | DEFERRABLE + | DEFERRED + | DEFINER + | DELETE_P + | DELIMITER + | DELIMITERS + | DEPENDS + | DESC + | DETACH + | DICTIONARY + | DISABLE_P + | DISCARD + | DISTINCT + | DO + | DOCUMENT_P + | DOMAIN_P + | DOUBLE_P + | DROP + | EACH + | ELSE + | ENABLE_P + | ENCODING + | ENCRYPTED + | END_P + | ENUM_P + | ESCAPE + | EVENT + | EXCLUDE + | EXCLUDING + | EXCLUSIVE + | EXECUTE + | EXISTS + | EXPLAIN + | EXPRESSION + | EXTENSION + | EXTERNAL + | EXTRACT + | FALSE_P + | FAMILY + | FIRST_P + | FLOAT_P + | FOLLOWING + | FORCE + | FOREIGN + | FORWARD + | FREEZE + | FULL + | FUNCTION + | FUNCTIONS + | GENERATED + | GLOBAL + | GRANTED + | GREATEST + | GROUPING + | GROUPS + | HANDLER + | HEADER_P + | HOLD + | IDENTITY_P + | IF_P + | ILIKE + | IMMEDIATE + | IMMUTABLE + | IMPLICIT_P + | IMPORT_P + | IN_P + | INCLUDE + | INCLUDING + | INCREMENT + | INDEX + | 
INDEXES + | INHERIT + | INHERITS + | INITIALLY + | INLINE_P + | INNER_P + | INOUT + | INPUT_P + | INSENSITIVE + | INSERT + | INSTEAD + | INT_P + | INTEGER + | INTERVAL + | INVOKER + | IS + | ISOLATION + | JOIN + | KEY + | LABEL + | LANGUAGE + | LARGE_P + | LAST_P + | LATERAL_P + | LEADING + | LEAKPROOF + | LEAST + | LEFT + | LEVEL + | LIKE + | LISTEN + | LOAD + | LOCAL + | LOCALTIME + | LOCALTIMESTAMP + | LOCATION + | LOCK_P + | LOCKED + | LOGGED + | MAPPING + | MATCH + | MATERIALIZED + | MAXVALUE + | METHOD + | MINVALUE + | MODE + | MOVE + | NAME_P + | NAMES + | NATIONAL + | NATURAL + | NCHAR + | NEW + | NEXT + | NFC + | NFD + | NFKC + | NFKD + | NO + | NONE + | NORMALIZE + | NORMALIZED + | NOT + | NOTHING + | NOTIFY + | NOWAIT + | NULL_P + | NULLIF + | NULLS_P + | NUMERIC + | OBJECT_P + | OF + | OFF + | OIDS + | OLD + | ONLY + | OPERATOR + | OPTION + | OPTIONS + | OR + | ORDINALITY + | OTHERS + | OUT_P + | OUTER_P + | OVERLAY + | OVERRIDING + | OWNED + | OWNER + | PARALLEL + | PARSER + | PARTIAL + | PARTITION + | PASSING + | PASSWORD + | PLACING + | PLANS + | POLICY + | POSITION + | PRECEDING + | PREPARE + | PREPARED + | PRESERVE + | PRIMARY + | PRIOR + | PRIVILEGES + | PROCEDURAL + | PROCEDURE + | PROCEDURES + | PROGRAM + | PUBLICATION + | QUOTE + | RANGE + | READ + | REAL + | REASSIGN + | RECHECK + | RECURSIVE + | REF + | REFERENCES + | REFERENCING + | REFRESH + | REINDEX + | RELATIVE_P + | RELEASE + | RENAME + | REPEATABLE + | REPLACE + | REPLICA + | RESET + | RESTART + | RESTRICT + | RETURNS + | REVOKE + | RIGHT + | ROLE + | ROLLBACK + | ROLLUP + | ROUTINE + | ROUTINES + | ROW + | ROWS + | RULE + | SAVEPOINT + | SCHEMA + | SCHEMAS + | SCROLL + | SEARCH + | SECURITY + | SELECT + | SEQUENCE + | SEQUENCES + | SERIALIZABLE + | SERVER + | SESSION + | SESSION_USER + | SET + | SETOF + | SETS + | SHARE + | SHOW + | SIMILAR + | SIMPLE + | SKIP + | SMALLINT + | SNAPSHOT + | SOME + | SQL_P + | STABLE + | STANDALONE_P + | START + | STATEMENT + | STATISTICS + | STDIN + | 
STDOUT + | STORAGE + | STORED + | STRICT_P + | STRIP_P + | SUBSCRIPTION + | SUBSTRING + | SUPPORT + | SYMMETRIC + | SYSID + | SYSTEM_P + | TABLE + | TABLES + | TABLESAMPLE + | TABLESPACE + | TEMP + | TEMPLATE + | TEMPORARY + | TEXT_P + | THEN + | TIES + | TIME + | TIMESTAMP + | TRAILING + | TRANSACTION + | TRANSFORM + | TREAT + | TRIGGER + | TRIM + | TRUE_P + | TRUNCATE + | TRUSTED + | TYPE_P + | TYPES_P + | UESCAPE + | UNBOUNDED + | UNCOMMITTED + | UNENCRYPTED + | UNIQUE + | UNKNOWN + | UNLISTEN + | UNLOGGED + | UNTIL + | UPDATE + | USER + | USING + | VACUUM + | VALID + | VALIDATE + | VALIDATOR + | VALUE_P + | VALUES + | VARCHAR + | VARIADIC + | VERBOSE + | VERSION_P + | VIEW + | VIEWS + | VOLATILE + | WHEN + | WHITESPACE_P + | WORK + | WRAPPER + | WRITE + | XML_P + | XMLATTRIBUTES + | XMLCONCAT + | XMLELEMENT + | XMLEXISTS + | XMLFOREST + | XMLNAMESPACES + | XMLPARSE + | XMLPI + | XMLROOT + | XMLSERIALIZE + | XMLTABLE + | YES_P + | ZONE + ; + %% /* @@ -15792,13 +16249,14 @@ check_indirection(List *indirection, core_yyscan_t yyscanner) } /* extractArgTypes() + * * Given a list of FunctionParameter nodes, extract a list of just the - * argument types (TypeNames) for input parameters only. This is what - * is needed to look up an existing function, which is what is wanted by - * the productions that use this call. + * argument types (TypeNames) for signature parameters only (e.g., only input + * parameters for functions). This is what is needed to look up an existing + * function, which is what is wanted by the productions that use this call. 
*/ static List * -extractArgTypes(List *parameters) +extractArgTypes(ObjectType objtype, List *parameters) { List *result = NIL; ListCell *i; @@ -15807,7 +16265,7 @@ extractArgTypes(List *parameters) { FunctionParameter *p = (FunctionParameter *) lfirst(i); - if (p->mode != FUNC_PARAM_OUT && p->mode != FUNC_PARAM_TABLE) + if ((p->mode != FUNC_PARAM_OUT || objtype == OBJECT_PROCEDURE) && p->mode != FUNC_PARAM_TABLE) result = lappend(result, p->argType); } return result; @@ -15820,7 +16278,7 @@ static List * extractAggrArgTypes(List *aggrargs) { Assert(list_length(aggrargs) == 2); - return extractArgTypes((List *) linitial(aggrargs)); + return extractArgTypes(OBJECT_AGGREGATE, (List *) linitial(aggrargs)); } /* makeOrderedSetArgs() @@ -15833,7 +16291,7 @@ makeOrderedSetArgs(List *directargs, List *orderedargs, core_yyscan_t yyscanner) { FunctionParameter *lastd = (FunctionParameter *) llast(directargs); - int ndirectargs; + Value *ndirectargs; /* No restriction unless last direct arg is VARIADIC */ if (lastd->mode == FUNC_PARAM_VARIADIC) @@ -15857,10 +16315,10 @@ makeOrderedSetArgs(List *directargs, List *orderedargs, } /* don't merge into the next line, as list_concat changes directargs */ - ndirectargs = list_length(directargs); + ndirectargs = makeInteger(list_length(directargs)); return list_make2(list_concat(directargs, orderedargs), - makeInteger(ndirectargs)); + ndirectargs); } /* insertSelectOptions() @@ -15919,7 +16377,7 @@ insertSelectOptions(SelectStmt *stmt, if (!stmt->sortClause && limitClause->limitOption == LIMIT_OPTION_WITH_TIES) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("WITH TIES options can not be specified without ORDER BY clause"))); + errmsg("WITH TIES cannot be specified without ORDER BY clause"))); stmt->limitOption = limitClause->limitOption; } if (withClause) diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c index f813b587f186a..783f3fe8f2d1c 100644 --- a/src/backend/parser/parse_agg.c +++ 
b/src/backend/parser/parse_agg.c @@ -1083,7 +1083,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry) if (gset_common) { - for_each_cell(l, gsets, list_second_cell(gsets)) + for_each_from(l, gsets, 1) { gset_common = list_intersection_int(gset_common, lfirst(l)); if (!gset_common) @@ -1774,7 +1774,7 @@ expand_grouping_sets(List *groupingSets, int limit) result = lappend(result, list_union_int(NIL, (List *) lfirst(lc))); } - for_each_cell(lc, expanded_groups, list_second_cell(expanded_groups)) + for_each_from(lc, expanded_groups, 1) { List *p = lfirst(lc); List *new_result = NIL; diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index 6fff13479e47f..edcaf276c0ad3 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -1770,7 +1770,7 @@ transformLimitClause(ParseState *pstate, Node *clause, IsA(clause, A_Const) && ((A_Const *) clause)->val.type == T_Null) ereport(ERROR, (errcode(ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - errmsg("row count cannot be NULL in FETCH FIRST ... WITH TIES clause"))); + errmsg("row count cannot be null in FETCH FIRST ... WITH TIES clause"))); return qual; } diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index f69976cc8c989..f5165863d7791 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -57,7 +57,7 @@ bool Transform_null_equals = false; #define PREC_GROUP_NOT_LIKE 9 /* NOT LIKE/ILIKE/SIMILAR */ #define PREC_GROUP_NOT_BETWEEN 10 /* NOT BETWEEN */ #define PREC_GROUP_NOT_IN 11 /* NOT IN */ -#define PREC_GROUP_POSTFIX_OP 12 /* generic postfix operators */ +#define PREC_GROUP_ANY_ALL 12 /* ANY/ALL */ #define PREC_GROUP_INFIX_OP 13 /* generic infix operators */ #define PREC_GROUP_PREFIX_OP 14 /* generic prefix operators */ @@ -71,7 +71,7 @@ bool Transform_null_equals = false; * 4. LIKE ILIKE SIMILAR * 5. BETWEEN * 6. IN - * 7. generic postfix Op + * 7. ANY ALL * 8. generic Op, including <= => <> * 9. 
generic prefix Op * 10. IS tests (NullTest, BooleanTest, etc) @@ -1031,7 +1031,7 @@ transformAExprOpAny(ParseState *pstate, A_Expr *a) Node *rexpr = a->rexpr; if (operator_precedence_warning) - emit_precedence_warnings(pstate, PREC_GROUP_POSTFIX_OP, + emit_precedence_warnings(pstate, PREC_GROUP_ANY_ALL, strVal(llast(a->name)), lexpr, NULL, a->location); @@ -1054,7 +1054,7 @@ transformAExprOpAll(ParseState *pstate, A_Expr *a) Node *rexpr = a->rexpr; if (operator_precedence_warning) - emit_precedence_warnings(pstate, PREC_GROUP_POSTFIX_OP, + emit_precedence_warnings(pstate, PREC_GROUP_ANY_ALL, strVal(llast(a->name)), lexpr, NULL, a->location); @@ -1698,11 +1698,12 @@ transformMultiAssignRef(ParseState *pstate, MultiAssignRef *maref) /* * If we're at the last column, delete the RowExpr from * p_multiassign_exprs; we don't need it anymore, and don't want it in - * the finished UPDATE tlist. + * the finished UPDATE tlist. We assume this is still the last entry + * in p_multiassign_exprs. */ if (maref->colno == maref->ncolumns) pstate->p_multiassign_exprs = - list_delete_ptr(pstate->p_multiassign_exprs, tle); + list_delete_last(pstate->p_multiassign_exprs); return result; } @@ -2019,7 +2020,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink) sublink->testexpr, NULL, sublink->location); else - emit_precedence_warnings(pstate, PREC_GROUP_POSTFIX_OP, + emit_precedence_warnings(pstate, PREC_GROUP_ANY_ALL, strVal(llast(sublink->operName)), sublink->testexpr, NULL, sublink->location); @@ -3244,28 +3245,11 @@ operator_precedence_group(Node *node, const char **nodename) group = PREC_GROUP_PREFIX_OP; } } - else if (aexpr->kind == AEXPR_OP && - aexpr->lexpr != NULL && - aexpr->rexpr == NULL) - { - /* postfix operator */ - if (list_length(aexpr->name) == 1) - { - *nodename = strVal(linitial(aexpr->name)); - group = PREC_GROUP_POSTFIX_OP; - } - else - { - /* schema-qualified operator syntax */ - *nodename = "OPERATOR()"; - group = PREC_GROUP_POSTFIX_OP; - } - } else if 
(aexpr->kind == AEXPR_OP_ANY || aexpr->kind == AEXPR_OP_ALL) { *nodename = strVal(llast(aexpr->name)); - group = PREC_GROUP_POSTFIX_OP; + group = PREC_GROUP_ANY_ALL; } else if (aexpr->kind == AEXPR_DISTINCT || aexpr->kind == AEXPR_NOT_DISTINCT) @@ -3356,7 +3340,7 @@ operator_precedence_group(Node *node, const char **nodename) else { *nodename = strVal(llast(s->operName)); - group = PREC_GROUP_POSTFIX_OP; + group = PREC_GROUP_ANY_ALL; } } } @@ -3432,9 +3416,8 @@ emit_precedence_warnings(ParseState *pstate, * Complain if left child, which should be same or higher precedence * according to current rules, used to be lower precedence. * - * Exception to precedence rules: if left child is IN or NOT IN or a - * postfix operator, the grouping is syntactically forced regardless of - * precedence. + * Exception to precedence rules: if left child is IN or NOT IN the + * grouping is syntactically forced regardless of precedence. */ cgroup = operator_precedence_group(lchild, &copname); if (cgroup > 0) @@ -3442,7 +3425,7 @@ emit_precedence_warnings(ParseState *pstate, if (oldprecedence_l[cgroup] < oldprecedence_r[opgroup] && cgroup != PREC_GROUP_IN && cgroup != PREC_GROUP_NOT_IN && - cgroup != PREC_GROUP_POSTFIX_OP && + cgroup != PREC_GROUP_ANY_ALL && cgroup != PREC_GROUP_POSTFIX_IS) ereport(WARNING, (errmsg("operator precedence change: %s is now lower precedence than %s", diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c index 2749974f6384d..6613a3a8f8798 100644 --- a/src/backend/parser/parse_oper.c +++ b/src/backend/parser/parse_oper.c @@ -52,7 +52,7 @@ typedef struct OprCacheKey { char oprname[NAMEDATALEN]; Oid left_arg; /* Left input OID, or 0 if prefix op */ - Oid right_arg; /* Right input OID, or 0 if postfix op */ + Oid right_arg; /* Right input OID */ Oid search_path[MAX_CACHED_PATH_LEN]; } OprCacheKey; @@ -88,8 +88,7 @@ static void InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue) * Given a possibly-qualified operator name 
and exact input datatypes, * look up the operator. * - * Pass oprleft = InvalidOid for a prefix op, oprright = InvalidOid for - * a postfix op. + * Pass oprleft = InvalidOid for a prefix op. * * If the operator name is not schema-qualified, it is sought in the current * namespace search path. @@ -115,10 +114,16 @@ LookupOperName(ParseState *pstate, List *opername, Oid oprleft, Oid oprright, if (!OidIsValid(oprleft)) oprkind = 'l'; - else if (!OidIsValid(oprright)) - oprkind = 'r'; - else + else if (OidIsValid(oprright)) oprkind = 'b'; + else + { + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("postfix operators are not supported"), + parser_errposition(pstate, location))); + oprkind = 0; /* keep compiler quiet */ + } ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), @@ -507,85 +512,6 @@ compatible_oper_opid(List *op, Oid arg1, Oid arg2, bool noError) } -/* right_oper() -- search for a unary right operator (postfix operator) - * Given operator name and type of arg, return oper struct. - * - * IMPORTANT: the returned operator (if any) is only promised to be - * coercion-compatible with the input datatype. Do not use this if - * you need an exact- or binary-compatible match. - * - * If no matching operator found, return NULL if noError is true, - * raise an error if it is false. pstate and location are used only to report - * the error position; pass NULL/-1 if not available. - * - * NOTE: on success, the returned object is a syscache entry. The caller - * must ReleaseSysCache() the entry when done with it. - */ -Operator -right_oper(ParseState *pstate, List *op, Oid arg, bool noError, int location) -{ - Oid operOid; - OprCacheKey key; - bool key_ok; - FuncDetailCode fdresult = FUNCDETAIL_NOTFOUND; - HeapTuple tup = NULL; - - /* - * Try to find the mapping in the lookaside cache. 
- */ - key_ok = make_oper_cache_key(pstate, &key, op, arg, InvalidOid, location); - - if (key_ok) - { - operOid = find_oper_cache_entry(&key); - if (OidIsValid(operOid)) - { - tup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operOid)); - if (HeapTupleIsValid(tup)) - return (Operator) tup; - } - } - - /* - * First try for an "exact" match. - */ - operOid = OpernameGetOprid(op, arg, InvalidOid); - if (!OidIsValid(operOid)) - { - /* - * Otherwise, search for the most suitable candidate. - */ - FuncCandidateList clist; - - /* Get postfix operators of given name */ - clist = OpernameGetCandidates(op, 'r', false); - - /* No operators found? Then fail... */ - if (clist != NULL) - { - /* - * We must run oper_select_candidate even if only one candidate, - * otherwise we may falsely return a non-type-compatible operator. - */ - fdresult = oper_select_candidate(1, &arg, clist, &operOid); - } - } - - if (OidIsValid(operOid)) - tup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operOid)); - - if (HeapTupleIsValid(tup)) - { - if (key_ok) - make_oper_cache_entry(&key, operOid); - } - else if (!noError) - op_error(pstate, op, 'r', arg, InvalidOid, fdresult, location); - - return (Operator) tup; -} - - /* left_oper() -- search for a unary left operator (prefix operator) * Given operator name and type of arg, return oper struct. 
* @@ -696,8 +622,7 @@ op_signature_string(List *op, char oprkind, Oid arg1, Oid arg2) appendStringInfoString(&argbuf, NameListToString(op)); - if (oprkind != 'r') - appendStringInfo(&argbuf, " %s", format_type_be(arg2)); + appendStringInfo(&argbuf, " %s", format_type_be(arg2)); return argbuf.data; /* return palloc'd string buffer */ } @@ -758,17 +683,16 @@ make_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree, Oid rettype; OpExpr *result; - /* Select the operator */ + /* Check it's not a postfix operator */ if (rtree == NULL) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("postfix operators are not supported"))); + + /* Select the operator */ + if (ltree == NULL) { - /* right operator */ - ltypeId = exprType(ltree); - rtypeId = InvalidOid; - tup = right_oper(pstate, opname, ltypeId, false, location); - } - else if (ltree == NULL) - { - /* left operator */ + /* prefix operator */ rtypeId = exprType(rtree); ltypeId = InvalidOid; tup = left_oper(pstate, opname, rtypeId, false, location); @@ -795,17 +719,9 @@ make_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree, parser_errposition(pstate, location))); /* Do typecasting and build the expression tree */ - if (rtree == NULL) - { - /* right operator */ - args = list_make1(ltree); - actual_arg_types[0] = ltypeId; - declared_arg_types[0] = opform->oprleft; - nargs = 1; - } - else if (ltree == NULL) + if (ltree == NULL) { - /* left operator */ + /* prefix operator */ args = list_make1(rtree); actual_arg_types[0] = rtypeId; declared_arg_types[0] = opform->oprright; diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index b875a506463f8..a56bd86181a70 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -1737,16 +1737,46 @@ addRangeTableEntryForFunction(ParseState *pstate, /* * A coldeflist is required if the function returns RECORD and hasn't - * got a predetermined record type, and is prohibited otherwise. 
+ * got a predetermined record type, and is prohibited otherwise. This + * can be a bit confusing, so we expend some effort on delivering a + * relevant error message. */ if (coldeflist != NIL) { - if (functypclass != TYPEFUNC_RECORD) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("a column definition list is only allowed for functions returning \"record\""), - parser_errposition(pstate, - exprLocation((Node *) coldeflist)))); + switch (functypclass) + { + case TYPEFUNC_RECORD: + /* ok */ + break; + case TYPEFUNC_COMPOSITE: + case TYPEFUNC_COMPOSITE_DOMAIN: + + /* + * If the function's raw result type is RECORD, we must + * have resolved it using its OUT parameters. Otherwise, + * it must have a named composite type. + */ + if (exprType(funcexpr) == RECORDOID) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("a column definition list is redundant for a function with OUT parameters"), + parser_errposition(pstate, + exprLocation((Node *) coldeflist)))); + else + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("a column definition list is redundant for a function returning a named composite type"), + parser_errposition(pstate, + exprLocation((Node *) coldeflist)))); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("a column definition list is only allowed for functions returning \"record\""), + parser_errposition(pstate, + exprLocation((Node *) coldeflist)))); + break; + } } else { diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c index 566c5178373e5..9de0cff833896 100644 --- a/src/backend/parser/parse_target.c +++ b/src/backend/parser/parse_target.c @@ -412,7 +412,7 @@ markTargetListOrigin(ParseState *pstate, TargetEntry *tle, ste = get_tle_by_resno(GetCTETargetList(cte), attnum); if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", + elog(ERROR, "CTE %s does not have attribute %d", rte->eref->aliasname, attnum); tle->resorigtbl = 
ste->resorigtbl; tle->resorigcol = ste->resorigcol; @@ -1606,7 +1606,7 @@ expandRecordVariable(ParseState *pstate, Var *var, int levelsup) ste = get_tle_by_resno(GetCTETargetList(cte), attnum); if (ste == NULL || ste->resjunk) - elog(ERROR, "subquery %s does not have attribute %d", + elog(ERROR, "CTE %s does not have attribute %d", rte->eref->aliasname, attnum); expr = (Node *) ste->expr; if (IsA(expr, Var)) diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index ec944371dd36c..015b0538e3391 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -360,6 +360,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, CreateSeqStmt *seqstmt; AlterSeqStmt *altseqstmt; List *attnamelist; + int nameEl_idx = -1; /* * Determine namespace and name to use for the sequence. @@ -386,6 +387,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); nameEl = defel; + nameEl_idx = foreach_current_index(option); } } @@ -405,7 +407,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column, } sname = rv->relname; /* Remove the SEQUENCE NAME item from seqoptions */ - seqoptions = list_delete_ptr(seqoptions, nameEl); + seqoptions = list_delete_nth_cell(seqoptions, nameEl_idx); } else { @@ -4164,7 +4166,7 @@ validateInfiniteBounds(ParseState *pstate, List *blist) } /* - * Transform one constant in a partition bound spec + * Transform one entry in a partition bound spec, producing a constant. */ static Const * transformPartitionBoundValue(ParseState *pstate, Node *val, @@ -4177,50 +4179,17 @@ transformPartitionBoundValue(ParseState *pstate, Node *val, value = transformExpr(pstate, val, EXPR_KIND_PARTITION_BOUND); /* - * Check that the input expression's collation is compatible with one - * specified for the parent's partition key (partcollation). 
Don't throw - * an error if it's the default collation which we'll replace with the - * parent's collation anyway. + * transformExpr() should have already rejected column references, + * subqueries, aggregates, window functions, and SRFs, based on the + * EXPR_KIND_ of a partition bound expression. */ - if (IsA(value, CollateExpr)) - { - Oid exprCollOid = exprCollation(value); - - /* - * Check we have a collation iff it is a collatable type. The only - * expected failures here are (1) COLLATE applied to a noncollatable - * type, or (2) partition bound expression had an unresolved - * collation. But we might as well code this to be a complete - * consistency check. - */ - if (type_is_collatable(colType)) - { - if (!OidIsValid(exprCollOid)) - ereport(ERROR, - (errcode(ERRCODE_INDETERMINATE_COLLATION), - errmsg("could not determine which collation to use for partition bound expression"), - errhint("Use the COLLATE clause to set the collation explicitly."))); - } - else - { - if (OidIsValid(exprCollOid)) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("collations are not supported by type %s", - format_type_be(colType)))); - } - - if (OidIsValid(exprCollOid) && - exprCollOid != DEFAULT_COLLATION_OID && - exprCollOid != partCollation) - ereport(ERROR, - (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("collation of partition bound value for column \"%s\" does not match partition key collation \"%s\"", - colName, get_collation_name(partCollation)), - parser_errposition(pstate, exprLocation(value)))); - } + Assert(!contain_var_clause(value)); - /* Coerce to correct type */ + /* + * Coerce to the correct type. This might cause an explicit coercion step + * to be added on top of the expression, which must be evaluated before + * returning the result to the caller. 
+ */ value = coerce_to_target_type(pstate, value, exprType(value), colType, @@ -4236,25 +4205,36 @@ transformPartitionBoundValue(ParseState *pstate, Node *val, format_type_be(colType), colName), parser_errposition(pstate, exprLocation(val)))); - /* Simplify the expression, in case we had a coercion */ - if (!IsA(value, Const)) - value = (Node *) expression_planner((Expr *) value); - /* - * transformExpr() should have already rejected column references, - * subqueries, aggregates, window functions, and SRFs, based on the - * EXPR_KIND_ for a default expression. + * Evaluate the expression, if needed, assigning the partition key's data + * type and collation to the resulting Const node. */ - Assert(!contain_var_clause(value)); + if (!IsA(value, Const)) + { + assign_expr_collations(pstate, value); + value = (Node *) expression_planner((Expr *) value); + value = (Node *) evaluate_expr((Expr *) value, colType, colTypmod, + partCollation); + if (!IsA(value, Const)) + elog(ERROR, "could not evaluate partition bound expression"); + } + else + { + /* + * If the expression is already a Const, as is often the case, we can + * skip the rather expensive steps above. But we still have to insert + * the right collation, since coerce_to_target_type doesn't handle + * that. + */ + ((Const *) value)->constcollid = partCollation; + } /* - * Evaluate the expression, assigning the partition key's collation to the - * resulting Const expression. + * Attach original expression's parse location to the Const, so that + * that's what will be reported for any later errors related to this + * partition bound. 
*/ - value = (Node *) evaluate_expr((Expr *) value, colType, colTypmod, - partCollation); - if (!IsA(value, Const)) - elog(ERROR, "could not evaluate partition bound expression"); + ((Const *) value)->location = exprLocation(val); return (Const *) value; } diff --git a/src/backend/parser/scan.l b/src/backend/parser/scan.l index b1ea0cb538467..4eab2980c9908 100644 --- a/src/backend/parser/scan.l +++ b/src/backend/parser/scan.l @@ -73,7 +73,7 @@ bool standard_conforming_strings = true; * callers need to pass it to scanner_init, if they are using the * standard keyword list ScanKeywords. */ -#define PG_KEYWORD(kwname, value, category) value, +#define PG_KEYWORD(kwname, value, category, collabel) value, const uint16 ScanKeywordTokens[] = { #include "parser/kwlist.h" diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c index cac70d5df7afc..d07cbafcee757 100644 --- a/src/backend/parser/scansup.c +++ b/src/backend/parser/scansup.c @@ -1,8 +1,7 @@ /*------------------------------------------------------------------------- * * scansup.c - * support routines for the lex/flex scanner, used by both the normal - * backend as well as the bootstrap backend + * scanner support routines used by the core lexer * * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -20,98 +19,6 @@ #include "mb/pg_wchar.h" #include "parser/scansup.h" -/* ---------------- - * scanstr - * - * if the string passed in has escaped codes, map the escape codes to actual - * chars - * - * the string returned is palloc'd and should eventually be pfree'd by the - * caller! 
- * ---------------- - */ - -char * -scanstr(const char *s) -{ - char *newStr; - int len, - i, - j; - - if (s == NULL || s[0] == '\0') - return pstrdup(""); - - len = strlen(s); - - newStr = palloc(len + 1); /* string cannot get longer */ - - for (i = 0, j = 0; i < len; i++) - { - if (s[i] == '\'') - { - /* - * Note: if scanner is working right, unescaped quotes can only - * appear in pairs, so there should be another character. - */ - i++; - /* The bootstrap parser is not as smart, so check here. */ - Assert(s[i] == '\''); - newStr[j] = s[i]; - } - else if (s[i] == '\\') - { - i++; - switch (s[i]) - { - case 'b': - newStr[j] = '\b'; - break; - case 'f': - newStr[j] = '\f'; - break; - case 'n': - newStr[j] = '\n'; - break; - case 'r': - newStr[j] = '\r'; - break; - case 't': - newStr[j] = '\t'; - break; - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - { - int k; - long octVal = 0; - - for (k = 0; - s[i + k] >= '0' && s[i + k] <= '7' && k < 3; - k++) - octVal = (octVal << 3) + (s[i + k] - '0'); - i += k - 1; - newStr[j] = ((char) octVal); - } - break; - default: - newStr[j] = s[i]; - break; - } /* switch */ - } /* s[i] == '\\' */ - else - newStr[j] = s[i]; - j++; - } - newStr[j] = '\0'; - return newStr; -} - /* * downcase_truncate_identifier() --- do appropriate downcasing and diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index 419c8fe845160..66c42b58986e6 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -223,7 +223,7 @@ static int32 partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, static int partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, - PartitionRangeBound *probe, bool *is_equal); + PartitionRangeBound *probe, int32 *cmpval); static int get_partition_bound_num_indexes(PartitionBoundInfo b); static Expr *make_partition_op_expr(PartitionKey key, int 
keynum, uint16 strategy, Expr *arg1, Expr *arg2); @@ -1020,8 +1020,6 @@ partition_bounds_merge(int partnatts, JoinType jointype, List **outer_parts, List **inner_parts) { - PartitionBoundInfo outer_binfo = outer_rel->boundinfo; - /* * Currently, this function is called only from try_partitionwise_join(), * so the join type should be INNER, LEFT, FULL, SEMI, or ANTI. @@ -1031,10 +1029,10 @@ partition_bounds_merge(int partnatts, jointype == JOIN_ANTI); /* The partitioning strategies should be the same. */ - Assert(outer_binfo->strategy == inner_rel->boundinfo->strategy); + Assert(outer_rel->boundinfo->strategy == inner_rel->boundinfo->strategy); *outer_parts = *inner_parts = NIL; - switch (outer_binfo->strategy) + switch (outer_rel->boundinfo->strategy) { case PARTITION_STRATEGY_HASH: @@ -1075,7 +1073,7 @@ partition_bounds_merge(int partnatts, default: elog(ERROR, "unexpected partition strategy: %d", - (int) outer_binfo->strategy); + (int) outer_rel->boundinfo->strategy); return NULL; /* keep compiler quiet */ } } @@ -1528,7 +1526,7 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs, &next_index); Assert(merged_index >= 0); - /* Get the range of the merged partition. */ + /* Get the range bounds of the merged partition. */ get_merged_range_bounds(partnatts, partsupfuncs, partcollations, jointype, &outer_lb, &outer_ub, @@ -1833,7 +1831,7 @@ merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map, /* * If neither of them has been merged, merge them. Otherwise, if one has - * been merged with a dummy relation on the other side (and the other + * been merged with a dummy partition on the other side (and the other * hasn't yet been merged with anything), re-merge them. Otherwise, they * can't be merged, so return -1. 
*/ @@ -2807,14 +2805,14 @@ partitions_are_ordered(PartitionBoundInfo boundinfo, int nparts) */ void check_new_partition_bound(char *relname, Relation parent, - PartitionBoundSpec *spec) + PartitionBoundSpec *spec, ParseState *pstate) { PartitionKey key = RelationGetPartitionKey(parent); PartitionDesc partdesc = RelationGetPartitionDesc(parent); PartitionBoundInfo boundinfo = partdesc->boundinfo; - ParseState *pstate = make_parsestate(NULL); int with = -1; bool overlap = false; + int overlap_location = -1; if (spec->is_default) { @@ -2909,6 +2907,7 @@ check_new_partition_bound(char *relname, Relation parent, if (boundinfo->indexes[remainder] != -1) { overlap = true; + overlap_location = spec->location; with = boundinfo->indexes[remainder]; break; } @@ -2937,6 +2936,7 @@ check_new_partition_bound(char *relname, Relation parent, { Const *val = castNode(Const, lfirst(cell)); + overlap_location = val->location; if (!val->constisnull) { int offset; @@ -2970,6 +2970,7 @@ check_new_partition_bound(char *relname, Relation parent, { PartitionRangeBound *lower, *upper; + int cmpval; Assert(spec->strategy == PARTITION_STRATEGY_RANGE); lower = make_one_partition_rbound(key, -1, spec->lowerdatums, true); @@ -2979,10 +2980,17 @@ check_new_partition_bound(char *relname, Relation parent, * First check if the resulting range would be empty with * specified lower and upper bounds */ - if (partition_rbound_cmp(key->partnatts, key->partsupfunc, - key->partcollation, lower->datums, - lower->kind, true, upper) >= 0) + cmpval = partition_rbound_cmp(key->partnatts, + key->partsupfunc, + key->partcollation, + lower->datums, lower->kind, + true, upper); + if (cmpval >= 0) { + /* Fetch the problematic key from the lower datums list. 
*/ + PartitionRangeDatum *datum = list_nth(spec->lowerdatums, + cmpval - 1); + ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("empty range bound specified for partition \"%s\"", @@ -2990,13 +2998,12 @@ check_new_partition_bound(char *relname, Relation parent, errdetail("Specified lower bound %s is greater than or equal to upper bound %s.", get_range_partbound_string(spec->lowerdatums), get_range_partbound_string(spec->upperdatums)), - parser_errposition(pstate, spec->location))); + parser_errposition(pstate, datum->location))); } if (partdesc->nparts > 0) { int offset; - bool equal; Assert(boundinfo && boundinfo->strategy == PARTITION_STRATEGY_RANGE && @@ -3022,7 +3029,7 @@ check_new_partition_bound(char *relname, Relation parent, key->partsupfunc, key->partcollation, boundinfo, lower, - &equal); + &cmpval); if (boundinfo->indexes[offset + 1] < 0) { @@ -3034,7 +3041,6 @@ check_new_partition_bound(char *relname, Relation parent, */ if (offset + 1 < boundinfo->ndatums) { - int32 cmpval; Datum *datums; PartitionRangeDatumKind *kind; bool is_lower; @@ -3050,12 +3056,20 @@ check_new_partition_bound(char *relname, Relation parent, is_lower, upper); if (cmpval < 0) { + /* + * Fetch the problematic key from the upper + * datums list. + */ + PartitionRangeDatum *datum = + list_nth(spec->upperdatums, -cmpval - 1); + /* * The new partition overlaps with the * existing partition between offset + 1 and * offset + 2. */ overlap = true; + overlap_location = datum->location; with = boundinfo->indexes[offset + 2]; } } @@ -3066,7 +3080,20 @@ check_new_partition_bound(char *relname, Relation parent, * The new partition overlaps with the existing * partition between offset and offset + 1. */ + PartitionRangeDatum *datum; + + /* + * Fetch the problematic key from the lower datums + * list. Given the way partition_range_bsearch() + * works, the new lower bound is certainly >= the + * bound at offset. If the bound matches exactly, we + * flag the 1st key. 
+ */ + Assert(cmpval >= 0); + datum = cmpval == 0 ? linitial(spec->lowerdatums) : + list_nth(spec->lowerdatums, cmpval - 1); overlap = true; + overlap_location = datum->location; with = boundinfo->indexes[offset + 1]; } } @@ -3086,7 +3113,7 @@ check_new_partition_bound(char *relname, Relation parent, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("partition \"%s\" would overlap partition \"%s\"", relname, get_rel_name(partdesc->oids[with])), - parser_errposition(pstate, spec->location))); + parser_errposition(pstate, overlap_location))); } } @@ -3319,8 +3346,12 @@ make_one_partition_rbound(PartitionKey key, int index, List *datums, bool lower) /* * partition_rbound_cmp * - * Return for two range bounds whether the 1st one (specified in datums1, - * kind1, and lower1) is <, =, or > the bound specified in *b2. + * For two range bounds this decides whether the 1st one (specified by + * datums1, kind1, and lower1) is <, =, or > the bound specified in *b2. + * + * 0 is returned if they are equal, otherwise a non-zero integer whose sign + * indicates the ordering, and whose absolute value gives the 1-based + * partition key number of the first mismatching column. 
* * partnatts, partsupfunc and partcollation give the number of attributes in the * bounds to be compared, comparison function to be used and the collations of @@ -3339,6 +3370,7 @@ partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, Datum *datums1, PartitionRangeDatumKind *kind1, bool lower1, PartitionRangeBound *b2) { + int32 colnum = 0; int32 cmpval = 0; /* placate compiler */ int i; Datum *datums2 = b2->datums; @@ -3347,6 +3379,9 @@ partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, for (i = 0; i < partnatts; i++) { + /* Track column number in case we need it for result */ + colnum++; + /* * First, handle cases where the column is unbounded, which should not * invoke the comparison procedure, and should not consider any later @@ -3354,9 +3389,9 @@ partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, * compare the same way as the values they represent. */ if (kind1[i] < kind2[i]) - return -1; + return -colnum; else if (kind1[i] > kind2[i]) - return 1; + return colnum; else if (kind1[i] != PARTITION_RANGE_DATUM_VALUE) /* @@ -3383,7 +3418,7 @@ partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, if (cmpval == 0 && lower1 != lower2) cmpval = lower1 ? 1 : -1; - return cmpval; + return cmpval == 0 ? 0 : (cmpval < 0 ? -colnum : colnum); } /* @@ -3395,7 +3430,6 @@ partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc, * n_tuple_datums, partsupfunc and partcollation give number of attributes in * the bounds to be compared, comparison function to be used and the collations * of attributes resp. 
- * */ int32 partition_rbound_datum_cmp(FmgrInfo *partsupfunc, Oid *partcollation, @@ -3488,14 +3522,17 @@ partition_list_bsearch(FmgrInfo *partsupfunc, Oid *partcollation, * equal to the given range bound or -1 if all of the range bounds are * greater * - * *is_equal is set to true if the range bound at the returned index is equal - * to the input range bound + * Upon return from this function, *cmpval is set to 0 if the bound at the + * returned index matches the input range bound exactly, otherwise a + * non-zero integer whose sign indicates the ordering, and whose absolute + * value gives the 1-based partition key number of the first mismatching + * column. */ static int partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc, Oid *partcollation, PartitionBoundInfo boundinfo, - PartitionRangeBound *probe, bool *is_equal) + PartitionRangeBound *probe, int32 *cmpval) { int lo, hi, @@ -3505,21 +3542,17 @@ partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc, hi = boundinfo->ndatums - 1; while (lo < hi) { - int32 cmpval; - mid = (lo + hi + 1) / 2; - cmpval = partition_rbound_cmp(partnatts, partsupfunc, - partcollation, - boundinfo->datums[mid], - boundinfo->kind[mid], - (boundinfo->indexes[mid] == -1), - probe); - if (cmpval <= 0) + *cmpval = partition_rbound_cmp(partnatts, partsupfunc, + partcollation, + boundinfo->datums[mid], + boundinfo->kind[mid], + (boundinfo->indexes[mid] == -1), + probe); + if (*cmpval <= 0) { lo = mid; - *is_equal = (cmpval == 0); - - if (*is_equal) + if (*cmpval == 0) break; } else @@ -3530,7 +3563,7 @@ partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc, } /* - * partition_range_bsearch + * partition_range_datum_bsearch * Returns the index of the greatest range bound that is less than or * equal to the given tuple or -1 if all of the range bounds are greater * diff --git a/src/backend/port/win32/crashdump.c b/src/backend/port/win32/crashdump.c index e6c68379b20e2..47114d916cc1d 100644 --- 
a/src/backend/port/win32/crashdump.c +++ b/src/backend/port/win32/crashdump.c @@ -122,7 +122,7 @@ crashDumpHandler(struct _EXCEPTION_POINTERS *pExceptionInfo) return EXCEPTION_CONTINUE_SEARCH; } - pDump = (MINIDUMPWRITEDUMP) GetProcAddress(hDll, "MiniDumpWriteDump"); + pDump = (MINIDUMPWRITEDUMP) (pg_funcptr_t) GetProcAddress(hDll, "MiniDumpWriteDump"); if (pDump == NULL) { diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c index 6fbd1ed6fb49f..7c7611a01e233 100644 --- a/src/backend/port/win32/socket.c +++ b/src/backend/port/win32/socket.c @@ -120,13 +120,21 @@ TranslateSocketError(void) case WSAEADDRNOTAVAIL: errno = EADDRNOTAVAIL; break; - case WSAEHOSTUNREACH: case WSAEHOSTDOWN: + errno = EHOSTDOWN; + break; + case WSAEHOSTUNREACH: case WSAHOST_NOT_FOUND: + errno = EHOSTUNREACH; + break; case WSAENETDOWN: + errno = ENETDOWN; + break; case WSAENETUNREACH: + errno = ENETUNREACH; + break; case WSAENETRESET: - errno = EHOSTUNREACH; + errno = ENETRESET; break; case WSAENOTCONN: case WSAESHUTDOWN: diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 1b8cd7bacd43c..2cef56f115f4e 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -454,8 +454,8 @@ AutoVacLauncherMain(int argc, char *argv[]) pqsignal(SIGHUP, SignalHandlerForConfigReload); pqsignal(SIGINT, StatementCancelHandler); pqsignal(SIGTERM, SignalHandlerForShutdownRequest); + /* SIGQUIT handler was already set up by InitPostmasterChild */ - pqsignal(SIGQUIT, quickdie); InitializeTimeouts(); /* establishes SIGALRM handler */ pqsignal(SIGPIPE, SIG_IGN); @@ -495,6 +495,13 @@ AutoVacLauncherMain(int argc, char *argv[]) * If an exception is encountered, processing resumes here. * * This code is a stripped down version of PostgresMain error recovery. + * + * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask + * (to wit, BlockSig) will be restored when longjmp'ing to here. 
Thus, + * signals other than SIGQUIT will be blocked until we complete error + * recovery. It might seem that this policy makes the HOLD_INTERRUPTS() + * call redundant, but it is not since InterruptPending might be set + * already. */ if (sigsetjmp(local_sigjmp_buf, 1) != 0) { @@ -1525,7 +1532,8 @@ AutoVacWorkerMain(int argc, char *argv[]) */ pqsignal(SIGINT, StatementCancelHandler); pqsignal(SIGTERM, die); - pqsignal(SIGQUIT, quickdie); + /* SIGQUIT handler was already set up by InitPostmasterChild */ + InitializeTimeouts(); /* establishes SIGALRM handler */ pqsignal(SIGPIPE, SIG_IGN); @@ -1550,7 +1558,15 @@ AutoVacWorkerMain(int argc, char *argv[]) /* * If an exception is encountered, processing resumes here. * - * See notes in postgres.c about the design of this coding. + * Unlike most auxiliary processes, we don't attempt to continue + * processing after an error; we just clean up and exit. The autovac + * launcher is responsible for spawning another worker later. + * + * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask + * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus, + * signals other than SIGQUIT will be blocked until we exit. It might + * seem that this policy makes the HOLD_INTERRUPTS() call redundant, but + * it is not since InterruptPending might be set already. 
*/ if (sigsetjmp(local_sigjmp_buf, 1) != 0) { diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index d043ced6861a4..5a9a0e3435376 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -731,9 +731,9 @@ StartBackgroundWorker(void) pqsignal(SIGFPE, SIG_IGN); } pqsignal(SIGTERM, bgworker_die); + /* SIGQUIT handler was already set up by InitPostmasterChild */ pqsignal(SIGHUP, SIG_IGN); - pqsignal(SIGQUIT, SignalHandlerForCrashExit); InitializeTimeouts(); /* establishes SIGALRM handler */ pqsignal(SIGPIPE, SIG_IGN); diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c index 069e27e427fed..a7afa758b618d 100644 --- a/src/backend/postmaster/bgwriter.c +++ b/src/backend/postmaster/bgwriter.c @@ -104,7 +104,7 @@ BackgroundWriterMain(void) pqsignal(SIGHUP, SignalHandlerForConfigReload); pqsignal(SIGINT, SIG_IGN); pqsignal(SIGTERM, SignalHandlerForShutdownRequest); - pqsignal(SIGQUIT, SignalHandlerForCrashExit); + /* SIGQUIT handler was already set up by InitPostmasterChild */ pqsignal(SIGALRM, SIG_IGN); pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, procsignal_sigusr1_handler); @@ -115,9 +115,6 @@ BackgroundWriterMain(void) */ pqsignal(SIGCHLD, SIG_DFL); - /* We allow SIGQUIT (quickdie) at all times */ - sigdelset(&BlockSig, SIGQUIT); - /* * We just started, assume there has been either a shutdown or * end-of-recovery snapshot. @@ -140,7 +137,20 @@ BackgroundWriterMain(void) /* * If an exception is encountered, processing resumes here. * - * See notes in postgres.c about the design of this coding. + * You might wonder why this isn't coded as an infinite loop around a + * PG_TRY construct. The reason is that this is the bottom of the + * exception stack, and so with PG_TRY there would be no exception handler + * in force at all during the CATCH part. 
By leaving the outermost setjmp + * always active, we have at least some chance of recovering from an error + * during error recovery. (If we get into an infinite loop thereby, it + * will soon be stopped by overflow of elog.c's internal state stack.) + * + * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask + * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus, + * signals other than SIGQUIT will be blocked until we complete error + * recovery. It might seem that this policy makes the HOLD_INTERRUPTS() + * call redundant, but it is not since InterruptPending might be set + * already. */ if (sigsetjmp(local_sigjmp_buf, 1) != 0) { diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 624a3238b804c..429c8010ef449 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -198,7 +198,7 @@ CheckpointerMain(void) pqsignal(SIGHUP, SignalHandlerForConfigReload); pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */ pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */ - pqsignal(SIGQUIT, SignalHandlerForCrashExit); + /* SIGQUIT handler was already set up by InitPostmasterChild */ pqsignal(SIGALRM, SIG_IGN); pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, procsignal_sigusr1_handler); @@ -209,9 +209,6 @@ CheckpointerMain(void) */ pqsignal(SIGCHLD, SIG_DFL); - /* We allow SIGQUIT (quickdie) at all times */ - sigdelset(&BlockSig, SIGQUIT); - /* * Initialize so that first time-driven event happens at the correct time. */ @@ -231,7 +228,20 @@ CheckpointerMain(void) /* * If an exception is encountered, processing resumes here. * - * See notes in postgres.c about the design of this coding. + * You might wonder why this isn't coded as an infinite loop around a + * PG_TRY construct. The reason is that this is the bottom of the + * exception stack, and so with PG_TRY there would be no exception handler + * in force at all during the CATCH part. 
By leaving the outermost setjmp + * always active, we have at least some chance of recovering from an error + * during error recovery. (If we get into an infinite loop thereby, it + * will soon be stopped by overflow of elog.c's internal state stack.) + * + * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask + * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus, + * signals other than SIGQUIT will be blocked until we complete error + * recovery. It might seem that this policy makes the HOLD_INTERRUPTS() + * call redundant, but it is not since InterruptPending might be set + * already. */ if (sigsetjmp(local_sigjmp_buf, 1) != 0) { @@ -494,6 +504,9 @@ CheckpointerMain(void) */ pgstat_send_bgwriter(); + /* Send WAL statistics to the stats collector. */ + pgstat_send_wal(); + /* * If any checkpoint flags have been set, redo the loop to handle the * checkpoint without sleeping. diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c index 01ffd6513c7c1..ed1b65358df85 100644 --- a/src/backend/postmaster/pgarch.c +++ b/src/backend/postmaster/pgarch.c @@ -96,7 +96,6 @@ static pid_t pgarch_forkexec(void); #endif NON_EXEC_STATIC void PgArchiverMain(int argc, char *argv[]) pg_attribute_noreturn(); -static void pgarch_exit(SIGNAL_ARGS); static void pgarch_waken(SIGNAL_ARGS); static void pgarch_waken_stop(SIGNAL_ARGS); static void pgarch_MainLoop(void); @@ -229,7 +228,7 @@ PgArchiverMain(int argc, char *argv[]) pqsignal(SIGHUP, SignalHandlerForConfigReload); pqsignal(SIGINT, SIG_IGN); pqsignal(SIGTERM, SignalHandlerForShutdownRequest); - pqsignal(SIGQUIT, pgarch_exit); + /* SIGQUIT handler was already set up by InitPostmasterChild */ pqsignal(SIGALRM, SIG_IGN); pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, pgarch_waken); @@ -246,14 +245,6 @@ PgArchiverMain(int argc, char *argv[]) exit(0); } -/* SIGQUIT signal handler for archiver process */ -static void -pgarch_exit(SIGNAL_ARGS) -{ - /* SIGQUIT means curl up and die 
... */ - exit(1); -} - /* SIGUSR1 signal handler for archiver process */ static void pgarch_waken(SIGNAL_ARGS) diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 5f4b168fd16bf..822f0ebc6285a 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -51,6 +51,7 @@ #include "postmaster/fork_process.h" #include "postmaster/interrupt.h" #include "postmaster/postmaster.h" +#include "replication/slot.h" #include "replication/walsender.h" #include "storage/backendid.h" #include "storage/dsm.h" @@ -135,11 +136,12 @@ char *pgstat_stat_filename = NULL; char *pgstat_stat_tmpname = NULL; /* - * BgWriter global statistics counters (unused in other processes). - * Stored directly in a stats message structure so it can be sent - * without needing to copy things around. We assume this inits to zeroes. + * BgWriter and WAL global statistics counters. + * Stored directly in a stats message structure so they can be sent + * without needing to copy things around. We assume these init to zeroes. */ PgStat_MsgBgWriter BgWriterStats; +PgStat_MsgWal WalStats; /* * List of SLRU names that we keep stats for. There is no central registry of @@ -281,7 +283,10 @@ static int localNumBackends = 0; */ static PgStat_ArchiverStats archiverStats; static PgStat_GlobalStats globalStats; +static PgStat_WalStats walStats; static PgStat_SLRUStats slruStats[SLRU_NUM_ELEMENTS]; +static PgStat_ReplSlotStats *replSlotStats; +static int nReplSlotStats; /* * List of OIDs of databases we need to write out. 
If an entry is InvalidOid, @@ -322,6 +327,9 @@ static void pgstat_read_current_status(void); static bool pgstat_write_statsfile_needed(void); static bool pgstat_db_requested(Oid databaseid); +static int pgstat_replslot_index(const char *name, bool create_it); +static void pgstat_reset_replslot(int i, TimestampTz ts); + static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg); static void pgstat_send_funcstats(void); static void pgstat_send_slru(void); @@ -348,17 +356,20 @@ static void pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len); static void pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, int len); static void pgstat_recv_resetsinglecounter(PgStat_MsgResetsinglecounter *msg, int len); static void pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len); +static void pgstat_recv_resetreplslotcounter(PgStat_MsgResetreplslotcounter *msg, int len); static void pgstat_recv_autovac(PgStat_MsgAutovacStart *msg, int len); static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len); static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len); static void pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len); static void pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len); +static void pgstat_recv_wal(PgStat_MsgWal *msg, int len); static void pgstat_recv_slru(PgStat_MsgSLRU *msg, int len); static void pgstat_recv_funcstat(PgStat_MsgFuncstat *msg, int len); static void pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len); static void pgstat_recv_recoveryconflict(PgStat_MsgRecoveryConflict *msg, int len); static void pgstat_recv_deadlock(PgStat_MsgDeadlock *msg, int len); static void pgstat_recv_checksum_failure(PgStat_MsgChecksumFailure *msg, int len); +static void pgstat_recv_replslot(PgStat_MsgReplSlot *msg, int len); static void pgstat_recv_tempfile(PgStat_MsgTempFile *msg, int len); /* ------------------------------------------------------------ @@ -938,6 +949,9 @@ pgstat_report_stat(bool force) /* Now, 
send function statistics */ pgstat_send_funcstats(); + /* Send WAL statistics */ + pgstat_send_wal(); + /* Finally send SLRU statistics */ pgstat_send_slru(); } @@ -1370,11 +1384,13 @@ pgstat_reset_shared_counters(const char *target) msg.m_resettarget = RESET_ARCHIVER; else if (strcmp(target, "bgwriter") == 0) msg.m_resettarget = RESET_BGWRITER; + else if (strcmp(target, "wal") == 0) + msg.m_resettarget = RESET_WAL; else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("unrecognized reset target: \"%s\"", target), - errhint("Target must be \"archiver\" or \"bgwriter\"."))); + errhint("Target must be \"archiver\", \"bgwriter\" or \"wal\"."))); pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETSHAREDCOUNTER); pgstat_send(&msg, sizeof(msg)); @@ -1429,6 +1445,61 @@ pgstat_reset_slru_counter(const char *name) pgstat_send(&msg, sizeof(msg)); } +/* ---------- + * pgstat_reset_replslot_counter() - + * + * Tell the statistics collector to reset a single replication slot + * counter, or all replication slot counters (when name is null). + * + * Permission checking for this function is managed through the normal + * GRANT system. + * ---------- + */ +void +pgstat_reset_replslot_counter(const char *name) +{ + PgStat_MsgResetreplslotcounter msg; + + if (pgStatSock == PGINVALID_SOCKET) + return; + + if (name) + { + ReplicationSlot *slot; + + /* + * Check if the slot exists with the given name. It is possible that by + * the time this message is executed the slot is dropped but at least + * this check will ensure that the given name is for a valid slot. + */ + LWLockAcquire(ReplicationSlotControlLock, LW_SHARED); + slot = SearchNamedReplicationSlot(name); + LWLockRelease(ReplicationSlotControlLock); + + if (!slot) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("replication slot \"%s\" does not exist", + name))); + + /* + * Nothing to do for physical slots as we collect stats only for + * logical slots.
+ */ + if (SlotIsPhysical(slot)) + return; + + memcpy(&msg.m_slotname, name, NAMEDATALEN); + msg.clearall = false; + } + else + msg.clearall = true; + + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETREPLSLOTCOUNTER); + + pgstat_send(&msg, sizeof(msg)); +} + /* ---------- * pgstat_report_autovac() - * @@ -1629,6 +1700,46 @@ pgstat_report_tempfile(size_t filesize) pgstat_send(&msg, sizeof(msg)); } +/* ---------- + * pgstat_report_replslot() - + * + * Tell the collector about replication slot statistics. + * ---------- + */ +void +pgstat_report_replslot(const char *slotname, int spilltxns, int spillcount, + int spillbytes) +{ + PgStat_MsgReplSlot msg; + + /* + * Prepare and send the message + */ + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_REPLSLOT); + memcpy(&msg.m_slotname, slotname, NAMEDATALEN); + msg.m_drop = false; + msg.m_spill_txns = spilltxns; + msg.m_spill_count = spillcount; + msg.m_spill_bytes = spillbytes; + pgstat_send(&msg, sizeof(PgStat_MsgReplSlot)); +} + +/* ---------- + * pgstat_report_replslot_drop() - + * + * Tell the collector about dropping the replication slot. + * ---------- + */ +void +pgstat_report_replslot_drop(const char *slotname) +{ + PgStat_MsgReplSlot msg; + + pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_REPLSLOT); + memcpy(&msg.m_slotname, slotname, NAMEDATALEN); + msg.m_drop = true; + pgstat_send(&msg, sizeof(PgStat_MsgReplSlot)); +} /* ---------- * pgstat_ping() - @@ -2674,6 +2785,21 @@ pgstat_fetch_global(void) return &globalStats; } +/* + * --------- + * pgstat_fetch_stat_wal() - + * + * Support function for the SQL-callable pgstat* functions. Returns + * a pointer to the WAL statistics struct. + * --------- + */ +PgStat_WalStats * +pgstat_fetch_stat_wal(void) +{ + backend_read_statsfile(); + + return &walStats; +} /* * --------- @@ -2691,6 +2817,23 @@ pgstat_fetch_slru(void) return slruStats; } +/* + * --------- + * pgstat_fetch_replslot() - + * + * Support function for the SQL-callable pgstat* functions. 
Returns + * a pointer to the replication slot statistics struct and sets the + * number of entries in nslots_p. + * --------- + */ +PgStat_ReplSlotStats * +pgstat_fetch_replslot(int *nslots_p) +{ + backend_read_statsfile(); + + *nslots_p = nReplSlotStats; + return replSlotStats; +} /* ------------------------------------------------------------ * Functions for management of the shared-memory PgBackendStatus array @@ -4419,6 +4562,38 @@ pgstat_send_bgwriter(void) MemSet(&BgWriterStats, 0, sizeof(BgWriterStats)); } +/* ---------- + * pgstat_send_wal() - + * + * Send WAL statistics to the collector + * ---------- + */ +void +pgstat_send_wal(void) +{ + /* We assume this initializes to zeroes */ + static const PgStat_MsgWal all_zeroes; + + /* + * This function can be called even if nothing at all has happened. In + * this case, avoid sending a completely empty message to the stats + * collector. + */ + if (memcmp(&WalStats, &all_zeroes, sizeof(PgStat_MsgWal)) == 0) + return; + + /* + * Prepare and send the message + */ + pgstat_setheader(&WalStats.m_hdr, PGSTAT_MTYPE_WAL); + pgstat_send(&WalStats, sizeof(WalStats)); + + /* + * Clear out the statistics buffer, so it can be re-used. 
+ */ + MemSet(&WalStats, 0, sizeof(WalStats)); +} + /* ---------- * pgstat_send_slru() - * @@ -4638,6 +4813,11 @@ PgstatCollectorMain(int argc, char *argv[]) len); break; + case PGSTAT_MTYPE_RESETREPLSLOTCOUNTER: + pgstat_recv_resetreplslotcounter(&msg.msg_resetreplslotcounter, + len); + break; + case PGSTAT_MTYPE_AUTOVAC_START: pgstat_recv_autovac(&msg.msg_autovacuum_start, len); break; @@ -4658,6 +4838,10 @@ PgstatCollectorMain(int argc, char *argv[]) pgstat_recv_bgwriter(&msg.msg_bgwriter, len); break; + case PGSTAT_MTYPE_WAL: + pgstat_recv_wal(&msg.msg_wal, len); + break; + case PGSTAT_MTYPE_SLRU: pgstat_recv_slru(&msg.msg_slru, len); break; @@ -4688,6 +4872,10 @@ PgstatCollectorMain(int argc, char *argv[]) len); break; + case PGSTAT_MTYPE_REPLSLOT: + pgstat_recv_replslot(&msg.msg_replslot, len); + break; + default: break; } @@ -4887,6 +5075,7 @@ pgstat_write_statsfiles(bool permanent, bool allDbs) const char *tmpfile = permanent ? PGSTAT_STAT_PERMANENT_TMPFILE : pgstat_stat_tmpname; const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename; int rc; + int i; elog(DEBUG2, "writing stats file \"%s\"", statfile); @@ -4927,6 +5116,12 @@ pgstat_write_statsfiles(bool permanent, bool allDbs) rc = fwrite(&archiverStats, sizeof(archiverStats), 1, fpout); (void) rc; /* we'll check for error with ferror */ + /* + * Write WAL stats struct + */ + rc = fwrite(&walStats, sizeof(walStats), 1, fpout); + (void) rc; /* we'll check for error with ferror */ + /* * Write SLRU stats struct */ @@ -4960,6 +5155,16 @@ pgstat_write_statsfiles(bool permanent, bool allDbs) (void) rc; /* we'll check for error with ferror */ } + /* + * Write replication slot stats struct + */ + for (i = 0; i < nReplSlotStats; i++) + { + fputc('R', fpout); + rc = fwrite(&replSlotStats[i], sizeof(PgStat_ReplSlotStats), 1, fpout); + (void) rc; /* we'll check for error with ferror */ + } + /* * No more output to be done. 
Close the temp file and replace the old * pgstat.stat with it. The ferror() check replaces testing for error @@ -5185,12 +5390,17 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + /* Allocate the space for replication slot statistics */ + replSlotStats = palloc0(max_replication_slots * sizeof(PgStat_ReplSlotStats)); + nReplSlotStats = 0; + /* - * Clear out global and archiver statistics so they start from zero in - * case we can't load an existing statsfile. + * Clear out global, archiver, WAL and SLRU statistics so they start from + * zero in case we can't load an existing statsfile. */ memset(&globalStats, 0, sizeof(globalStats)); memset(&archiverStats, 0, sizeof(archiverStats)); + memset(&walStats, 0, sizeof(walStats)); memset(&slruStats, 0, sizeof(slruStats)); /* @@ -5199,6 +5409,7 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) */ globalStats.stat_reset_timestamp = GetCurrentTimestamp(); archiverStats.stat_reset_timestamp = globalStats.stat_reset_timestamp; + walStats.stat_reset_timestamp = globalStats.stat_reset_timestamp; /* * Set the same reset timestamp for all SLRU items too. @@ -5206,6 +5417,12 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) for (i = 0; i < SLRU_NUM_ELEMENTS; i++) slruStats[i].stat_reset_timestamp = globalStats.stat_reset_timestamp; + /* + * Set the same reset timestamp for all replication slots too. + */ + for (i = 0; i < max_replication_slots; i++) + replSlotStats[i].stat_reset_timestamp = globalStats.stat_reset_timestamp; + /* * Try to open the stats file. 
If it doesn't exist, the backends simply * return zero for anything and the collector simply starts from scratch @@ -5268,6 +5485,17 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) goto done; } + /* + * Read WAL stats struct + */ + if (fread(&walStats, 1, sizeof(walStats), fpin) != sizeof(walStats)) + { + ereport(pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", statfile))); + memset(&walStats, 0, sizeof(walStats)); + goto done; + } + /* * Read SLRU stats struct */ @@ -5369,6 +5597,23 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) break; + /* + * 'R' A PgStat_ReplSlotStats struct describing a replication + * slot follows. + */ + case 'R': + if (fread(&replSlotStats[nReplSlotStats], 1, sizeof(PgStat_ReplSlotStats), fpin) + != sizeof(PgStat_ReplSlotStats)) + { + ereport(pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", + statfile))); + memset(&replSlotStats[nReplSlotStats], 0, sizeof(PgStat_ReplSlotStats)); + goto done; + } + nReplSlotStats++; + break; + case 'E': goto done; @@ -5557,7 +5802,8 @@ pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, * pgstat_read_db_statsfile_timestamp() - * * Attempt to determine the timestamp of the last db statfile write. - * Returns true if successful; the timestamp is stored in *ts. + * Returns true if successful; the timestamp is stored in *ts. The caller must + * rely on timestamp stored in *ts iff the function returns true. 
* * This needs to be careful about handling databases for which no stats file * exists, such as databases without a stat entry or those not yet written: @@ -5577,7 +5823,9 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, PgStat_StatDBEntry dbentry; PgStat_GlobalStats myGlobalStats; PgStat_ArchiverStats myArchiverStats; + PgStat_WalStats myWalStats; PgStat_SLRUStats mySLRUStats[SLRU_NUM_ELEMENTS]; + PgStat_ReplSlotStats myReplSlotStats; FILE *fpin; int32 format_id; const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename; @@ -5632,6 +5880,17 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, return false; } + /* + * Read WAL stats struct + */ + if (fread(&myWalStats, 1, sizeof(myWalStats), fpin) != sizeof(myWalStats)) + { + ereport(pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", statfile))); + FreeFile(fpin); + return false; + } + /* * Read SLRU stats struct */ @@ -5665,7 +5924,8 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, ereport(pgStatRunningInCollector ? LOG : WARNING, (errmsg("corrupted statistics file \"%s\"", statfile))); - goto done; + FreeFile(fpin); + return false; } /* @@ -5680,14 +5940,33 @@ pgstat_read_db_statsfile_timestamp(Oid databaseid, bool permanent, break; + /* + * 'R' A PgStat_ReplSlotStats struct describing a replication + * slot follows. + */ + case 'R': + if (fread(&myReplSlotStats, 1, sizeof(PgStat_ReplSlotStats), fpin) + != sizeof(PgStat_ReplSlotStats)) + { + ereport(pgStatRunningInCollector ? LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", + statfile))); + FreeFile(fpin); + return false; + } + break; + case 'E': goto done; default: - ereport(pgStatRunningInCollector ? LOG : WARNING, - (errmsg("corrupted statistics file \"%s\"", - statfile))); - goto done; + { + ereport(pgStatRunningInCollector ? 
LOG : WARNING, + (errmsg("corrupted statistics file \"%s\"", + statfile))); + FreeFile(fpin); + return false; + } } } @@ -6208,6 +6487,12 @@ pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, int len) memset(&archiverStats, 0, sizeof(archiverStats)); archiverStats.stat_reset_timestamp = GetCurrentTimestamp(); } + else if (msg->m_resettarget == RESET_WAL) + { + /* Reset the WAL statistics for the cluster. */ + memset(&walStats, 0, sizeof(walStats)); + walStats.stat_reset_timestamp = GetCurrentTimestamp(); + } /* * Presumably the sender of this message validated the target, don't @@ -6266,6 +6551,46 @@ pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len) } } +/* ---------- + * pgstat_recv_resetreplslotcounter() - + * + * Reset some replication slot statistics of the cluster. + * ---------- + */ +static void +pgstat_recv_resetreplslotcounter(PgStat_MsgResetreplslotcounter *msg, + int len) +{ + int i; + int idx = -1; + TimestampTz ts; + + ts = GetCurrentTimestamp(); + if (msg->clearall) + { + for (i = 0; i < nReplSlotStats; i++) + pgstat_reset_replslot(i, ts); + } + else + { + /* Get the index of replication slot statistics to reset */ + idx = pgstat_replslot_index(msg->m_slotname, false); + + /* + * Nothing to do if the given slot entry is not found. This could + * happen when the slot with the given name is removed and the + * corresponding statistics entry is also removed before receiving the + * reset message. + */ + if (idx < 0) + return; + + /* Reset the stats for the requested replication slot */ + pgstat_reset_replslot(idx, ts); + } +} + + /* ---------- * pgstat_recv_autovac() - * @@ -6422,6 +6747,18 @@ pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len) globalStats.buf_alloc += msg->m_buf_alloc; } +/* ---------- + * pgstat_recv_wal() - + * + * Process a WAL message. 
+ * ---------- + */ +static void +pgstat_recv_wal(PgStat_MsgWal *msg, int len) +{ + walStats.wal_buffers_full += msg->m_wal_buffers_full; +} + /* ---------- * pgstat_recv_slru() - * @@ -6513,6 +6850,51 @@ pgstat_recv_checksum_failure(PgStat_MsgChecksumFailure *msg, int len) dbentry->last_checksum_failure = msg->m_failure_time; } +/* ---------- + * pgstat_recv_replslot() - + * + * Process a REPLSLOT message. + * ---------- + */ +static void +pgstat_recv_replslot(PgStat_MsgReplSlot *msg, int len) +{ + int idx; + + /* + * Get the index of replication slot statistics. On dropping, we don't + * create the new statistics. + */ + idx = pgstat_replslot_index(msg->m_slotname, !msg->m_drop); + + /* + * The slot entry is not found or there is no space to accommodate the new + * entry. This could happen when the message for the creation of a slot + * reached before the drop message even though the actual operations + * happen in reverse order. In such a case, the next update of the + * statistics for the same slot will create the required entry. + */ + if (idx < 0) + return; + + Assert(idx >= 0 && idx <= max_replication_slots); + if (msg->m_drop) + { + /* Remove the replication slot statistics with the given name */ + memcpy(&replSlotStats[idx], &replSlotStats[nReplSlotStats - 1], + sizeof(PgStat_ReplSlotStats)); + nReplSlotStats--; + Assert(nReplSlotStats >= 0); + } + else + { + /* Update the replication slot statistics */ + replSlotStats[idx].spill_txns += msg->m_spill_txns; + replSlotStats[idx].spill_count += msg->m_spill_count; + replSlotStats[idx].spill_bytes += msg->m_spill_bytes; + } +} + /* ---------- * pgstat_recv_tempfile() - * @@ -6695,6 +7077,57 @@ pgstat_clip_activity(const char *raw_activity) return activity; } +/* ---------- + * pgstat_replslot_index + * + * Return the index of entry of a replication slot with the given name, or + * -1 if the slot is not found. + * + * create_it tells whether to create the new slot entry if it is not found. 
+ * ---------- + */ +static int +pgstat_replslot_index(const char *name, bool create_it) +{ + int i; + + Assert(nReplSlotStats <= max_replication_slots); + for (i = 0; i < nReplSlotStats; i++) + { + if (strcmp(replSlotStats[i].slotname, name) == 0) + return i; /* found */ + } + + /* + * The slot is not found. We don't want to register the new statistics if + * the list is already full or the caller didn't request. + */ + if (i == max_replication_slots || !create_it) + return -1; + + /* Register new slot */ + memset(&replSlotStats[nReplSlotStats], 0, sizeof(PgStat_ReplSlotStats)); + memcpy(&replSlotStats[nReplSlotStats].slotname, name, NAMEDATALEN); + + return nReplSlotStats++; +} + +/* ---------- + * pgstat_reset_replslot + * + * Reset the replication slot stats at index 'i'. + * ---------- + */ +static void +pgstat_reset_replslot(int i, TimestampTz ts) +{ + /* reset only counters. Don't clear slot name */ + replSlotStats[i].spill_txns = 0; + replSlotStats[i].spill_count = 0; + replSlotStats[i].spill_bytes = 0; + replSlotStats[i].stat_reset_timestamp = ts; +} + /* * pgstat_slru_index * diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 42223c0f61e20..959e3b8873818 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -112,6 +112,7 @@ #include "postmaster/autovacuum.h" #include "postmaster/bgworker_internals.h" #include "postmaster/fork_process.h" +#include "postmaster/interrupt.h" #include "postmaster/pgarch.h" #include "postmaster/postmaster.h" #include "postmaster/syslogger.h" @@ -405,7 +406,7 @@ static void SIGHUP_handler(SIGNAL_ARGS); static void pmdie(SIGNAL_ARGS); static void reaper(SIGNAL_ARGS); static void sigusr1_handler(SIGNAL_ARGS); -static void startup_die(SIGNAL_ARGS); +static void process_startup_packet_die(SIGNAL_ARGS); static void dummy_handler(SIGNAL_ARGS); static void StartupPacketTimeoutHandler(void); static void CleanupBackend(int pid, int exitstatus); @@ -1849,6 
+1850,8 @@ ServerLoop(void) (now - AbortStartTime) >= SIGKILL_CHILDREN_AFTER_SECS) { /* We were gentle with them before. Not anymore */ + ereport(LOG, + (errmsg("issuing SIGKILL to recalcitrant children"))); TerminateChildren(SIGKILL); /* reset flag so we don't SIGKILL again */ AbortStartTime = 0; @@ -4297,6 +4300,8 @@ report_fork_failure_to_client(Port *port, int errnum) * returns: nothing. Will not return at all if there's any failure. * * Note: this code does not depend on having any access to shared memory. + * Indeed, our approach to SIGTERM/timeout handling *requires* that + * shared memory not have been touched yet; see comments within. * In the EXEC_BACKEND case, we are physically attached to shared memory * but have not yet set up most of our local pointers to shmem structures. */ @@ -4340,22 +4345,17 @@ BackendInitialize(Port *port) whereToSendOutput = DestRemote; /* now safe to ereport to client */ /* - * We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT or - * timeout while trying to collect the startup packet. Otherwise the - * postmaster cannot shutdown the database FAST or IMMED cleanly if a - * buggy client fails to send the packet promptly. XXX it follows that - * the remainder of this function must tolerate losing control at any - * instant. Likewise, any pg_on_exit_callback registered before or during - * this function must be prepared to execute at any instant between here - * and the end of this function. Furthermore, affected callbacks execute - * partially or not at all when a second exit-inducing signal arrives - * after proc_exit_prepare() decrements on_proc_exit_index. (Thanks to - * that mechanic, callbacks need not anticipate more than one call.) This - * is fragile; it ought to instead follow the norm of handling interrupts - * at selected, safe opportunities. 
- */ - pqsignal(SIGTERM, startup_die); - pqsignal(SIGQUIT, startup_die); + * We arrange to do _exit(1) if we receive SIGTERM or timeout while trying + * to collect the startup packet; while SIGQUIT results in _exit(2). + * Otherwise the postmaster cannot shutdown the database FAST or IMMED + * cleanly if a buggy client fails to send the packet promptly. + * + * Exiting with _exit(1) is only possible because we have not yet touched + * shared memory; therefore no outside-the-process state needs to get + * cleaned up. + */ + pqsignal(SIGTERM, process_startup_packet_die); + /* SIGQUIT handler was already set up by InitPostmasterChild */ InitializeTimeouts(); /* establishes SIGALRM handler */ PG_SETMASK(&StartupBlockSig); @@ -4411,8 +4411,8 @@ BackendInitialize(Port *port) port->remote_hostname = strdup(remote_host); /* - * Ready to begin client interaction. We will give up and exit(1) after a - * time delay, so that a broken client can't hog a connection + * Ready to begin client interaction. We will give up and _exit(1) after + * a time delay, so that a broken client can't hog a connection * indefinitely. PreAuthDelay and any DNS interactions above don't count * against the time limit. * @@ -4434,6 +4434,23 @@ BackendInitialize(Port *port) */ status = ProcessStartupPacket(port, false, false); + /* + * Disable the timeout, and prevent SIGTERM again. + */ + disable_timeout(STARTUP_PACKET_TIMEOUT, false); + PG_SETMASK(&BlockSig); + + /* + * As a safety check that nothing in startup has yet performed + * shared-memory modifications that would need to be undone if we had + * exited through SIGTERM or timeout above, check that no on_shmem_exit + * handlers have been registered yet. (This isn't terribly bulletproof, + * since someone might misuse an on_proc_exit handler for shmem cleanup, + * but it's a cheap and helpful check. We cannot disallow on_proc_exit + * handlers unfortunately, since pq_init() already registered one.) 
+ */ + check_on_shmem_exit_lists_are_empty(); + /* * Stop here if it was bad or a cancel packet. ProcessStartupPacket * already did any appropriate error reporting. @@ -4459,12 +4476,6 @@ BackendInitialize(Port *port) pfree(ps_data.data); set_ps_display("initializing"); - - /* - * Disable the timeout, and prevent SIGTERM/SIGQUIT again. - */ - disable_timeout(STARTUP_PACKET_TIMEOUT, false); - PG_SETMASK(&BlockSig); } @@ -4972,10 +4983,6 @@ SubPostmasterMain(int argc, char *argv[]) if (strcmp(argv[1], "--forkavworker") == 0) AutovacuumWorkerIAm(); - /* In EXEC_BACKEND case we will not have inherited these settings */ - pqinitmask(); - PG_SETMASK(&BlockSig); - /* Read in remaining GUC variables */ read_nondefault_variables(); @@ -5359,18 +5366,22 @@ sigusr1_handler(SIGNAL_ARGS) } /* - * SIGTERM or SIGQUIT while processing startup packet. - * Clean up and exit(1). + * SIGTERM while processing startup packet. + * + * Running proc_exit() from a signal handler would be quite unsafe. + * However, since we have not yet touched shared memory, we can just + * pull the plug and exit without running any atexit handlers. * - * XXX: possible future improvement: try to send a message indicating - * why we are disconnecting. Problem is to be sure we don't block while - * doing so, nor mess up SSL initialization. In practice, if the client - * has wedged here, it probably couldn't do anything with the message anyway. + * One might be tempted to try to send a message, or log one, indicating + * why we are disconnecting. However, that would be quite unsafe in itself. + * Also, it seems undesirable to provide clues about the database's state + * to a client that has not yet completed authentication, or even sent us + * a startup packet. */ static void -startup_die(SIGNAL_ARGS) +process_startup_packet_die(SIGNAL_ARGS) { - proc_exit(1); + _exit(1); } /* @@ -5389,12 +5400,12 @@ dummy_handler(SIGNAL_ARGS) /* * Timeout while processing startup packet. 
- * As for startup_die(), we clean up and exit(1). + * As for process_startup_packet_die(), we exit via _exit(1). */ static void StartupPacketTimeoutHandler(void) { - proc_exit(1); + _exit(1); } diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c index fd9ac35dac1ff..64af7b8707cc6 100644 --- a/src/backend/postmaster/startup.c +++ b/src/backend/postmaster/startup.c @@ -175,7 +175,7 @@ StartupProcessMain(void) pqsignal(SIGHUP, StartupProcSigHupHandler); /* reload config file */ pqsignal(SIGINT, SIG_IGN); /* ignore query cancel */ pqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */ - pqsignal(SIGQUIT, SignalHandlerForCrashExit); + /* SIGQUIT handler was already set up by InitPostmasterChild */ InitializeTimeouts(); /* establishes SIGALRM handler */ pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, procsignal_sigusr1_handler); diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c index 45a2757969be8..a52832fe900aa 100644 --- a/src/backend/postmaster/walwriter.c +++ b/src/backend/postmaster/walwriter.c @@ -101,7 +101,7 @@ WalWriterMain(void) pqsignal(SIGHUP, SignalHandlerForConfigReload); pqsignal(SIGINT, SignalHandlerForShutdownRequest); pqsignal(SIGTERM, SignalHandlerForShutdownRequest); - pqsignal(SIGQUIT, SignalHandlerForCrashExit); + /* SIGQUIT handler was already set up by InitPostmasterChild */ pqsignal(SIGALRM, SIG_IGN); pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, procsignal_sigusr1_handler); @@ -112,9 +112,6 @@ WalWriterMain(void) */ pqsignal(SIGCHLD, SIG_DFL); - /* We allow SIGQUIT (quickdie) at all times */ - sigdelset(&BlockSig, SIGQUIT); - /* * Create a memory context that we will do all our work in. We do this so * that we can reset the context during error recovery and thereby avoid @@ -129,7 +126,20 @@ WalWriterMain(void) /* * If an exception is encountered, processing resumes here. * - * This code is heavily based on bgwriter.c, q.v. 
+ * You might wonder why this isn't coded as an infinite loop around a + * PG_TRY construct. The reason is that this is the bottom of the + * exception stack, and so with PG_TRY there would be no exception handler + * in force at all during the CATCH part. By leaving the outermost setjmp + * always active, we have at least some chance of recovering from an error + * during error recovery. (If we get into an infinite loop thereby, it + * will soon be stopped by overflow of elog.c's internal state stack.) + * + * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask + * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus, + * signals other than SIGQUIT will be blocked until we complete error + * recovery. It might seem that this policy makes the HOLD_INTERRUPTS() + * call redundant, but it is not since InterruptPending might be set + * already. */ if (sigsetjmp(local_sigjmp_buf, 1) != 0) { diff --git a/src/backend/replication/backup_manifest.c b/src/backend/replication/backup_manifest.c index b6260049271b4..556e6b5040852 100644 --- a/src/backend/replication/backup_manifest.c +++ b/src/backend/replication/backup_manifest.c @@ -112,7 +112,7 @@ AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid, initStringInfo(&buf); if (manifest->first_file) { - appendStringInfoString(&buf, "\n"); + appendStringInfoChar(&buf, '\n'); manifest->first_file = false; } else @@ -152,7 +152,7 @@ AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid, enlargeStringInfo(&buf, 128); buf.len += pg_strftime(&buf.data[buf.len], 128, "%Y-%m-%d %H:%M:%S %Z", pg_gmtime(&mtime)); - appendStringInfoString(&buf, "\""); + appendStringInfoChar(&buf, '"'); /* Add checksum information. 
*/ if (checksum_ctx->type != CHECKSUM_TYPE_NONE) @@ -168,7 +168,7 @@ AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid, enlargeStringInfo(&buf, 2 * checksumlen); buf.len += hex_encode((char *) checksumbuf, checksumlen, &buf.data[buf.len]); - appendStringInfoString(&buf, "\""); + appendStringInfoChar(&buf, '"'); } /* Close out the object. */ @@ -272,7 +272,7 @@ AddWALInfoToBackupManifest(backup_manifest_info *manifest, XLogRecPtr startptr, */ if (!found_start_timeline) ereport(ERROR, - errmsg("start timeline %u not found history of timeline %u", + errmsg("start timeline %u not found in history of timeline %u", starttli, endtli)); /* Terminate the list of WAL ranges. */ diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 6064384e32a4b..b89df01fa76fb 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -719,7 +719,10 @@ perform_base_backup(basebackup_options *opt) { if (total_checksum_failures > 1) ereport(WARNING, - (errmsg("%lld total checksum verification failures", total_checksum_failures))); + (errmsg_plural("%lld total checksum verification failure", + "%lld total checksum verification failures", + total_checksum_failures, + total_checksum_failures))); ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index ad574099ff700..24f8b3e42ecee 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -427,7 +427,7 @@ libpqrcv_startstreaming(WalReceiverConn *conn, if (options->proto.logical.streaming && PQserverVersion(conn->streamConn) >= 140000) - appendStringInfo(&cmd, ", streaming 'on'"); + appendStringInfoString(&cmd, ", streaming 'on'"); pubnames = options->proto.logical.publication_names; pubnames_str = 
stringlist_to_identifierstr(conn->streamConn, pubnames); diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index f21f61d5e10b0..3f84ee99b8633 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -650,6 +650,12 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, /* replay actions of all transaction + subtransactions in order */ ReorderBufferCommit(ctx->reorder, xid, buf->origptr, buf->endptr, commit_time, origin_id, origin_lsn); + + /* + * Update the decoding stats at transaction commit/abort. It is not clear + * that sending more or less frequently than this would be better. + */ + UpdateDecodingStats(ctx); } /* @@ -669,6 +675,9 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, } ReorderBufferAbort(ctx->reorder, xid, buf->record->EndRecPtr); + + /* update the decoding stats */ + UpdateDecodingStats(ctx); } /* diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 0f6af952f9394..8675832f4d6e0 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -32,6 +32,7 @@ #include "access/xlog_internal.h" #include "fmgr.h" #include "miscadmin.h" +#include "pgstat.h" #include "replication/decode.h" #include "replication/logical.h" #include "replication/origin.h" @@ -1460,3 +1461,31 @@ ResetLogicalStreamingState(void) CheckXidAlive = InvalidTransactionId; bsysscan = false; } + +/* + * Report stats for a slot. + */ +void +UpdateDecodingStats(LogicalDecodingContext *ctx) +{ + ReorderBuffer *rb = ctx->reorder; + + /* + * Nothing to do if we haven't spilled anything since the last time the + * stats has been sent. 
+ */ + if (rb->spillBytes <= 0) + return; + + elog(DEBUG2, "UpdateDecodingStats: updating stats %p %lld %lld %lld", + rb, + (long long) rb->spillTxns, + (long long) rb->spillCount, + (long long) rb->spillBytes); + + pgstat_report_replslot(NameStr(ctx->slot->data.name), + rb->spillTxns, rb->spillCount, rb->spillBytes); + rb->spillTxns = 0; + rb->spillCount = 0; + rb->spillBytes = 0; +} diff --git a/src/backend/replication/logical/message.c b/src/backend/replication/logical/message.c index db33cbe5a7a29..bd4b08543e66f 100644 --- a/src/backend/replication/logical/message.c +++ b/src/backend/replication/logical/message.c @@ -59,6 +59,7 @@ LogLogicalMessage(const char *prefix, const char *message, size_t size, xlrec.dbId = MyDatabaseId; xlrec.transactional = transactional; + /* trailing zero is critical; see logicalmsg_desc */ xlrec.prefix_size = strlen(prefix) + 1; xlrec.message_size = size; diff --git a/src/backend/replication/logical/relation.c b/src/backend/replication/logical/relation.c index a60c73d74d5b8..07aa52977f961 100644 --- a/src/backend/replication/logical/relation.c +++ b/src/backend/replication/logical/relation.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * relation.c - * PostgreSQL logical replication + * PostgreSQL logical replication relation mapping cache * * Copyright (c) 2016-2020, PostgreSQL Global Development Group * @@ -8,8 +8,9 @@ * src/backend/replication/logical/relation.c * * NOTES - * This file contains helper functions for logical replication relation - * mapping cache. + * Routines in this file mainly have to do with mapping the properties + * of local replication target relations to the properties of their + * remote counterpart. 
* *------------------------------------------------------------------------- */ @@ -77,7 +78,7 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid) { if (entry->localreloid == reloid) { - entry->localreloid = InvalidOid; + entry->localrelvalid = false; hash_seq_term(&status); break; } @@ -91,7 +92,7 @@ logicalrep_relmap_invalidate_cb(Datum arg, Oid reloid) hash_seq_init(&status, LogicalRepRelMap); while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL) - entry->localreloid = InvalidOid; + entry->localrelvalid = false; } } @@ -227,18 +228,53 @@ logicalrep_rel_att_by_name(LogicalRepRelation *remoterel, const char *attname) return -1; } +/* + * Report error with names of the missing local relation column(s), if any. + */ +static void +logicalrep_report_missing_attrs(LogicalRepRelation *remoterel, + Bitmapset *missingatts) +{ + if (!bms_is_empty(missingatts)) + { + StringInfoData missingattsbuf; + int missingattcnt = 0; + int i; + + initStringInfo(&missingattsbuf); + + while ((i = bms_first_member(missingatts)) >= 0) + { + missingattcnt++; + if (missingattcnt == 1) + appendStringInfo(&missingattsbuf, _("\"%s\""), + remoterel->attnames[i]); + else + appendStringInfo(&missingattsbuf, _(", \"%s\""), + remoterel->attnames[i]); + } + + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg_plural("logical replication target relation \"%s.%s\" is missing replicated column: %s", + "logical replication target relation \"%s.%s\" is missing replicated columns: %s", + missingattcnt, + remoterel->nspname, + remoterel->relname, + missingattsbuf.data))); + } +} + /* * Open the local relation associated with the remote one. * - * Optionally rebuilds the Relcache mapping if it was invalidated - * by local DDL. + * Rebuilds the Relcache mapping if it was invalidated by local DDL. 
*/ LogicalRepRelMapEntry * logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) { LogicalRepRelMapEntry *entry; bool found; - Oid relid = InvalidOid; LogicalRepRelation *remoterel; if (LogicalRepRelMap == NULL) @@ -254,14 +290,45 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) remoterel = &entry->remoterel; + /* Ensure we don't leak a relcache refcount. */ + if (entry->localrel) + elog(ERROR, "remote relation ID %u is already open", remoteid); + /* * When opening and locking a relation, pending invalidation messages are - * processed which can invalidate the relation. We need to update the - * local cache both when we are first time accessing the relation and when - * the relation is invalidated (aka entry->localreloid is set InvalidOid). + * processed which can invalidate the relation. Hence, if the entry is + * currently considered valid, try to open the local relation by OID and + * see if invalidation ensues. + */ + if (entry->localrelvalid) + { + entry->localrel = try_table_open(entry->localreloid, lockmode); + if (!entry->localrel) + { + /* Table was renamed or dropped. */ + entry->localrelvalid = false; + } + else if (!entry->localrelvalid) + { + /* Note we release the no-longer-useful lock here. */ + table_close(entry->localrel, lockmode); + entry->localrel = NULL; + } + } + + /* + * If the entry has been marked invalid since we last had lock on it, + * re-open the local relation by name and rebuild all derived data. */ - if (!OidIsValid(entry->localreloid)) + if (!entry->localrelvalid) { + Oid relid; + Bitmapset *idkey; + TupleDesc desc; + MemoryContext oldctx; + int i; + Bitmapset *missingatts; + /* Try to find and lock the relation by name. 
*/ relid = RangeVarGetRelid(makeRangeVar(remoterel->nspname, remoterel->relname, -1), @@ -272,21 +339,7 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) errmsg("logical replication target relation \"%s.%s\" does not exist", remoterel->nspname, remoterel->relname))); entry->localrel = table_open(relid, NoLock); - - } - else - { - relid = entry->localreloid; - entry->localrel = table_open(entry->localreloid, lockmode); - } - - if (!OidIsValid(entry->localreloid)) - { - int found; - Bitmapset *idkey; - TupleDesc desc; - MemoryContext oldctx; - int i; + entry->localreloid = relid; /* Check for supported relkind. */ CheckSubscriptionRelkind(entry->localrel->rd_rel->relkind, @@ -302,7 +355,8 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) entry->attrmap = make_attrmap(desc->natts); MemoryContextSwitchTo(oldctx); - found = 0; + /* check and report missing attrs, if any */ + missingatts = bms_add_range(NULL, 0, remoterel->natts - 1); for (i = 0; i < desc->natts; i++) { int attnum; @@ -319,16 +373,13 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) entry->attrmap->attnums[i] = attnum; if (attnum >= 0) - found++; + missingatts = bms_del_member(missingatts, attnum); } - /* TODO, detail message with names of missing columns */ - if (found < remoterel->natts) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("logical replication target relation \"%s.%s\" is missing " - "some replicated columns", - remoterel->nspname, remoterel->relname))); + logicalrep_report_missing_attrs(remoterel, missingatts); + + /* be tidy */ + bms_free(missingatts); /* * Check that replica identity matches. 
We allow for stricter replica @@ -380,14 +431,13 @@ logicalrep_rel_open(LogicalRepRelId remoteid, LOCKMODE lockmode) } } - entry->localreloid = relid; + entry->localrelvalid = true; } if (entry->state != SUBREL_STATE_READY) entry->state = GetSubscriptionRelState(MySubscription->oid, entry->localreloid, - &entry->statelsn, - true); + &entry->statelsn); return entry; } @@ -523,7 +573,7 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid) { if (entry->localreloid == reloid) { - entry->localreloid = InvalidOid; + entry->localrelvalid = false; hash_seq_term(&status); break; } @@ -537,7 +587,7 @@ logicalrep_partmap_invalidate_cb(Datum arg, Oid reloid) hash_seq_init(&status, LogicalRepPartMap); while ((entry = (LogicalRepRelMapEntry *) hash_seq_search(&status)) != NULL) - entry->localreloid = InvalidOid; + entry->localrelvalid = false; } } @@ -631,8 +681,8 @@ logicalrep_partition_open(LogicalRepRelMapEntry *root, /* * If the partition's attributes don't match the root relation's, we'll * need to make a new attrmap which maps partition attribute numbers to - * remoterel's, instead the original which maps root relation's attribute - * numbers to remoterel's. + * remoterel's, instead of the original which maps root relation's + * attribute numbers to remoterel's. * * Note that 'map' which comes from the tuple routing data structure * contains 1-based attribute numbers (of the parent relation). However, @@ -656,6 +706,8 @@ logicalrep_partition_open(LogicalRepRelMapEntry *root, entry->updatable = root->updatable; + entry->localrelvalid = true; + /* state and statelsn are left set to 0. 
*/ MemoryContextSwitchTo(oldctx); diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 1975d629a6e2f..7a8bf760791c0 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -235,7 +235,7 @@ static void ReorderBufferIterTXNInit(ReorderBuffer *rb, ReorderBufferTXN *txn, static ReorderBufferChange *ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state); static void ReorderBufferIterTXNFinish(ReorderBuffer *rb, ReorderBufferIterTXNState *state); -static void ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTXN *txn); +static void ReorderBufferExecuteInvalidations(uint32 nmsgs, SharedInvalidationMessage *msgs); /* * --------------------------------------- @@ -343,6 +343,10 @@ ReorderBufferAllocate(void) buffer->outbufsize = 0; buffer->size = 0; + buffer->spillTxns = 0; + buffer->spillCount = 0; + buffer->spillBytes = 0; + buffer->current_restart_decoding_lsn = InvalidXLogRecPtr; dlist_init(&buffer->toplevel_by_lsn); @@ -482,6 +486,11 @@ ReorderBufferReturnChange(ReorderBuffer *rb, ReorderBufferChange *change, pfree(change->data.msg.message); change->data.msg.message = NULL; break; + case REORDER_BUFFER_CHANGE_INVALIDATION: + if (change->data.inval.invalidations) + pfree(change->data.inval.invalidations); + change->data.inval.invalidations = NULL; + break; case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT: if (change->data.snapshot) { @@ -1428,7 +1437,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) ReorderBufferCleanupTXN(rb, subtxn); } - /* cleanup changes in the toplevel txn */ + /* cleanup changes in the txn */ dlist_foreach_modify(iter, &txn->changes) { ReorderBufferChange *change; @@ -1529,7 +1538,7 @@ ReorderBufferTruncateTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) ReorderBufferTruncateTXN(rb, subtxn); } - /* cleanup changes in the toplevel txn */ + /* cleanup changes in the txn */ 
dlist_foreach_modify(iter, &txn->changes) { ReorderBufferChange *change; @@ -1579,6 +1588,13 @@ ReorderBufferTruncateTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) { ReorderBufferRestoreCleanup(rb, txn); txn->txn_flags &= ~RBTXN_IS_SERIALIZED; + + /* + * We set this flag to indicate if the transaction is ever serialized. + * We need this to accurately update the stats as otherwise the same + * transaction can be counted as serialized multiple times. + */ + txn->txn_flags |= RBTXN_IS_SERIALIZED_CLEAR; } /* also reset the number of entries in the transaction */ @@ -2183,6 +2199,13 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, ReorderBufferApplyMessage(rb, txn, change, streaming); break; + case REORDER_BUFFER_CHANGE_INVALIDATION: + /* Execute the invalidation messages locally */ + ReorderBufferExecuteInvalidations( + change->data.inval.ninvalidations, + change->data.inval.invalidations); + break; + case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT: /* get rid of the old */ TeardownHistoricSnapshot(false); @@ -2233,13 +2256,6 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, TeardownHistoricSnapshot(false); SetupHistoricSnapshot(snapshot_now, txn->tuplecid_hash); - - /* - * Every time the CommandId is incremented, we could - * see new catalog contents, so execute all - * invalidations. 
- */ - ReorderBufferExecuteInvalidations(rb, txn); } break; @@ -2306,7 +2322,7 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, AbortCurrentTransaction(); /* make sure there's no cache pollution */ - ReorderBufferExecuteInvalidations(rb, txn); + ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations); if (using_subtxn) RollbackAndReleaseCurrentSubTransaction(); @@ -2345,7 +2361,8 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, AbortCurrentTransaction(); /* make sure there's no cache pollution */ - ReorderBufferExecuteInvalidations(rb, txn); + ReorderBufferExecuteInvalidations(txn->ninvalidations, + txn->invalidations); if (using_subtxn) RollbackAndReleaseCurrentSubTransaction(); @@ -2802,10 +2819,13 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid, * Setup the invalidation of the toplevel transaction. * * This needs to be called for each XLOG_XACT_INVALIDATIONS message and - * accumulates all the invalidation messages in the toplevel transaction. - * This is required because in some cases where we skip processing the - * transaction (see ReorderBufferForget), we need to execute all the - * invalidations together. + * accumulates all the invalidation messages in the toplevel transaction as + * well as in the form of change in reorder buffer. We require to record it in + * form of the change so that we can execute only the required invalidations + * instead of executing all the invalidations on each CommandId increment. We + * also need to accumulate these in the toplevel transaction because in some + * cases we skip processing the transaction (see ReorderBufferForget), we need + * to execute all the invalidations together. 
*/ void ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, @@ -2813,12 +2833,16 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, SharedInvalidationMessage *msgs) { ReorderBufferTXN *txn; + MemoryContext oldcontext; + ReorderBufferChange *change; txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); + oldcontext = MemoryContextSwitchTo(rb->context); + /* - * We collect all the invalidations under the top transaction so that we - * can execute them all together. + * Collect all the invalidations under the top transaction so that we can + * execute them all together. See comment atop this function */ if (txn->toptxn) txn = txn->toptxn; @@ -2830,8 +2854,7 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, { txn->ninvalidations = nmsgs; txn->invalidations = (SharedInvalidationMessage *) - MemoryContextAlloc(rb->context, - sizeof(SharedInvalidationMessage) * nmsgs); + palloc(sizeof(SharedInvalidationMessage) * nmsgs); memcpy(txn->invalidations, msgs, sizeof(SharedInvalidationMessage) * nmsgs); } @@ -2845,6 +2868,18 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, nmsgs * sizeof(SharedInvalidationMessage)); txn->ninvalidations += nmsgs; } + + change = ReorderBufferGetChange(rb); + change->action = REORDER_BUFFER_CHANGE_INVALIDATION; + change->data.inval.ninvalidations = nmsgs; + change->data.inval.invalidations = (SharedInvalidationMessage *) + palloc(sizeof(SharedInvalidationMessage) * nmsgs); + memcpy(change->data.inval.invalidations, msgs, + sizeof(SharedInvalidationMessage) * nmsgs); + + ReorderBufferQueueChange(rb, xid, lsn, change, false); + + MemoryContextSwitchTo(oldcontext); } /* @@ -2852,12 +2887,12 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, * in the changestream but we don't know which those are. 
*/ static void -ReorderBufferExecuteInvalidations(ReorderBuffer *rb, ReorderBufferTXN *txn) +ReorderBufferExecuteInvalidations(uint32 nmsgs, SharedInvalidationMessage *msgs) { int i; - for (i = 0; i < txn->ninvalidations; i++) - LocalExecuteInvalidationMessage(&txn->invalidations[i]); + for (i = 0; i < nmsgs; i++) + LocalExecuteInvalidationMessage(&msgs[i]); } /* @@ -3112,6 +3147,7 @@ ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) int fd = -1; XLogSegNo curOpenSegNo = 0; Size spilled = 0; + Size size = txn->size; elog(DEBUG2, "spill %u changes in XID %u to disk", (uint32) txn->nentries_mem, txn->xid); @@ -3170,6 +3206,16 @@ ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) spilled++; } + /* update the statistics iff we have spilled anything */ + if (spilled) + { + rb->spillCount += 1; + rb->spillBytes += size; + + /* don't consider already serialized transactions */ + rb->spillTxns += (rbtxn_is_serialized(txn) || rbtxn_is_serialized_clear(txn)) ? 0 : 1; + } + Assert(spilled == txn->nentries_mem); Assert(dlist_is_empty(&txn->changes)); txn->nentries_mem = 0; @@ -3279,6 +3325,24 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, change->data.msg.message_size); data += change->data.msg.message_size; + break; + } + case REORDER_BUFFER_CHANGE_INVALIDATION: + { + char *data; + Size inval_size = sizeof(SharedInvalidationMessage) * + change->data.inval.ninvalidations; + + sz += inval_size; + + ReorderBufferSerializeReserve(rb, sz); + data = ((char *) rb->outbuf) + sizeof(ReorderBufferDiskChange); + + /* might have been reallocated above */ + ondisk = (ReorderBufferDiskChange *) rb->outbuf; + memcpy(data, change->data.inval.invalidations, inval_size); + data += inval_size; + break; } case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT: @@ -3556,6 +3620,12 @@ ReorderBufferChangeSize(ReorderBufferChange *change) break; } + case REORDER_BUFFER_CHANGE_INVALIDATION: + { + sz += sizeof(SharedInvalidationMessage) * + 
change->data.inval.ninvalidations; + break; + } case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT: { Snapshot snap; @@ -3822,6 +3892,19 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, change->data.msg.message_size); data += change->data.msg.message_size; + break; + } + case REORDER_BUFFER_CHANGE_INVALIDATION: + { + Size inval_size = sizeof(SharedInvalidationMessage) * + change->data.inval.ninvalidations; + + change->data.inval.invalidations = + MemoryContextAlloc(rb->context, inval_size); + + /* read the message */ + memcpy(change->data.inval.invalidations, data, inval_size); + break; } case REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT: diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index c27d97058955a..a91b00ed4bcf0 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * tablesync.c - * PostgreSQL logical replication + * PostgreSQL logical replication: initial table data synchronization * * Copyright (c) 2012-2020, PostgreSQL Global Development Group * @@ -26,26 +26,30 @@ * - It allows us to synchronize any tables added after the initial * synchronization has finished. * - * The stream position synchronization works in multiple steps. - * - Sync finishes copy and sets worker state as SYNCWAIT and waits for - * state to change in a loop. - * - Apply periodically checks tables that are synchronizing for SYNCWAIT. - * When the desired state appears, it will set the worker state to - * CATCHUP and starts loop-waiting until either the table state is set - * to SYNCDONE or the sync worker exits. + * The stream position synchronization works in multiple steps: + * - Apply worker requests a tablesync worker to start, setting the new + * table state to INIT. + * - Tablesync worker starts; changes table state from INIT to DATASYNC while + * copying. 
+ * - Tablesync worker finishes the copy and sets table state to SYNCWAIT; + * waits for state change. + * - Apply worker periodically checks for tables in SYNCWAIT state. When + * any appear, it sets the table state to CATCHUP and starts loop-waiting + * until either the table state is set to SYNCDONE or the sync worker + * exits. * - After the sync worker has seen the state change to CATCHUP, it will * read the stream and apply changes (acting like an apply worker) until * it catches up to the specified stream position. Then it sets the * state to SYNCDONE. There might be zero changes applied between * CATCHUP and SYNCDONE, because the sync worker might be ahead of the * apply worker. - * - Once the state was set to SYNCDONE, the apply will continue tracking + * - Once the state is set to SYNCDONE, the apply will continue tracking * the table until it reaches the SYNCDONE stream position, at which * point it sets state to READY and stops tracking. Again, there might * be zero changes in between. * - * So the state progression is always: INIT -> DATASYNC -> SYNCWAIT -> CATCHUP -> - * SYNCDONE -> READY. + * So the state progression is always: INIT -> DATASYNC -> SYNCWAIT -> + * CATCHUP -> SYNCDONE -> READY. * * The catalog pg_subscription_rel is used to keep information about * subscribed tables and their state. Some transient state during data @@ -67,7 +71,8 @@ * -> continue rep * apply:11 * -> set in catalog READY - * - Sync in front: + * + * - Sync is in front: * sync:10 * -> set in memory SYNCWAIT * apply:8 @@ -142,13 +147,14 @@ finish_sync_worker(void) } /* - * Wait until the relation synchronization state is set in the catalog to the - * expected one. + * Wait until the relation sync state is set in the catalog to the expected + * one; return true when it happens. * - * Used when transitioning from CATCHUP state to SYNCDONE. + * Returns false if the table sync worker or the table itself have + * disappeared, or the table state has been reset. 
* - * Returns false if the synchronization worker has disappeared or the table state - * has been reset. + * Currently, this is used in the apply worker when transitioning from + * CATCHUP state to SYNCDONE. */ static bool wait_for_relation_state_change(Oid relid, char expected_state) @@ -162,28 +168,23 @@ wait_for_relation_state_change(Oid relid, char expected_state) CHECK_FOR_INTERRUPTS(); - /* XXX use cache invalidation here to improve performance? */ - PushActiveSnapshot(GetLatestSnapshot()); + InvalidateCatalogSnapshot(); state = GetSubscriptionRelState(MyLogicalRepWorker->subid, - relid, &statelsn, true); - PopActiveSnapshot(); + relid, &statelsn); if (state == SUBREL_STATE_UNKNOWN) - return false; + break; if (state == expected_state) return true; /* Check if the sync worker is still running and bail if not. */ LWLockAcquire(LogicalRepWorkerLock, LW_SHARED); - - /* Check if the opposite worker is still running and bail if not. */ - worker = logicalrep_worker_find(MyLogicalRepWorker->subid, - am_tablesync_worker() ? InvalidOid : relid, + worker = logicalrep_worker_find(MyLogicalRepWorker->subid, relid, false); LWLockRelease(LogicalRepWorkerLock); if (!worker) - return false; + break; (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH, @@ -774,7 +775,7 @@ copy_table(Relation rel) * For non-tables, we need to do COPY (SELECT ...), but we can't just * do SELECT * because we need to not copy generated columns. */ - appendStringInfo(&cmd, "COPY (SELECT "); + appendStringInfoString(&cmd, "COPY (SELECT "); for (int i = 0; i < lrel.natts; i++) { appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i])); @@ -810,6 +811,9 @@ copy_table(Relation rel) /* * Start syncing the table in the sync worker. * + * If nothing needs to be done to sync the table, we exit the worker without + * any further action. + * * The returned slot name is palloc'ed in current memory context. 
*/ char * @@ -819,12 +823,14 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) char *err; char relstate; XLogRecPtr relstate_lsn; + Relation rel; + WalRcvExecResult *res; /* Check the state of the table synchronization. */ StartTransactionCommand(); relstate = GetSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, - &relstate_lsn, true); + &relstate_lsn); CommitTransactionCommand(); SpinLockAcquire(&MyLogicalRepWorker->relmutex); @@ -832,6 +838,18 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) MyLogicalRepWorker->relstate_lsn = relstate_lsn; SpinLockRelease(&MyLogicalRepWorker->relmutex); + /* + * If synchronization is already done or no longer necessary, exit now + * that we've updated shared memory state. + */ + switch (relstate) + { + case SUBREL_STATE_SYNCDONE: + case SUBREL_STATE_READY: + case SUBREL_STATE_UNKNOWN: + finish_sync_worker(); /* doesn't return */ + } + /* * To build a slot name for the sync work, we are limited to NAMEDATALEN - * 1 characters. We cut the original slot name to NAMEDATALEN - 28 chars @@ -856,134 +874,87 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) ereport(ERROR, (errmsg("could not connect to the publisher: %s", err))); - switch (MyLogicalRepWorker->relstate) - { - case SUBREL_STATE_INIT: - case SUBREL_STATE_DATASYNC: - { - Relation rel; - WalRcvExecResult *res; + Assert(MyLogicalRepWorker->relstate == SUBREL_STATE_INIT || + MyLogicalRepWorker->relstate == SUBREL_STATE_DATASYNC); - SpinLockAcquire(&MyLogicalRepWorker->relmutex); - MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC; - MyLogicalRepWorker->relstate_lsn = InvalidXLogRecPtr; - SpinLockRelease(&MyLogicalRepWorker->relmutex); - - /* Update the state and make it visible to others. 
*/ - StartTransactionCommand(); - UpdateSubscriptionRelState(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn); - CommitTransactionCommand(); - pgstat_report_stat(false); + SpinLockAcquire(&MyLogicalRepWorker->relmutex); + MyLogicalRepWorker->relstate = SUBREL_STATE_DATASYNC; + MyLogicalRepWorker->relstate_lsn = InvalidXLogRecPtr; + SpinLockRelease(&MyLogicalRepWorker->relmutex); - /* - * We want to do the table data sync in a single transaction. - */ - StartTransactionCommand(); + /* Update the state and make it visible to others. */ + StartTransactionCommand(); + UpdateSubscriptionRelState(MyLogicalRepWorker->subid, + MyLogicalRepWorker->relid, + MyLogicalRepWorker->relstate, + MyLogicalRepWorker->relstate_lsn); + CommitTransactionCommand(); + pgstat_report_stat(false); - /* - * Use a standard write lock here. It might be better to - * disallow access to the table while it's being synchronized. - * But we don't want to block the main apply process from - * working and it has to open the relation in RowExclusiveLock - * when remapping remote relation id to local one. - */ - rel = table_open(MyLogicalRepWorker->relid, RowExclusiveLock); + /* + * We want to do the table data sync in a single transaction. + */ + StartTransactionCommand(); - /* - * Create a temporary slot for the sync process. We do this - * inside the transaction so that we can use the snapshot made - * by the slot to get existing data. - */ - res = walrcv_exec(wrconn, - "BEGIN READ ONLY ISOLATION LEVEL " - "REPEATABLE READ", 0, NULL); - if (res->status != WALRCV_OK_COMMAND) - ereport(ERROR, - (errmsg("table copy could not start transaction on publisher"), - errdetail("The error was: %s", res->err))); - walrcv_clear_result(res); + /* + * Use a standard write lock here. It might be better to disallow access + * to the table while it's being synchronized. 
But we don't want to block + * the main apply process from working and it has to open the relation in + * RowExclusiveLock when remapping remote relation id to local one. + */ + rel = table_open(MyLogicalRepWorker->relid, RowExclusiveLock); - /* - * Create new temporary logical decoding slot. - * - * We'll use slot for data copy so make sure the snapshot is - * used for the transaction; that way the COPY will get data - * that is consistent with the lsn used by the slot to start - * decoding. - */ - walrcv_create_slot(wrconn, slotname, true, - CRS_USE_SNAPSHOT, origin_startpos); + /* + * Start a transaction in the remote node in REPEATABLE READ mode. This + * ensures that both the replication slot we create (see below) and the + * COPY are consistent with each other. + */ + res = walrcv_exec(wrconn, + "BEGIN READ ONLY ISOLATION LEVEL REPEATABLE READ", + 0, NULL); + if (res->status != WALRCV_OK_COMMAND) + ereport(ERROR, + (errmsg("table copy could not start transaction on publisher"), + errdetail("The error was: %s", res->err))); + walrcv_clear_result(res); - PushActiveSnapshot(GetTransactionSnapshot()); - copy_table(rel); - PopActiveSnapshot(); + /* + * Create a new temporary logical decoding slot. This slot will be used + * for the catchup phase after COPY is done, so tell it to use the + * snapshot to make the final data consistent. 
+ */ + walrcv_create_slot(wrconn, slotname, true, + CRS_USE_SNAPSHOT, origin_startpos); - res = walrcv_exec(wrconn, "COMMIT", 0, NULL); - if (res->status != WALRCV_OK_COMMAND) - ereport(ERROR, - (errmsg("table copy could not finish transaction on publisher"), - errdetail("The error was: %s", res->err))); - walrcv_clear_result(res); + /* Now do the initial data copy */ + PushActiveSnapshot(GetTransactionSnapshot()); + copy_table(rel); + PopActiveSnapshot(); - table_close(rel, NoLock); + res = walrcv_exec(wrconn, "COMMIT", 0, NULL); + if (res->status != WALRCV_OK_COMMAND) + ereport(ERROR, + (errmsg("table copy could not finish transaction on publisher"), + errdetail("The error was: %s", res->err))); + walrcv_clear_result(res); - /* Make the copy visible. */ - CommandCounterIncrement(); + table_close(rel, NoLock); - /* - * We are done with the initial data synchronization, update - * the state. - */ - SpinLockAcquire(&MyLogicalRepWorker->relmutex); - MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT; - MyLogicalRepWorker->relstate_lsn = *origin_startpos; - SpinLockRelease(&MyLogicalRepWorker->relmutex); - - /* Wait for main apply worker to tell us to catchup. */ - wait_for_worker_state_change(SUBREL_STATE_CATCHUP); - - /*---------- - * There are now two possible states here: - * a) Sync is behind the apply. If that's the case we need to - * catch up with it by consuming the logical replication - * stream up to the relstate_lsn. For that, we exit this - * function and continue in ApplyWorkerMain(). - * b) Sync is caught up with the apply. So it can just set - * the state to SYNCDONE and finish. - *---------- - */ - if (*origin_startpos >= MyLogicalRepWorker->relstate_lsn) - { - /* - * Update the new state in catalog. No need to bother - * with the shmem state as we are exiting for good. 
- */ - UpdateSubscriptionRelState(MyLogicalRepWorker->subid, - MyLogicalRepWorker->relid, - SUBREL_STATE_SYNCDONE, - *origin_startpos); - finish_sync_worker(); - } - break; - } - case SUBREL_STATE_SYNCDONE: - case SUBREL_STATE_READY: - case SUBREL_STATE_UNKNOWN: + /* Make the copy visible. */ + CommandCounterIncrement(); - /* - * Nothing to do here but finish. (UNKNOWN means the relation was - * removed from pg_subscription_rel before the sync worker could - * start.) - */ - finish_sync_worker(); - break; - default: - elog(ERROR, "unknown relation state \"%c\"", - MyLogicalRepWorker->relstate); - } + /* + * We are done with the initial data synchronization, update the state. + */ + SpinLockAcquire(&MyLogicalRepWorker->relmutex); + MyLogicalRepWorker->relstate = SUBREL_STATE_SYNCWAIT; + MyLogicalRepWorker->relstate_lsn = *origin_startpos; + SpinLockRelease(&MyLogicalRepWorker->relmutex); + /* + * Finally, wait until the main apply worker tells us to catch up and then + * return to let LogicalRepApplyLoop do it. 
+ */ + wait_for_worker_state_change(SUBREL_STATE_CATCHUP); return slotname; } diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index c37aafed0d29e..3a5b733ee38c2 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -344,7 +344,6 @@ static EState * create_estate_for_relation(LogicalRepRelMapEntry *rel) { EState *estate; - ResultRelInfo *resultRelInfo; RangeTblEntry *rte; estate = CreateExecutorState(); @@ -356,13 +355,6 @@ create_estate_for_relation(LogicalRepRelMapEntry *rel) rte->rellockmode = AccessShareLock; ExecInitRangeTable(estate, list_make1(rte)); - resultRelInfo = makeNode(ResultRelInfo); - InitResultRelInfo(resultRelInfo, rel->localrel, 1, NULL, 0); - - estate->es_result_relations = resultRelInfo; - estate->es_num_result_relations = 1; - estate->es_result_relation_info = resultRelInfo; - estate->es_output_cid = GetCurrentCommandId(true); /* Prepare to catch AFTER triggers. */ @@ -1150,6 +1142,7 @@ GetRelationIdentityOrPK(Relation rel) static void apply_handle_insert(StringInfo s) { + ResultRelInfo *resultRelInfo; LogicalRepRelMapEntry *rel; LogicalRepTupleData newtup; LogicalRepRelId relid; @@ -1179,6 +1172,8 @@ apply_handle_insert(StringInfo s) remoteslot = ExecInitExtraTupleSlot(estate, RelationGetDescr(rel->localrel), &TTSOpsVirtual); + resultRelInfo = makeNode(ResultRelInfo); + InitResultRelInfo(resultRelInfo, rel->localrel, 1, NULL, 0); /* Input functions may need an active snapshot, so get one */ PushActiveSnapshot(GetTransactionSnapshot()); @@ -1191,10 +1186,10 @@ apply_handle_insert(StringInfo s) /* For a partitioned table, insert the tuple into a partition. 
*/ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) - apply_handle_tuple_routing(estate->es_result_relation_info, estate, + apply_handle_tuple_routing(resultRelInfo, estate, remoteslot, NULL, rel, CMD_INSERT); else - apply_handle_insert_internal(estate->es_result_relation_info, estate, + apply_handle_insert_internal(resultRelInfo, estate, remoteslot); PopActiveSnapshot(); @@ -1218,7 +1213,7 @@ apply_handle_insert_internal(ResultRelInfo *relinfo, ExecOpenIndices(relinfo, false); /* Do the insert. */ - ExecSimpleRelationInsert(estate, remoteslot); + ExecSimpleRelationInsert(relinfo, estate, remoteslot); /* Cleanup. */ ExecCloseIndices(relinfo); @@ -1265,6 +1260,7 @@ check_relation_updatable(LogicalRepRelMapEntry *rel) static void apply_handle_update(StringInfo s) { + ResultRelInfo *resultRelInfo; LogicalRepRelMapEntry *rel; LogicalRepRelId relid; EState *estate; @@ -1301,6 +1297,8 @@ apply_handle_update(StringInfo s) remoteslot = ExecInitExtraTupleSlot(estate, RelationGetDescr(rel->localrel), &TTSOpsVirtual); + resultRelInfo = makeNode(ResultRelInfo); + InitResultRelInfo(resultRelInfo, rel->localrel, 1, NULL, 0); /* * Populate updatedCols so that per-column triggers can fire. This could @@ -1337,10 +1335,10 @@ apply_handle_update(StringInfo s) /* For a partitioned table, apply update to correct partition. */ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) - apply_handle_tuple_routing(estate->es_result_relation_info, estate, + apply_handle_tuple_routing(resultRelInfo, estate, remoteslot, &newtup, rel, CMD_UPDATE); else - apply_handle_update_internal(estate->es_result_relation_info, estate, + apply_handle_update_internal(resultRelInfo, estate, remoteslot, &newtup, rel); PopActiveSnapshot(); @@ -1392,7 +1390,8 @@ apply_handle_update_internal(ResultRelInfo *relinfo, EvalPlanQualSetSlot(&epqstate, remoteslot); /* Do the actual update. 
*/ - ExecSimpleRelationUpdate(estate, &epqstate, localslot, remoteslot); + ExecSimpleRelationUpdate(relinfo, estate, &epqstate, localslot, + remoteslot); } else { @@ -1420,6 +1419,7 @@ apply_handle_update_internal(ResultRelInfo *relinfo, static void apply_handle_delete(StringInfo s) { + ResultRelInfo *resultRelInfo; LogicalRepRelMapEntry *rel; LogicalRepTupleData oldtup; LogicalRepRelId relid; @@ -1452,6 +1452,8 @@ apply_handle_delete(StringInfo s) remoteslot = ExecInitExtraTupleSlot(estate, RelationGetDescr(rel->localrel), &TTSOpsVirtual); + resultRelInfo = makeNode(ResultRelInfo); + InitResultRelInfo(resultRelInfo, rel->localrel, 1, NULL, 0); PushActiveSnapshot(GetTransactionSnapshot()); @@ -1462,10 +1464,10 @@ apply_handle_delete(StringInfo s) /* For a partitioned table, apply delete to correct partition. */ if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) - apply_handle_tuple_routing(estate->es_result_relation_info, estate, + apply_handle_tuple_routing(resultRelInfo, estate, remoteslot, NULL, rel, CMD_DELETE); else - apply_handle_delete_internal(estate->es_result_relation_info, estate, + apply_handle_delete_internal(resultRelInfo, estate, remoteslot, &rel->remoterel); PopActiveSnapshot(); @@ -1504,7 +1506,7 @@ apply_handle_delete_internal(ResultRelInfo *relinfo, EState *estate, EvalPlanQualSetSlot(&epqstate, localslot); /* Do the actual delete. */ - ExecSimpleRelationDelete(estate, &epqstate, localslot); + ExecSimpleRelationDelete(relinfo, estate, &epqstate, localslot); } else { @@ -1570,7 +1572,6 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, ResultRelInfo *partrelinfo; Relation partrel; TupleTableSlot *remoteslot_part; - PartitionRoutingInfo *partinfo; TupleConversionMap *map; MemoryContext oldctx; @@ -1597,11 +1598,10 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, * partition's rowtype. Convert if needed or just copy, using a dedicated * slot to store the tuple in any case. 
*/ - partinfo = partrelinfo->ri_PartitionInfo; - remoteslot_part = partinfo->pi_PartitionTupleSlot; + remoteslot_part = partrelinfo->ri_PartitionTupleSlot; if (remoteslot_part == NULL) remoteslot_part = table_slot_create(partrel, &estate->es_tupleTable); - map = partinfo->pi_RootToPartitionMap; + map = partrelinfo->ri_RootToPartitionMap; if (map != NULL) remoteslot_part = execute_attr_map_slot(map->attrMap, remoteslot, remoteslot_part); @@ -1612,7 +1612,6 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, } MemoryContextSwitchTo(oldctx); - estate->es_result_relation_info = partrelinfo; switch (operation) { case CMD_INSERT: @@ -1676,7 +1675,7 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, * Does the updated tuple still satisfy the current * partition's constraint? */ - if (partrelinfo->ri_PartitionCheck == NULL || + if (!partrel->rd_rel->relispartition || ExecPartitionCheck(partrelinfo, remoteslot_part, estate, false)) { @@ -1693,8 +1692,8 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, ExecOpenIndices(partrelinfo, false); EvalPlanQualSetSlot(&epqstate, remoteslot_part); - ExecSimpleRelationUpdate(estate, &epqstate, localslot, - remoteslot_part); + ExecSimpleRelationUpdate(partrelinfo, estate, &epqstate, + localslot, remoteslot_part); ExecCloseIndices(partrelinfo); EvalPlanQualEnd(&epqstate); } @@ -1735,7 +1734,6 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, Assert(partrelinfo_new != partrelinfo); /* DELETE old tuple found in the old partition. 
*/ - estate->es_result_relation_info = partrelinfo; apply_handle_delete_internal(partrelinfo, estate, localslot, &relmapentry->remoterel); @@ -1748,12 +1746,11 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, */ oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); partrel = partrelinfo_new->ri_RelationDesc; - partinfo = partrelinfo_new->ri_PartitionInfo; - remoteslot_part = partinfo->pi_PartitionTupleSlot; + remoteslot_part = partrelinfo_new->ri_PartitionTupleSlot; if (remoteslot_part == NULL) remoteslot_part = table_slot_create(partrel, &estate->es_tupleTable); - map = partinfo->pi_RootToPartitionMap; + map = partrelinfo_new->ri_RootToPartitionMap; if (map != NULL) { remoteslot_part = execute_attr_map_slot(map->attrMap, @@ -1767,7 +1764,6 @@ apply_handle_tuple_routing(ResultRelInfo *relinfo, slot_getallattrs(remoteslot); } MemoryContextSwitchTo(oldctx); - estate->es_result_relation_info = partrelinfo_new; apply_handle_insert_internal(partrelinfo_new, estate, remoteslot_part); } @@ -2061,6 +2057,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) { TimestampTz last_recv_timestamp = GetCurrentTimestamp(); bool ping_sent = false; + TimeLineID tli; /* * Init the ApplyMessageContext which we clean up after each replication @@ -2202,12 +2199,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received) /* Check if we need to exit the streaming loop. */ if (endofstream) - { - TimeLineID tli; - - walrcv_endstreaming(wrconn, &tli); break; - } /* * Wait for more data or latch. If we have unflushed transactions, @@ -2284,6 +2276,9 @@ LogicalRepApplyLoop(XLogRecPtr last_received) send_feedback(last_received, requestReply, requestReply); } } + + /* All done */ + walrcv_endstreaming(wrconn, &tli); } /* @@ -3025,10 +3020,8 @@ ApplyWorkerMain(Datum main_arg) /* This is table synchronization worker, call initial sync. */ syncslotname = LogicalRepSyncTableStart(&origin_startpos); - /* The slot name needs to be allocated in permanent memory context. 
*/ - oldctx = MemoryContextSwitchTo(ApplyContext); - myslotname = pstrdup(syncslotname); - MemoryContextSwitchTo(oldctx); + /* allocate slot name in long-lived context */ + myslotname = MemoryContextStrdup(ApplyContext, syncslotname); pfree(syncslotname); } @@ -3072,7 +3065,6 @@ ApplyWorkerMain(Datum main_arg) * does some initializations on the upstream so let's still call it. */ (void) walrcv_identify_system(wrconn, &startpointTLI); - } /* @@ -3087,7 +3079,9 @@ ApplyWorkerMain(Datum main_arg) options.logical = true; options.startpoint = origin_startpos; options.slotname = myslotname; - options.proto.logical.proto_version = LOGICALREP_PROTO_VERSION_NUM; + options.proto.logical.proto_version = + walrcv_server_version(wrconn) >= 140000 ? + LOGICALREP_PROTO_STREAM_VERSION_NUM : LOGICALREP_PROTO_VERSION_NUM; options.proto.logical.publication_names = MySubscription->publications; options.proto.logical.binary = MySubscription->binary; options.proto.logical.streaming = MySubscription->stream; diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index c29c0888133af..9c997aed83676 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -77,7 +77,7 @@ static void send_relation_and_attrs(Relation relation, TransactionId xid, * and with streamed transactions the commit order may be different from * the order the transactions are sent in. Also, the (sub) transactions * might get aborted so we need to send the schema for each (sub) transaction - * so that we don't loose the schema information on abort. For handling this, + * so that we don't lose the schema information on abort. For handling this, * we maintain the list of xids (streamed_txns) for those we have already sent * the schema. 
* @@ -272,11 +272,11 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, &enable_streaming); /* Check if we support requested protocol */ - if (data->protocol_version > LOGICALREP_PROTO_VERSION_NUM) + if (data->protocol_version > LOGICALREP_PROTO_MAX_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("client sent proto_version=%d but we only support protocol %d or lower", - data->protocol_version, LOGICALREP_PROTO_VERSION_NUM))); + data->protocol_version, LOGICALREP_PROTO_MAX_VERSION_NUM))); if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM) ereport(ERROR, @@ -945,16 +945,26 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) Assert(RelationSyncCache != NULL); - /* Find cached function info, creating if not found */ - oldctx = MemoryContextSwitchTo(CacheMemoryContext); + /* Find cached relation info, creating if not found */ entry = (RelationSyncEntry *) hash_search(RelationSyncCache, (void *) &relid, HASH_ENTER, &found); - MemoryContextSwitchTo(oldctx); Assert(entry != NULL); /* Not found means schema wasn't sent */ - if (!found || !entry->replicate_valid) + if (!found) + { + /* immediately make a new entry valid enough to satisfy callbacks */ + entry->schema_sent = false; + entry->streamed_txns = NIL; + entry->replicate_valid = false; + entry->pubactions.pubinsert = entry->pubactions.pubupdate = + entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false; + entry->publish_as_relid = InvalidOid; + } + + /* Validate the entry */ + if (!entry->replicate_valid) { List *pubids = GetRelationPublications(relid); ListCell *lc; @@ -977,9 +987,6 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) * relcache considers all publications given relation is in, but here * we only need to consider ones that the subscriber requested. 
*/ - entry->pubactions.pubinsert = entry->pubactions.pubupdate = - entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false; - foreach(lc, data->publications) { Publication *pub = lfirst(lc); @@ -1054,12 +1061,6 @@ get_rel_sync_entry(PGOutputData *data, Oid relid) entry->replicate_valid = true; } - if (!found) - { - entry->schema_sent = false; - entry->streamed_txns = NULL; - } - return entry; } @@ -1145,7 +1146,7 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid) { entry->schema_sent = false; list_free(entry->streamed_txns); - entry->streamed_txns = NULL; + entry->streamed_txns = NIL; } } diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 42c78eabd4eb9..220b4cd6e99cd 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -99,7 +99,6 @@ ReplicationSlot *MyReplicationSlot = NULL; int max_replication_slots = 0; /* the maximum number of replication * slots */ -static ReplicationSlot *SearchNamedReplicationSlot(const char *name); static int ReplicationSlotAcquireInternal(ReplicationSlot *slot, const char *name, SlotAcquireBehavior behavior); static void ReplicationSlotDropAcquired(void); @@ -314,6 +313,15 @@ ReplicationSlotCreate(const char *name, bool db_specific, LWLockRelease(ReplicationSlotControlLock); + /* + * Create statistics entry for the new logical slot. We don't collect any + * stats for physical slots, so no need to create an entry for the same. + * See ReplicationSlotDropPtr for why we need to do this before releasing + * ReplicationSlotAllocationLock. + */ + if (SlotIsLogical(slot)) + pgstat_report_replslot(NameStr(slot->data.name), 0, 0, 0); + /* * Now that the slot has been marked as in_use and active, it's safe to * let somebody else try to allocate a slot. @@ -331,7 +339,7 @@ ReplicationSlotCreate(const char *name, bool db_specific, * * The caller must hold ReplicationSlotControlLock in shared mode. 
*/ -static ReplicationSlot * +ReplicationSlot * SearchNamedReplicationSlot(const char *name) { int i; @@ -683,6 +691,19 @@ ReplicationSlotDropPtr(ReplicationSlot *slot) ereport(WARNING, (errmsg("could not remove directory \"%s\"", tmppath))); + /* + * Send a message to drop the replication slot to the stats collector. + * Since there is no guarantee of the order of message transfer on a UDP + * connection, it's possible that a message for creating a new slot + * reaches before a message for removing the old slot. We send the drop + * and create messages while holding ReplicationSlotAllocationLock to + * reduce that possibility. If the messages reached in reverse, we would + * lose one statistics update message. But the next update message will + * create the statistics for the replication slot. + */ + if (SlotIsLogical(slot)) + pgstat_report_replslot_drop(NameStr(slot->data.name)); + /* * We release this at the very end, so that nobody starts trying to create * a slot while we're still cleaning up the detritus of the old one. 
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index 7c11e1ab44cb1..bb1d44ccb7a0e 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -270,7 +270,7 @@ WalReceiverMain(void) pqsignal(SIGHUP, WalRcvSigHupHandler); /* set flag to read config file */ pqsignal(SIGINT, SIG_IGN); pqsignal(SIGTERM, WalRcvShutdownHandler); /* request shutdown */ - pqsignal(SIGQUIT, SignalHandlerForCrashExit); + /* SIGQUIT handler was already set up by InitPostmasterChild */ pqsignal(SIGALRM, SIG_IGN); pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, procsignal_sigusr1_handler); @@ -279,9 +279,6 @@ WalReceiverMain(void) /* Reset some signals that are accepted by postmaster but not here */ pqsignal(SIGCHLD, SIG_DFL); - /* We allow SIGQUIT (quickdie) at all times */ - sigdelset(&BlockSig, SIGQUIT); - /* Load the libpq-specific functions */ load_file("libpqwalreceiver", false); if (WalReceiverFunctions == NULL) @@ -761,6 +758,15 @@ WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last) */ writeTimeLineHistoryFile(tli, content, len); + /* + * Mark the streamed history file as ready for archiving + * if archive_mode is always. 
+ */ + if (XLogArchiveMode != ARCHIVE_MODE_ALWAYS) + XLogArchiveForceDone(fname); + else + XLogArchiveNotify(fname); + pfree(fname); pfree(content); } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 3f756b470af11..df27e84761757 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -799,7 +799,7 @@ StartReplication(StartReplicationCmd *cmd) } /* Send CommandComplete message */ - pq_puttextmessage('C', "START_STREAMING"); + EndReplicationCommand("START_STREAMING"); } /* @@ -1122,11 +1122,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) static void DropReplicationSlot(DropReplicationSlotCmd *cmd) { - QueryCompletion qc; - ReplicationSlotDrop(cmd->slotname, !cmd->wait); - SetQueryCompletion(&qc, CMDTAG_DROP_REPLICATION_SLOT, 0); - EndCommand(&qc, DestRemote, false); } /* @@ -1517,9 +1513,9 @@ exec_replication_command(const char *cmd_string) { int parse_rc; Node *cmd_node; + const char *cmdtag; MemoryContext cmd_context; MemoryContext old_context; - QueryCompletion qc; /* * If WAL sender has been told that shutdown is getting close, switch its @@ -1545,6 +1541,9 @@ exec_replication_command(const char *cmd_string) CHECK_FOR_INTERRUPTS(); + /* + * Parse the command. + */ cmd_context = AllocSetContextCreate(CurrentMemoryContext, "Replication command context", ALLOCSET_DEFAULT_SIZES); @@ -1557,31 +1556,47 @@ exec_replication_command(const char *cmd_string) (errcode(ERRCODE_SYNTAX_ERROR), errmsg_internal("replication command parser returned %d", parse_rc))); + replication_scanner_finish(); cmd_node = replication_parse_result; /* - * Log replication command if log_replication_commands is enabled. Even - * when it's disabled, log the command with DEBUG1 level for backward - * compatibility. Note that SQL commands are not logged here, and will be - * logged later if log_statement is enabled. 
+ * If it's a SQL command, just clean up our mess and return false; the + * caller will take care of executing it. */ - if (cmd_node->type != T_SQLCmd) - ereport(log_replication_commands ? LOG : DEBUG1, - (errmsg("received replication command: %s", cmd_string))); + if (IsA(cmd_node, SQLCmd)) + { + if (MyDatabaseId == InvalidOid) + ereport(ERROR, + (errmsg("cannot execute SQL commands in WAL sender for physical replication"))); + + MemoryContextSwitchTo(old_context); + MemoryContextDelete(cmd_context); + + /* Tell the caller that this wasn't a WalSender command. */ + return false; + } /* - * CREATE_REPLICATION_SLOT ... LOGICAL exports a snapshot. If it was - * called outside of transaction the snapshot should be cleared here. + * Report query to various monitoring facilities. For this purpose, we + * report replication commands just like SQL commands. */ - if (!IsTransactionBlock()) - SnapBuildClearExportedSnapshot(); + debug_query_string = cmd_string; + + pgstat_report_activity(STATE_RUNNING, cmd_string); /* - * For aborted transactions, don't allow anything except pure SQL, the - * exec_simple_query() will handle it correctly. + * Log replication command if log_replication_commands is enabled. Even + * when it's disabled, log the command with DEBUG1 level for backward + * compatibility. */ - if (IsAbortedTransactionBlockState() && !IsA(cmd_node, SQLCmd)) + ereport(log_replication_commands ? LOG : DEBUG1, + (errmsg("received replication command: %s", cmd_string))); + + /* + * Disallow replication commands in aborted transaction blocks. 
+ */ + if (IsAbortedTransactionBlockState()) ereport(ERROR, (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION), errmsg("current transaction is aborted, " @@ -1597,46 +1612,63 @@ exec_replication_command(const char *cmd_string) initStringInfo(&reply_message); initStringInfo(&tmpbuf); - /* Report to pgstat that this process is running */ - pgstat_report_activity(STATE_RUNNING, NULL); - switch (cmd_node->type) { case T_IdentifySystemCmd: + cmdtag = "IDENTIFY_SYSTEM"; + set_ps_display(cmdtag); IdentifySystem(); + EndReplicationCommand(cmdtag); break; case T_BaseBackupCmd: - PreventInTransactionBlock(true, "BASE_BACKUP"); + cmdtag = "BASE_BACKUP"; + set_ps_display(cmdtag); + PreventInTransactionBlock(true, cmdtag); SendBaseBackup((BaseBackupCmd *) cmd_node); + EndReplicationCommand(cmdtag); break; case T_CreateReplicationSlotCmd: + cmdtag = "CREATE_REPLICATION_SLOT"; + set_ps_display(cmdtag); CreateReplicationSlot((CreateReplicationSlotCmd *) cmd_node); + EndReplicationCommand(cmdtag); break; case T_DropReplicationSlotCmd: + cmdtag = "DROP_REPLICATION_SLOT"; + set_ps_display(cmdtag); DropReplicationSlot((DropReplicationSlotCmd *) cmd_node); + EndReplicationCommand(cmdtag); break; case T_StartReplicationCmd: { StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node; - PreventInTransactionBlock(true, "START_REPLICATION"); + cmdtag = "START_REPLICATION"; + set_ps_display(cmdtag); + PreventInTransactionBlock(true, cmdtag); if (cmd->kind == REPLICATION_KIND_PHYSICAL) StartReplication(cmd); else StartLogicalReplication(cmd); + /* dupe, but necessary per libpqrcv_endstreaming */ + EndReplicationCommand(cmdtag); + Assert(xlogreader != NULL); break; } case T_TimeLineHistoryCmd: - PreventInTransactionBlock(true, "TIMELINE_HISTORY"); + cmdtag = "TIMELINE_HISTORY"; + set_ps_display(cmdtag); + PreventInTransactionBlock(true, cmdtag); SendTimeLineHistory((TimeLineHistoryCmd *) cmd_node); + EndReplicationCommand(cmdtag); break; case T_VariableShowStmt: @@ -1644,24 +1676,17 @@ 
exec_replication_command(const char *cmd_string) DestReceiver *dest = CreateDestReceiver(DestRemoteSimple); VariableShowStmt *n = (VariableShowStmt *) cmd_node; + cmdtag = "SHOW"; + set_ps_display(cmdtag); + /* syscache access needs a transaction environment */ StartTransactionCommand(); GetPGVariable(n->name, dest); CommitTransactionCommand(); + EndReplicationCommand(cmdtag); } break; - case T_SQLCmd: - if (MyDatabaseId == InvalidOid) - ereport(ERROR, - (errmsg("cannot execute SQL commands in WAL sender for physical replication"))); - - /* Report to pgstat that this process is now idle */ - pgstat_report_activity(STATE_IDLE, NULL); - - /* Tell the caller that this wasn't a WalSender command. */ - return false; - default: elog(ERROR, "unrecognized replication command node tag: %u", cmd_node->type); @@ -1671,12 +1696,12 @@ exec_replication_command(const char *cmd_string) MemoryContextSwitchTo(old_context); MemoryContextDelete(cmd_context); - /* Send CommandComplete message */ - SetQueryCompletion(&qc, CMDTAG_SELECT, 0); - EndCommand(&qc, DestRemote, true); - - /* Report to pgstat that this process is now idle */ - pgstat_report_activity(STATE_IDLE, NULL); + /* + * We need not update ps display or pg_stat_activity, because PostgresMain + * will reset those to "idle". But we must reset debug_query_string to + * ensure it doesn't become a dangling pointer. 
+ */ + debug_query_string = NULL; return true; } @@ -3035,7 +3060,7 @@ WalSndSignals(void) pqsignal(SIGHUP, SignalHandlerForConfigReload); pqsignal(SIGINT, StatementCancelHandler); /* query cancel */ pqsignal(SIGTERM, die); /* request shutdown */ - pqsignal(SIGQUIT, quickdie); /* hard crash time */ + /* SIGQUIT handler was already set up by InitPostmasterChild */ InitializeTimeouts(); /* establishes SIGALRM handler */ pqsignal(SIGPIPE, SIG_IGN); pqsignal(SIGUSR1, procsignal_sigusr1_handler); diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index fe777c3103dfe..1faaafab08a6a 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -650,11 +650,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index) if (IsA(rtr, RangeTblRef) && rtr->rtindex == rt_index) { - newjointree = list_delete_ptr(newjointree, rtr); - - /* - * foreach is safe because we exit loop after list_delete... - */ + newjointree = foreach_delete_current(newjointree, l); break; } } diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index a2a963bd5b41f..e549fa1d309fe 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2636,14 +2636,7 @@ PrintBufferLeakWarning(Buffer buffer) void CheckPointBuffers(int flags) { - TRACE_POSTGRESQL_BUFFER_CHECKPOINT_START(flags); - CheckpointStats.ckpt_write_t = GetCurrentTimestamp(); BufferSync(flags); - CheckpointStats.ckpt_sync_t = GetCurrentTimestamp(); - TRACE_POSTGRESQL_BUFFER_CHECKPOINT_SYNC_START(); - ProcessSyncRequests(); - CheckpointStats.ckpt_sync_end_t = GetCurrentTimestamp(); - TRACE_POSTGRESQL_BUFFER_CHECKPOINT_DONE(); } diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c index 11c3f132a1002..36a067c9244c8 100644 --- a/src/backend/storage/ipc/ipc.c +++ b/src/backend/storage/ipc/ipc.c @@ -416,3 +416,20 @@ on_exit_reset(void) on_proc_exit_index = 0; reset_on_dsm_detach(); 
} + +/* ---------------------------------------------------------------- + * check_on_shmem_exit_lists_are_empty + * + * Debugging check that no shmem cleanup handlers have been registered + * prematurely in the current process. + * ---------------------------------------------------------------- + */ +void +check_on_shmem_exit_lists_are_empty(void) +{ + if (before_shmem_exit_index) + elog(FATAL, "before_shmem_exit has been called prematurely"); + if (on_shmem_exit_index) + elog(FATAL, "on_shmem_exit has been called prematurely"); + /* Checking DSM detach state seems unnecessary given the above */ +} diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index 4153cc85579f7..24d44c982dabc 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -924,7 +924,22 @@ ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch) if (events == WL_LATCH_SET) { + if (latch && latch->owner_pid != MyProcPid) + elog(ERROR, "cannot wait on a latch owned by another process"); set->latch = latch; + /* + * On Unix, we don't need to modify the kernel object because the + * underlying pipe is the same for all latches so we can return + * immediately. On Windows, we need to update our array of handles, + * but we leave the old one in place and tolerate spurious wakeups if + * the latch is disabled. + */ +#if defined(WAIT_USE_WIN32) + if (!latch) + return; +#else + return; +#endif } #if defined(WAIT_USE_EPOLL) @@ -1133,7 +1148,8 @@ WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events) if (rc < 0) { - if (event->events == WL_POSTMASTER_DEATH && errno == ESRCH) + if (event->events == WL_POSTMASTER_DEATH && + (errno == ESRCH || errno == EACCES)) set->report_postmaster_not_running = true; else ereport(ERROR, @@ -1386,7 +1402,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, /* There's data in the self-pipe, clear it. 
*/ drainSelfPipe(); - if (set->latch->is_set) + if (set->latch && set->latch->is_set) { occurred_events->fd = PGINVALID_SOCKET; occurred_events->events = WL_LATCH_SET; @@ -1477,7 +1493,10 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, timeout_p = &timeout; } - /* Report events discovered by WaitEventAdjustKqueue(). */ + /* + * Report postmaster events discovered by WaitEventAdjustKqueue() or an + * earlier call to WaitEventSetWait(). + */ if (unlikely(set->report_postmaster_not_running)) { if (set->exit_on_postmaster_death) @@ -1536,7 +1555,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, /* There's data in the self-pipe, clear it. */ drainSelfPipe(); - if (set->latch->is_set) + if (set->latch && set->latch->is_set) { occurred_events->fd = PGINVALID_SOCKET; occurred_events->events = WL_LATCH_SET; @@ -1548,6 +1567,13 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, cur_kqueue_event->filter == EVFILT_PROC && (cur_kqueue_event->fflags & NOTE_EXIT) != 0) { + /* + * The kernel will tell this kqueue object only once about the exit + * of the postmaster, so let's remember that for next time so that + * we provide level-triggered semantics. + */ + set->report_postmaster_not_running = true; + if (set->exit_on_postmaster_death) proc_exit(1); occurred_events->fd = PGINVALID_SOCKET; @@ -1645,7 +1671,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, /* There's data in the self-pipe, clear it. 
*/ drainSelfPipe(); - if (set->latch->is_set) + if (set->latch && set->latch->is_set) { occurred_events->fd = PGINVALID_SOCKET; occurred_events->events = WL_LATCH_SET; @@ -1812,7 +1838,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout, if (!ResetEvent(set->latch->event)) elog(ERROR, "ResetEvent failed: error code %lu", GetLastError()); - if (set->latch->is_set) + if (set->latch && set->latch->is_set) { occurred_events->fd = PGINVALID_SOCKET; occurred_events->events = WL_LATCH_SET; diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 1c0cd6b2487b9..07c5eeb749517 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -1699,8 +1699,8 @@ ComputeXidHorizons(ComputeXidHorizonsResult *h) */ xmin = TransactionIdOlder(xmin, xid); - /* if neither is set, this proc doesn't influence the horizon */ - if (!TransactionIdIsValid(xmin)) + /* if neither is set, this proc doesn't influence the horizon */ + if (!TransactionIdIsValid(xmin)) continue; /* @@ -3627,7 +3627,7 @@ TerminateOtherDBBackends(Oid databaseId) if (nprepared > 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("database \"%s\" is being used by prepared transaction", + errmsg("database \"%s\" is being used by prepared transactions", get_database_name(databaseId)), errdetail_plural("There is %d prepared transaction using the database.", "There are %d prepared transactions using the database.", @@ -4106,7 +4106,7 @@ GlobalVisCheckRemovableXid(Relation rel, TransactionId xid) * * Be very careful about when to use this function. It can only safely be used * when there is a guarantee that xid is within MaxTransactionId / 2 xids of - * rel. That e.g. can be guaranteed if the the caller assures a snapshot is + * rel. That e.g. 
can be guaranteed if the caller assures a snapshot is * held by the backend and xid is from a table (where vacuum/freezing ensures * the xid has to be within that range), or if xid is from the procarray and * prevents xid wraparound that way. @@ -4280,6 +4280,9 @@ ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids, /* As in ProcArrayEndTransaction, advance latestCompletedXid */ MaintainLatestCompletedXidRecovery(max_xid); + /* ... and xactCompletionCount */ + ShmemVariableCache->xactCompletionCount++; + LWLockRelease(ProcArrayLock); } diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 6f40fb165d285..ac9d23a3403ad 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -24,6 +24,7 @@ #include "storage/procsignal.h" #include "storage/shm_mq.h" #include "storage/spin.h" +#include "utils/memutils.h" /* * This structure represents the actual queue, stored in shared memory. @@ -360,6 +361,13 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait) for (i = 0; i < iovcnt; ++i) nbytes += iov[i].len; + /* Prevent writing messages overwhelming the receiver. */ + if (nbytes > MaxAllocSize) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("cannot send a message of size %zu via shared memory queue", + nbytes))); + /* Try to write, or finish writing, the length word into the buffer. */ while (!mqh->mqh_length_word_complete) { @@ -675,6 +683,17 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) } nbytes = mqh->mqh_expected_bytes; + /* + * Should be disallowed on the sending side already, but better check and + * error out on the receiver side as well rather than trying to read a + * prohibitively large message. 
+ */ + if (nbytes > MaxAllocSize) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("invalid message size %zu in shared memory queue", + nbytes))); + if (mqh->mqh_partial_bytes == 0) { /* @@ -703,8 +722,13 @@ shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait) { Size newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE); + /* + * Double the buffer size until the payload fits, but limit to + * MaxAllocSize. + */ while (newbuflen < nbytes) newbuflen *= 2; + newbuflen = Min(newbuflen, MaxAllocSize); if (mqh->mqh_buffer != NULL) { diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl index ca54acdfb0f88..39cb97f5c3d95 100644 --- a/src/backend/storage/lmgr/generate-lwlocknames.pl +++ b/src/backend/storage/lmgr/generate-lwlocknames.pl @@ -3,8 +3,8 @@ # Generate lwlocknames.h and lwlocknames.c from lwlocknames.txt # Copyright (c) 2000-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; my $lastlockidx = -1; my $continue = "\n"; diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index a2f8e7524b499..8a365b400c6b6 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -821,9 +821,7 @@ SerialInit(void) SerialSlruCtl->PagePrecedes = SerialPagePrecedesLogically; SimpleLruInit(SerialSlruCtl, "Serial", NUM_SERIAL_BUFFERS, 0, SerialSLRULock, "pg_serial", - LWTRANCHE_SERIAL_BUFFER); - /* Override default assumption that writes should be fsync'd */ - SerialSlruCtl->do_fsync = false; + LWTRANCHE_SERIAL_BUFFER, SYNC_HANDLER_NONE); /* * Create or attach to the SerialControl structure. @@ -1052,7 +1050,7 @@ CheckPointPredicate(void) SimpleLruTruncate(SerialSlruCtl, tailPage); /* - * Flush dirty SLRU pages to disk + * Write dirty SLRU pages to disk * * This is not actually necessary from a correctness point of view. We do * it merely as a debugging aid. 
@@ -1061,7 +1059,7 @@ CheckPointPredicate(void) * before deleting the file in which they sit, which would be completely * pointless. */ - SimpleLruFlush(SerialSlruCtl, true); + SimpleLruWriteAll(SerialSlruCtl, true); } /*------------------------------------------------------------------------*/ diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 19a9f9394921a..88566bd9fab09 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -1369,7 +1369,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) else LWLockRelease(ProcArrayLock); - /* prevent signal from being resent more than once */ + /* prevent signal from being sent again more than once */ allow_autovacuum_cancel = false; } diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index d708117a4067e..4bc2bf955dfd5 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -411,51 +411,250 @@ PageRestoreTempPage(Page tempPage, Page oldPage) } /* - * sorting support for PageRepairFragmentation and PageIndexMultiDelete + * Tuple defrag support for PageRepairFragmentation and PageIndexMultiDelete */ -typedef struct itemIdSortData +typedef struct itemIdCompactData { uint16 offsetindex; /* linp array index */ int16 itemoff; /* page offset of item data */ uint16 alignedlen; /* MAXALIGN(item data len) */ -} itemIdSortData; -typedef itemIdSortData *itemIdSort; - -static int -itemoffcompare(const void *itemidp1, const void *itemidp2) -{ - /* Sort in decreasing itemoff order */ - return ((itemIdSort) itemidp2)->itemoff - - ((itemIdSort) itemidp1)->itemoff; -} +} itemIdCompactData; +typedef itemIdCompactData *itemIdCompact; /* * After removing or marking some line pointers unused, move the tuples to - * remove the gaps caused by the removed items. + * remove the gaps caused by the removed items and reorder them back into + * reverse line pointer order in the page. 
+ * + * This function can often be fairly hot, so it pays to take some measures to + * make it as optimal as possible. + * + * Callers may pass 'presorted' as true if the 'itemidbase' array is sorted in + * descending order of itemoff. When this is true we can just memmove() + * tuples towards the end of the page. This is quite a common case as it's + * the order that tuples are initially inserted into pages. When we call this + * function to defragment the tuples in the page then any new line pointers + * added to the page will keep that presorted order, so hitting this case is + * still very common for tables that are commonly updated. + * + * When the 'itemidbase' array is not presorted then we're unable to just + * memmove() tuples around freely. Doing so could cause us to overwrite the + * memory belonging to a tuple we've not moved yet. In this case, we copy all + * the tuples that need to be moved into a temporary buffer. We can then + * simply memcpy() out of that temp buffer back into the page at the correct + * location. Tuples are copied back into the page in the same order as the + * 'itemidbase' array, so we end up reordering the tuples back into reverse + * line pointer order. This will increase the chances of hitting the + * presorted case the next time around. 
+ * + * Callers must ensure that nitems is > 0 */ static void -compactify_tuples(itemIdSort itemidbase, int nitems, Page page) +compactify_tuples(itemIdCompact itemidbase, int nitems, Page page, bool presorted) { PageHeader phdr = (PageHeader) page; Offset upper; + Offset copy_tail; + Offset copy_head; + itemIdCompact itemidptr; int i; - /* sort itemIdSortData array into decreasing itemoff order */ - qsort((char *) itemidbase, nitems, sizeof(itemIdSortData), - itemoffcompare); + /* Code within will not work correctly if nitems == 0 */ + Assert(nitems > 0); - upper = phdr->pd_special; - for (i = 0; i < nitems; i++) + if (presorted) { - itemIdSort itemidptr = &itemidbase[i]; - ItemId lp; - lp = PageGetItemId(page, itemidptr->offsetindex + 1); - upper -= itemidptr->alignedlen; +#ifdef USE_ASSERT_CHECKING + { + /* + * Verify we've not gotten any new callers that are incorrectly + * passing a true presorted value. + */ + Offset lastoff = phdr->pd_special; + + for (i = 0; i < nitems; i++) + { + itemidptr = &itemidbase[i]; + + Assert(lastoff > itemidptr->itemoff); + + lastoff = itemidptr->itemoff; + } + } +#endif /* USE_ASSERT_CHECKING */ + + /* + * 'itemidbase' is already in the optimal order, i.e, lower item + * pointers have a higher offset. This allows us to memmove() the + * tuples up to the end of the page without having to worry about + * overwriting other tuples that have not been moved yet. + * + * There's a good chance that there are tuples already right at the + * end of the page that we can simply skip over because they're + * already in the correct location within the page. We'll do that + * first... + */ + upper = phdr->pd_special; + i = 0; + do + { + itemidptr = &itemidbase[i]; + if (upper != itemidptr->itemoff + itemidptr->alignedlen) + break; + upper -= itemidptr->alignedlen; + + i++; + } while (i < nitems); + + /* + * Now that we've found the first tuple that needs to be moved, we can + * do the tuple compactification. 
We try and make the least number of + * memmove() calls and only call memmove() when there's a gap. When + * we see a gap we just move all tuples after the gap up until the + * point of the last move operation. + */ + copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen; + for (; i < nitems; i++) + { + ItemId lp; + + itemidptr = &itemidbase[i]; + lp = PageGetItemId(page, itemidptr->offsetindex + 1); + + if (copy_head != itemidptr->itemoff + itemidptr->alignedlen) + { + memmove((char *) page + upper, + page + copy_head, + copy_tail - copy_head); + + /* + * We've now moved all tuples already seen, but not the + * current tuple, so we set the copy_tail to the end of this + * tuple so it can be moved in another iteration of the loop. + */ + copy_tail = itemidptr->itemoff + itemidptr->alignedlen; + } + /* shift the target offset down by the length of this tuple */ + upper -= itemidptr->alignedlen; + /* point the copy_head to the start of this tuple */ + copy_head = itemidptr->itemoff; + + /* update the line pointer to reference the new offset */ + lp->lp_off = upper; + + } + + /* move the remaining tuples. */ memmove((char *) page + upper, - (char *) page + itemidptr->itemoff, - itemidptr->alignedlen); - lp->lp_off = upper; + page + copy_head, + copy_tail - copy_head); + } + else + { + PGAlignedBlock scratch; + char *scratchptr = scratch.data; + + /* + * Non-presorted case: The tuples in the itemidbase array may be in + * any order. So, in order to move these to the end of the page we + * must make a temp copy of each tuple that needs to be moved before + * we copy them back into the page at the new offset. + * + * If a large percentage of tuples have been pruned (>75%) then we'll + * copy these into the temp buffer tuple-by-tuple, otherwise, we'll + * just do a single memcpy() for all tuples that need to be moved. 
+ * When so many tuples have been removed there's likely to be a lot of + * gaps and it's unlikely that many non-movable tuples remain at the + * end of the page. + */ + if (nitems < PageGetMaxOffsetNumber(page) / 4) + { + i = 0; + do + { + itemidptr = &itemidbase[i]; + memcpy(scratchptr + itemidptr->itemoff, page + itemidptr->itemoff, + itemidptr->alignedlen); + i++; + } while (i < nitems); + + /* Set things up for the compactification code below */ + i = 0; + itemidptr = &itemidbase[0]; + upper = phdr->pd_special; + } + else + { + upper = phdr->pd_special; + + /* + * Many tuples are likely to already be in the correct location. + * There's no need to copy these into the temp buffer. Instead + * we'll just skip forward in the itemidbase array to the position + * that we do need to move tuples from so that the code below just + * leaves these ones alone. + */ + i = 0; + do + { + itemidptr = &itemidbase[i]; + if (upper != itemidptr->itemoff + itemidptr->alignedlen) + break; + upper -= itemidptr->alignedlen; + + i++; + } while (i < nitems); + + /* Copy all tuples that need to be moved into the temp buffer */ + memcpy(scratchptr + phdr->pd_upper, + page + phdr->pd_upper, + upper - phdr->pd_upper); + } + + /* + * Do the tuple compactification. itemidptr is already pointing to + * the first tuple that we're going to move. Here we collapse the + * memcpy calls for adjacent tuples into a single call. This is done + * by delaying the memcpy call until we find a gap that needs to be + * closed. 
+ */ + copy_tail = copy_head = itemidptr->itemoff + itemidptr->alignedlen; + for (; i < nitems; i++) + { + ItemId lp; + + itemidptr = &itemidbase[i]; + lp = PageGetItemId(page, itemidptr->offsetindex + 1); + + /* copy pending tuples when we detect a gap */ + if (copy_head != itemidptr->itemoff + itemidptr->alignedlen) + { + memcpy((char *) page + upper, + scratchptr + copy_head, + copy_tail - copy_head); + + /* + * We've now copied all tuples already seen, but not the + * current tuple, so we set the copy_tail to the end of this + * tuple. + */ + copy_tail = itemidptr->itemoff + itemidptr->alignedlen; + } + /* shift the target offset down by the length of this tuple */ + upper -= itemidptr->alignedlen; + /* point the copy_head to the start of this tuple */ + copy_head = itemidptr->itemoff; + + /* update the line pointer to reference the new offset */ + lp->lp_off = upper; + + } + + /* Copy the remaining chunk */ + memcpy((char *) page + upper, + scratchptr + copy_head, + copy_tail - copy_head); } phdr->pd_upper = upper; @@ -477,14 +676,16 @@ PageRepairFragmentation(Page page) Offset pd_lower = ((PageHeader) page)->pd_lower; Offset pd_upper = ((PageHeader) page)->pd_upper; Offset pd_special = ((PageHeader) page)->pd_special; - itemIdSortData itemidbase[MaxHeapTuplesPerPage]; - itemIdSort itemidptr; + Offset last_offset; + itemIdCompactData itemidbase[MaxHeapTuplesPerPage]; + itemIdCompact itemidptr; ItemId lp; int nline, nstorage, nunused; int i; Size totallen; + bool presorted = true; /* For now */ /* * It's worth the trouble to be more paranoid here than in most places, @@ -509,6 +710,7 @@ PageRepairFragmentation(Page page) nline = PageGetMaxOffsetNumber(page); itemidptr = itemidbase; nunused = totallen = 0; + last_offset = pd_special; for (i = FirstOffsetNumber; i <= nline; i++) { lp = PageGetItemId(page, i); @@ -518,6 +720,12 @@ PageRepairFragmentation(Page page) { itemidptr->offsetindex = i - 1; itemidptr->itemoff = ItemIdGetOffset(lp); + + if (last_offset > 
itemidptr->itemoff) + last_offset = itemidptr->itemoff; + else + presorted = false; + if (unlikely(itemidptr->itemoff < (int) pd_upper || itemidptr->itemoff >= (int) pd_special)) ereport(ERROR, @@ -552,7 +760,7 @@ PageRepairFragmentation(Page page) errmsg("corrupted item lengths: total %u, available space %u", (unsigned int) totallen, pd_special - pd_lower))); - compactify_tuples(itemidbase, nstorage, page); + compactify_tuples(itemidbase, nstorage, page, presorted); } /* Set hint bit for PageAddItem */ @@ -831,9 +1039,10 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) Offset pd_lower = phdr->pd_lower; Offset pd_upper = phdr->pd_upper; Offset pd_special = phdr->pd_special; - itemIdSortData itemidbase[MaxIndexTuplesPerPage]; + Offset last_offset; + itemIdCompactData itemidbase[MaxIndexTuplesPerPage]; ItemIdData newitemids[MaxIndexTuplesPerPage]; - itemIdSort itemidptr; + itemIdCompact itemidptr; ItemId lp; int nline, nused; @@ -842,6 +1051,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) unsigned offset; int nextitm; OffsetNumber offnum; + bool presorted = true; /* For now */ Assert(nitems <= MaxIndexTuplesPerPage); @@ -883,6 +1093,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) totallen = 0; nused = 0; nextitm = 0; + last_offset = pd_special; for (offnum = FirstOffsetNumber; offnum <= nline; offnum = OffsetNumberNext(offnum)) { lp = PageGetItemId(page, offnum); @@ -906,6 +1117,12 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) { itemidptr->offsetindex = nused; /* where it will go */ itemidptr->itemoff = offset; + + if (last_offset > itemidptr->itemoff) + last_offset = itemidptr->itemoff; + else + presorted = false; + itemidptr->alignedlen = MAXALIGN(size); totallen += itemidptr->alignedlen; newitemids[nused] = *lp; @@ -932,7 +1149,10 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) phdr->pd_lower = SizeOfPageHeaderData + nused * sizeof(ItemIdData); /* and 
compactify the tuple data */ - compactify_tuples(itemidbase, nused, page); + if (nused > 0) + compactify_tuples(itemidbase, nused, page, presorted); + else + phdr->pd_upper = pd_special; } diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c index 3ded2cdd716bc..1d635d596cad5 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -18,6 +18,9 @@ #include #include +#include "access/commit_ts.h" +#include "access/clog.h" +#include "access/multixact.h" #include "access/xlog.h" #include "access/xlogutils.h" #include "commands/tablespace.h" @@ -90,12 +93,31 @@ typedef struct SyncOps const FileTag *candidate); } SyncOps; +/* + * These indexes must correspond to the values of the SyncRequestHandler enum. + */ static const SyncOps syncsw[] = { /* magnetic disk */ - { + [SYNC_HANDLER_MD] = { .sync_syncfiletag = mdsyncfiletag, .sync_unlinkfiletag = mdunlinkfiletag, .sync_filetagmatches = mdfiletagmatches + }, + /* pg_xact */ + [SYNC_HANDLER_CLOG] = { + .sync_syncfiletag = clogsyncfiletag + }, + /* pg_commit_ts */ + [SYNC_HANDLER_COMMIT_TS] = { + .sync_syncfiletag = committssyncfiletag + }, + /* pg_multixact/offsets */ + [SYNC_HANDLER_MULTIXACT_OFFSET] = { + .sync_syncfiletag = multixactoffsetssyncfiletag + }, + /* pg_multixact/members */ + [SYNC_HANDLER_MULTIXACT_MEMBER] = { + .sync_syncfiletag = multixactmemberssyncfiletag } }; @@ -505,8 +527,8 @@ RememberSyncRequest(const FileTag *ftag, SyncRequestType type) (void *) ftag, HASH_ENTER, &found); - /* if new entry, initialize it */ - if (!found) + /* if new entry, or was previously canceled, initialize it */ + if (!found || entry->canceled) { entry->cycle_ctr = sync_cycle_ctr; entry->canceled = false; diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c index 7208751ec781c..96789f88ef938 100644 --- a/src/backend/tcop/dest.c +++ b/src/backend/tcop/dest.c @@ -211,6 +211,18 @@ EndCommand(const QueryCompletion *qc, CommandDest dest, bool force_undecorated_o } } +/* 
---------------- + * EndReplicationCommand - stripped down version of EndCommand + * + * For use by replication commands. + * ---------------- + */ +void +EndReplicationCommand(const char *commandTag) +{ + pq_putmessage('C', commandTag, strlen(commandTag) + 1); +} + /* ---------------- * NullCommand - tell dest that an empty query string was recognized * diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index c9424f167c8d9..411cfadbff35b 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -3820,7 +3820,8 @@ PostgresMain(int argc, char *argv[], } /* - * Set up signal handlers and masks. + * Set up signal handlers. (InitPostmasterChild or InitStandaloneProcess + * has already set up BlockSig and made that the active signal mask.) * * Note that postmaster blocked all signals before forking child process, * so there is no race condition whereby we might receive a signal before @@ -3842,6 +3843,9 @@ PostgresMain(int argc, char *argv[], pqsignal(SIGTERM, die); /* cancel current query and exit */ /* + * In a postmaster child backend, replace SignalHandlerForCrashExit + * with quickdie, so we can tell the client we're dying. + * * In a standalone backend, SIGQUIT can be generated from the keyboard * easily, while SIGTERM cannot, so we make both signals do die() * rather than quickdie(). @@ -3871,16 +3875,6 @@ PostgresMain(int argc, char *argv[], * platforms */ } - pqinitmask(); - - if (IsUnderPostmaster) - { - /* We allow SIGQUIT (quickdie) at all times */ - sigdelset(&BlockSig, SIGQUIT); - } - - PG_SETMASK(&BlockSig); /* block everything except SIGQUIT */ - if (!IsUnderPostmaster) { /* diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 9713a7ac41a49..9a35147b26af3 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1139,6 +1139,7 @@ ProcessUtilitySlow(ParseState *pstate, { List *stmts; ListCell *l; + RangeVar *table_rv = NULL; /* Run parse analysis ... 
*/ stmts = transformCreateStmt((CreateStmt *) parsetree, @@ -1151,11 +1152,15 @@ ProcessUtilitySlow(ParseState *pstate, if (IsA(stmt, CreateStmt)) { + CreateStmt *cstmt = (CreateStmt *) stmt; Datum toast_options; static char *validnsps[] = HEAP_RELOPT_NAMESPACES; + /* Remember transformed RangeVar for LIKE */ + table_rv = cstmt->relation; + /* Create the table itself */ - address = DefineRelation((CreateStmt *) stmt, + address = DefineRelation(cstmt, RELKIND_RELATION, InvalidOid, NULL, queryString); @@ -1174,7 +1179,7 @@ ProcessUtilitySlow(ParseState *pstate, * table */ toast_options = transformRelOptions((Datum) 0, - ((CreateStmt *) stmt)->options, + cstmt->options, "toast", validnsps, true, @@ -1188,12 +1193,17 @@ ProcessUtilitySlow(ParseState *pstate, } else if (IsA(stmt, CreateForeignTableStmt)) { + CreateForeignTableStmt *cstmt = (CreateForeignTableStmt *) stmt; + + /* Remember transformed RangeVar for LIKE */ + table_rv = cstmt->base.relation; + /* Create the table itself */ - address = DefineRelation((CreateStmt *) stmt, + address = DefineRelation(&cstmt->base, RELKIND_FOREIGN_TABLE, InvalidOid, NULL, queryString); - CreateForeignTable((CreateForeignTableStmt *) stmt, + CreateForeignTable(cstmt, address.objectId); EventTriggerCollectSimpleCommand(address, secondaryObject, @@ -1208,10 +1218,11 @@ ProcessUtilitySlow(ParseState *pstate, * to-do list. 
*/ TableLikeClause *like = (TableLikeClause *) stmt; - RangeVar *rv = ((CreateStmt *) parsetree)->relation; List *morestmts; - morestmts = expandTableLikeClause(rv, like); + Assert(table_rv != NULL); + + morestmts = expandTableLikeClause(table_rv, like); stmts = list_concat(stmts, morestmts); /* diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c index cb0835982d85c..64c979086d1eb 100644 --- a/src/backend/tsearch/dict_thesaurus.c +++ b/src/backend/tsearch/dict_thesaurus.c @@ -286,11 +286,6 @@ thesaurusRead(const char *filename, DictThesaurus *d) (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unexpected end of line"))); - /* - * Note: currently, tsearch_readline can't return lines exceeding 4KB, - * so overflow of the word counts is impossible. But that may not - * always be true, so let's check. - */ if (nwrd != (uint16) nwrd || posinsubst != (uint16) posinsubst) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c index a916dd6cb67b8..d362e86d61a26 100644 --- a/src/backend/tsearch/ts_locale.c +++ b/src/backend/tsearch/ts_locale.c @@ -14,6 +14,7 @@ #include "postgres.h" #include "catalog/pg_collation.h" +#include "common/string.h" #include "storage/fd.h" #include "tsearch/ts_locale.h" #include "tsearch/ts_public.h" @@ -128,6 +129,7 @@ tsearch_readline_begin(tsearch_readline_state *stp, return false; stp->filename = filename; stp->lineno = 0; + initStringInfo(&stp->buf); stp->curline = NULL; /* Setup error traceback support for ereport() */ stp->cb.callback = tsearch_readline_callback; @@ -145,13 +147,43 @@ tsearch_readline_begin(tsearch_readline_state *stp, char * tsearch_readline(tsearch_readline_state *stp) { - char *result; + char *recoded; + /* Advance line number to use in error reports */ stp->lineno++; - stp->curline = NULL; - result = t_readline(stp->fp); - stp->curline = result; - return result; + + /* Clear curline, it's no longer relevant */ + 
if (stp->curline) + { + if (stp->curline != stp->buf.data) + pfree(stp->curline); + stp->curline = NULL; + } + + /* Collect next line, if there is one */ + if (!pg_get_line_buf(stp->fp, &stp->buf)) + return NULL; + + /* Validate the input as UTF-8, then convert to DB encoding if needed */ + recoded = pg_any_to_server(stp->buf.data, stp->buf.len, PG_UTF8); + + /* Save the correctly-encoded string for possible error reports */ + stp->curline = recoded; /* might be equal to buf.data */ + + /* + * We always return a freshly pstrdup'd string. This is clearly necessary + * if pg_any_to_server() returned buf.data, and we need a second copy even + * if encoding conversion did occur. The caller is entitled to pfree the + * returned string at any time, which would leave curline pointing to + * recycled storage, causing problems if an error occurs after that point. + * (It's preferable to return the result of pstrdup instead of the output + * of pg_any_to_server, because the conversion result tends to be + * over-allocated. Since callers might save the result string directly + * into a long-lived dictionary structure, we don't want it to be a larger + * palloc chunk than necessary. We'll reclaim the conversion result on + * the next call.) + */ + return pstrdup(recoded); } /* @@ -160,7 +192,18 @@ tsearch_readline(tsearch_readline_state *stp) void tsearch_readline_end(tsearch_readline_state *stp) { + /* Suppress use of curline in any error reported below */ + if (stp->curline) + { + if (stp->curline != stp->buf.data) + pfree(stp->curline); + stp->curline = NULL; + } + + /* Release other resources */ + pfree(stp->buf.data); FreeFile(stp->fp); + /* Pop the error context stack */ error_context_stack = stp->cb.previous; } @@ -176,8 +219,7 @@ tsearch_readline_callback(void *arg) /* * We can't include the text of the config line for errors that occur - * during t_readline() itself. 
This is only partly a consequence of our - * arms-length use of that routine: the major cause of such errors is + * during tsearch_readline() itself. The major cause of such errors is * encoding violations, and we daren't try to print error messages * containing badly-encoded data. */ @@ -193,43 +235,6 @@ tsearch_readline_callback(void *arg) } -/* - * Read the next line from a tsearch data file (expected to be in UTF-8), and - * convert it to database encoding if needed. The returned string is palloc'd. - * NULL return means EOF. - * - * Note: direct use of this function is now deprecated. Go through - * tsearch_readline() to provide better error reporting. - */ -char * -t_readline(FILE *fp) -{ - int len; - char *recoded; - char buf[4096]; /* lines must not be longer than this */ - - if (fgets(buf, sizeof(buf), fp) == NULL) - return NULL; - - len = strlen(buf); - - /* Make sure the input is valid UTF-8 */ - (void) pg_verify_mbstr(PG_UTF8, buf, len, false); - - /* And convert */ - recoded = pg_any_to_server(buf, len, PG_UTF8); - if (recoded == buf) - { - /* - * conversion didn't pstrdup, so we must. We can use the length of the - * original string, because no conversion was done. 
- */ - recoded = pnstrdup(recoded, len); - } - - return recoded; -} - /* * lowerstr --- fold null-terminated string to lower case * diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index de3f49637e22e..f97489f0644ac 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -5217,6 +5217,7 @@ get_rolespec_oid(const RoleSpec *role, bool missing_ok) oid = get_role_oid(role->rolename, missing_ok); break; + case ROLESPEC_CURRENT_ROLE: case ROLESPEC_CURRENT_USER: oid = GetUserId(); break; @@ -5259,6 +5260,7 @@ get_rolespec_tuple(const RoleSpec *role) errmsg("role \"%s\" does not exist", role->rolename))); break; + case ROLESPEC_CURRENT_ROLE: case ROLESPEC_CURRENT_USER: tuple = SearchSysCache1(AUTHOID, GetUserId()); if (!HeapTupleIsValid(tuple)) diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c index 6515fc8ec6955..d093ce80386f4 100644 --- a/src/backend/utils/adt/cash.c +++ b/src/backend/utils/adt/cash.c @@ -1042,7 +1042,7 @@ cash_numeric(PG_FUNCTION_ARGS) fpoint = 2; /* convert the integral money value to numeric */ - result = DirectFunctionCall1(int8_numeric, Int64GetDatum(money)); + result = NumericGetDatum(int64_to_numeric(money)); /* scale appropriately, if needed */ if (fpoint > 0) @@ -1056,8 +1056,7 @@ cash_numeric(PG_FUNCTION_ARGS) scale = 1; for (i = 0; i < fpoint; i++) scale *= 10; - numeric_scale = DirectFunctionCall1(int8_numeric, - Int64GetDatum(scale)); + numeric_scale = NumericGetDatum(int64_to_numeric(scale)); /* * Given integral inputs approaching INT64_MAX, select_div_scale() @@ -1107,7 +1106,7 @@ numeric_cash(PG_FUNCTION_ARGS) scale *= 10; /* multiply the input amount by scale factor */ - numeric_scale = DirectFunctionCall1(int8_numeric, Int64GetDatum(scale)); + numeric_scale = NumericGetDatum(int64_to_numeric(scale)); amount = DirectFunctionCall2(numeric_mul, amount, numeric_scale); /* note that numeric_int8 will round to nearest integer for us */ diff --git a/src/backend/utils/adt/date.c 
b/src/backend/utils/adt/date.c index eaaffa7137dc7..a470cf890a205 100644 --- a/src/backend/utils/adt/date.c +++ b/src/backend/utils/adt/date.c @@ -299,20 +299,31 @@ EncodeSpecialDate(DateADT dt, char *str) DateADT GetSQLCurrentDate(void) { - TimestampTz ts; - struct pg_tm tt, - *tm = &tt; - fsec_t fsec; - int tz; + struct pg_tm tm; - ts = GetCurrentTransactionStartTimestamp(); + static int cache_year = 0; + static int cache_mon = 0; + static int cache_mday = 0; + static DateADT cache_date; - if (timestamp2tm(ts, &tz, tm, &fsec, NULL, NULL) != 0) - ereport(ERROR, - (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), - errmsg("timestamp out of range"))); + GetCurrentDateTime(&tm); - return date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; + /* + * date2j involves several integer divisions; moreover, unless our session + * lives across local midnight, we don't really have to do it more than + * once. So it seems worth having a separate cache here. + */ + if (tm.tm_year != cache_year || + tm.tm_mon != cache_mon || + tm.tm_mday != cache_mday) + { + cache_date = date2j(tm.tm_year, tm.tm_mon, tm.tm_mday) - POSTGRES_EPOCH_JDATE; + cache_year = tm.tm_year; + cache_mon = tm.tm_mon; + cache_mday = tm.tm_mday; + } + + return cache_date; } /* @@ -322,18 +333,12 @@ TimeTzADT * GetSQLCurrentTime(int32 typmod) { TimeTzADT *result; - TimestampTz ts; struct pg_tm tt, *tm = &tt; fsec_t fsec; int tz; - ts = GetCurrentTransactionStartTimestamp(); - - if (timestamp2tm(ts, &tz, tm, &fsec, NULL, NULL) != 0) - ereport(ERROR, - (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), - errmsg("timestamp out of range"))); + GetCurrentTimeUsec(tm, &fsec, &tz); result = (TimeTzADT *) palloc(sizeof(TimeTzADT)); tm2timetz(tm, fsec, tz, result); @@ -348,18 +353,12 @@ TimeADT GetSQLLocalTime(int32 typmod) { TimeADT result; - TimestampTz ts; struct pg_tm tt, *tm = &tt; fsec_t fsec; int tz; - ts = GetCurrentTransactionStartTimestamp(); - - if (timestamp2tm(ts, &tz, tm, &fsec, NULL, NULL) != 0) - 
ereport(ERROR, - (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), - errmsg("timestamp out of range"))); + GetCurrentTimeUsec(tm, &fsec, &tz); tm2time(tm, fsec, &result); AdjustTimeForTypmod(&result, typmod); @@ -555,15 +554,24 @@ date_mii(PG_FUNCTION_ARGS) /* * Promote date to timestamp. * - * On overflow error is thrown if 'overflow' is NULL. Otherwise, '*overflow' - * is set to -1 (+1) when result value exceed lower (upper) boundary and zero - * returned. + * On successful conversion, *overflow is set to zero if it's not NULL. + * + * If the date is finite but out of the valid range for timestamp, then: + * if overflow is NULL, we throw an out-of-range error. + * if overflow is not NULL, we store +1 or -1 there to indicate the sign + * of the overflow, and return the appropriate timestamp infinity. + * + * Note: *overflow = -1 is actually not possible currently, since both + * datatypes have the same lower bound, Julian day zero. */ Timestamp date2timestamp_opt_overflow(DateADT dateVal, int *overflow) { Timestamp result; + if (overflow) + *overflow = 0; + if (DATE_IS_NOBEGIN(dateVal)) TIMESTAMP_NOBEGIN(result); else if (DATE_IS_NOEND(dateVal)) @@ -571,7 +579,6 @@ date2timestamp_opt_overflow(DateADT dateVal, int *overflow) else { /* - * Date's range is wider than timestamp's, so check for boundaries. * Since dates have the same minimum values as timestamps, only upper * boundary need be checked for overflow. */ @@ -580,7 +587,8 @@ date2timestamp_opt_overflow(DateADT dateVal, int *overflow) if (overflow) { *overflow = 1; - return (Timestamp) 0; + TIMESTAMP_NOEND(result); + return result; } else { @@ -598,7 +606,7 @@ date2timestamp_opt_overflow(DateADT dateVal, int *overflow) } /* - * Single-argument version of date2timestamp_opt_overflow(). + * Promote date to timestamp, throwing error for overflow. */ static TimestampTz date2timestamp(DateADT dateVal) @@ -609,9 +617,12 @@ date2timestamp(DateADT dateVal) /* * Promote date to timestamp with time zone. 
* - * On overflow error is thrown if 'overflow' is NULL. Otherwise, '*overflow' - * is set to -1 (+1) when result value exceed lower (upper) boundary and zero - * returned. + * On successful conversion, *overflow is set to zero if it's not NULL. + * + * If the date is finite but out of the valid range for timestamptz, then: + * if overflow is NULL, we throw an out-of-range error. + * if overflow is not NULL, we store +1 or -1 there to indicate the sign + * of the overflow, and return the appropriate timestamptz infinity. */ TimestampTz date2timestamptz_opt_overflow(DateADT dateVal, int *overflow) @@ -621,6 +632,9 @@ date2timestamptz_opt_overflow(DateADT dateVal, int *overflow) *tm = &tt; int tz; + if (overflow) + *overflow = 0; + if (DATE_IS_NOBEGIN(dateVal)) TIMESTAMP_NOBEGIN(result); else if (DATE_IS_NOEND(dateVal)) @@ -628,7 +642,6 @@ date2timestamptz_opt_overflow(DateADT dateVal, int *overflow) else { /* - * Date's range is wider than timestamp's, so check for boundaries. * Since dates have the same minimum values as timestamps, only upper * boundary need be checked for overflow. */ @@ -637,7 +650,8 @@ date2timestamptz_opt_overflow(DateADT dateVal, int *overflow) if (overflow) { *overflow = 1; - return (TimestampTz) 0; + TIMESTAMP_NOEND(result); + return result; } else { @@ -665,13 +679,15 @@ date2timestamptz_opt_overflow(DateADT dateVal, int *overflow) if (overflow) { if (result < MIN_TIMESTAMP) + { *overflow = -1; + TIMESTAMP_NOBEGIN(result); + } else { - Assert(result >= END_TIMESTAMP); *overflow = 1; + TIMESTAMP_NOEND(result); } - return (TimestampTz) 0; } else { @@ -686,7 +702,7 @@ date2timestamptz_opt_overflow(DateADT dateVal, int *overflow) } /* - * Single-argument version of date2timestamptz_opt_overflow(). + * Promote date to timestamptz, throwing error for overflow. 
*/ static TimestampTz date2timestamptz(DateADT dateVal) @@ -727,16 +743,30 @@ date2timestamp_no_overflow(DateADT dateVal) * Crosstype comparison functions for dates */ +int32 +date_cmp_timestamp_internal(DateADT dateVal, Timestamp dt2) +{ + Timestamp dt1; + int overflow; + + dt1 = date2timestamp_opt_overflow(dateVal, &overflow); + if (overflow > 0) + { + /* dt1 is larger than any finite timestamp, but less than infinity */ + return TIMESTAMP_IS_NOEND(dt2) ? -1 : +1; + } + Assert(overflow == 0); /* -1 case cannot occur */ + + return timestamp_cmp_internal(dt1, dt2); +} + Datum date_eq_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp dt1; - dt1 = date2timestamp(dateVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt2) == 0); } Datum @@ -744,11 +774,8 @@ date_ne_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp dt1; - - dt1 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt2) != 0); } Datum @@ -756,11 +783,8 @@ date_lt_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp dt1; - - dt1 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt2) < 0); } Datum @@ -768,11 +792,8 @@ date_gt_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp dt1; - - dt1 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt2) > 0); } Datum @@ -780,11 +801,8 @@ date_le_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp 
dt1; - dt1 = date2timestamp(dateVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt2) <= 0); } Datum @@ -792,11 +810,8 @@ date_ge_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp dt1; - - dt1 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt2) >= 0); } Datum @@ -804,11 +819,29 @@ date_cmp_timestamp(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); Timestamp dt2 = PG_GETARG_TIMESTAMP(1); - Timestamp dt1; - dt1 = date2timestamp(dateVal); + PG_RETURN_INT32(date_cmp_timestamp_internal(dateVal, dt2)); +} + +int32 +date_cmp_timestamptz_internal(DateADT dateVal, TimestampTz dt2) +{ + TimestampTz dt1; + int overflow; - PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); + dt1 = date2timestamptz_opt_overflow(dateVal, &overflow); + if (overflow > 0) + { + /* dt1 is larger than any finite timestamp, but less than infinity */ + return TIMESTAMP_IS_NOEND(dt2) ? -1 : +1; + } + if (overflow < 0) + { + /* dt1 is less than any finite timestamp, but more than -infinity */ + return TIMESTAMP_IS_NOBEGIN(dt2) ? 
+1 : -1; + } + + return timestamptz_cmp_internal(dt1, dt2); } Datum @@ -816,11 +849,8 @@ date_eq_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - dt1 = date2timestamptz(dateVal); - - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) == 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt2) == 0); } Datum @@ -828,11 +858,8 @@ date_ne_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) != 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt2) != 0); } Datum @@ -840,11 +867,8 @@ date_lt_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) < 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt2) < 0); } Datum @@ -852,11 +876,8 @@ date_gt_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) > 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt2) > 0); } Datum @@ -864,11 +885,8 @@ date_le_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - dt1 = date2timestamptz(dateVal); - - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) <= 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt2) <= 0); } Datum @@ -876,11 +894,8 @@ date_ge_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 
0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt2) >= 0); } Datum @@ -888,11 +903,8 @@ date_cmp_timestamptz(PG_FUNCTION_ARGS) { DateADT dateVal = PG_GETARG_DATEADT(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = date2timestamptz(dateVal); - PG_RETURN_INT32(timestamptz_cmp_internal(dt1, dt2)); + PG_RETURN_INT32(date_cmp_timestamptz_internal(dateVal, dt2)); } Datum @@ -900,11 +912,8 @@ timestamp_eq_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - - dt2 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt1) == 0); } Datum @@ -912,11 +921,8 @@ timestamp_ne_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - dt2 = date2timestamp(dateVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt1) != 0); } Datum @@ -924,11 +930,8 @@ timestamp_lt_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - - dt2 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt1) > 0); } Datum @@ -936,11 +939,8 @@ timestamp_gt_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - dt2 = date2timestamp(dateVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt1) < 0); } Datum @@ -948,11 +948,8 @@ timestamp_le_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - - dt2 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt1) 
>= 0); } Datum @@ -960,11 +957,8 @@ timestamp_ge_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - - dt2 = date2timestamp(dateVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); + PG_RETURN_BOOL(date_cmp_timestamp_internal(dateVal, dt1) <= 0); } Datum @@ -972,11 +966,8 @@ timestamp_cmp_date(PG_FUNCTION_ARGS) { Timestamp dt1 = PG_GETARG_TIMESTAMP(0); DateADT dateVal = PG_GETARG_DATEADT(1); - Timestamp dt2; - dt2 = date2timestamp(dateVal); - - PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); + PG_RETURN_INT32(-date_cmp_timestamp_internal(dateVal, dt1)); } Datum @@ -984,11 +975,8 @@ timestamptz_eq_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - - dt2 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) == 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt1) == 0); } Datum @@ -996,11 +984,8 @@ timestamptz_ne_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - - dt2 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) != 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt1) != 0); } Datum @@ -1008,11 +993,8 @@ timestamptz_lt_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - dt2 = date2timestamptz(dateVal); - - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) < 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt1) > 0); } Datum @@ -1020,11 +1002,8 @@ timestamptz_gt_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - - dt2 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) > 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt1) < 0); } Datum @@ 
-1032,11 +1011,8 @@ timestamptz_le_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - - dt2 = date2timestamptz(dateVal); - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) <= 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt1) >= 0); } Datum @@ -1044,11 +1020,8 @@ timestamptz_ge_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - dt2 = date2timestamptz(dateVal); - - PG_RETURN_BOOL(timestamptz_cmp_internal(dt1, dt2) >= 0); + PG_RETURN_BOOL(date_cmp_timestamptz_internal(dateVal, dt1) <= 0); } Datum @@ -1056,11 +1029,8 @@ timestamptz_cmp_date(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); DateADT dateVal = PG_GETARG_DATEADT(1); - TimestampTz dt2; - - dt2 = date2timestamptz(dateVal); - PG_RETURN_INT32(timestamptz_cmp_internal(dt1, dt2)); + PG_RETURN_INT32(-date_cmp_timestamptz_internal(dateVal, dt1)); } /* @@ -1080,6 +1050,7 @@ in_range_date_interval(PG_FUNCTION_ARGS) Timestamp valStamp; Timestamp baseStamp; + /* XXX we could support out-of-range cases here, perhaps */ valStamp = date2timestamp(val); baseStamp = date2timestamp(base); diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index dec2fad82a685..91fab8cc9cb39 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -339,35 +339,80 @@ j2day(int date) /* * GetCurrentDateTime() * - * Get the transaction start time ("now()") broken down as a struct pg_tm. + * Get the transaction start time ("now()") broken down as a struct pg_tm, + * converted according to the session timezone setting. + * + * This is just a convenience wrapper for GetCurrentTimeUsec, to cover the + * case where caller doesn't need either fractional seconds or tz offset. 
*/ void GetCurrentDateTime(struct pg_tm *tm) { - int tz; fsec_t fsec; - timestamp2tm(GetCurrentTransactionStartTimestamp(), &tz, tm, &fsec, - NULL, NULL); - /* Note: don't pass NULL tzp to timestamp2tm; affects behavior */ + GetCurrentTimeUsec(tm, &fsec, NULL); } /* * GetCurrentTimeUsec() * * Get the transaction start time ("now()") broken down as a struct pg_tm, - * including fractional seconds and timezone offset. + * including fractional seconds and timezone offset. The time is converted + * according to the session timezone setting. + * + * Callers may pass tzp = NULL if they don't need the offset, but this does + * not affect the conversion behavior (unlike timestamp2tm()). + * + * Internally, we cache the result, since this could be called many times + * in a transaction, within which now() doesn't change. */ void GetCurrentTimeUsec(struct pg_tm *tm, fsec_t *fsec, int *tzp) { - int tz; + TimestampTz cur_ts = GetCurrentTransactionStartTimestamp(); + + /* + * The cache key must include both current time and current timezone. By + * representing the timezone by just a pointer, we're assuming that + * distinct timezone settings could never have the same pointer value. + * This is true by virtue of the hashtable used inside pg_tzset(); + * however, it might need another look if we ever allow entries in that + * hash to be recycled. + */ + static TimestampTz cache_ts = 0; + static pg_tz *cache_timezone = NULL; + static struct pg_tm cache_tm; + static fsec_t cache_fsec; + static int cache_tz; + + if (cur_ts != cache_ts || session_timezone != cache_timezone) + { + /* + * Make sure cache is marked invalid in case of error after partial + * update within timestamp2tm. + */ + cache_timezone = NULL; + + /* + * Perform the computation, storing results into cache. We do not + * really expect any error here, since current time surely ought to be + * within range, but check just for sanity's sake. 
+ */ + if (timestamp2tm(cur_ts, &cache_tz, &cache_tm, &cache_fsec, + NULL, session_timezone) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("timestamp out of range"))); + + /* OK, so mark the cache valid. */ + cache_ts = cur_ts; + cache_timezone = session_timezone; + } - timestamp2tm(GetCurrentTransactionStartTimestamp(), &tz, tm, fsec, - NULL, NULL); - /* Note: don't pass NULL tzp to timestamp2tm; affects behavior */ + *tm = cache_tm; + *fsec = cache_fsec; if (tzp != NULL) - *tzp = tz; + *tzp = cache_tz; } diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 2320c06a9bc73..3319e9761e4c0 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -579,14 +579,6 @@ numeric_to_cstring(Numeric n) return DatumGetCString(DirectFunctionCall1(numeric_out, d)); } -static Numeric -int64_to_numeric(int64 v) -{ - Datum d = Int64GetDatum(v); - - return DatumGetNumeric(DirectFunctionCall1(int8_numeric, d)); -} - static bool numeric_is_less(Numeric a, Numeric b) { @@ -615,9 +607,9 @@ numeric_half_rounded(Numeric n) Datum two; Datum result; - zero = DirectFunctionCall1(int8_numeric, Int64GetDatum(0)); - one = DirectFunctionCall1(int8_numeric, Int64GetDatum(1)); - two = DirectFunctionCall1(int8_numeric, Int64GetDatum(2)); + zero = NumericGetDatum(int64_to_numeric(0)); + one = NumericGetDatum(int64_to_numeric(1)); + two = NumericGetDatum(int64_to_numeric(2)); if (DatumGetBool(DirectFunctionCall2(numeric_ge, d, zero))) d = DirectFunctionCall2(numeric_add, d, one); @@ -632,12 +624,10 @@ static Numeric numeric_shift_right(Numeric n, unsigned count) { Datum d = NumericGetDatum(n); - Datum divisor_int64; Datum divisor_numeric; Datum result; - divisor_int64 = Int64GetDatum((int64) (1 << count)); - divisor_numeric = DirectFunctionCall1(int8_numeric, divisor_int64); + divisor_numeric = NumericGetDatum(int64_to_numeric(((int64) 1) << count)); result = DirectFunctionCall2(numeric_div_trunc, d, 
divisor_numeric); return DatumGetNumeric(result); } @@ -832,8 +822,7 @@ pg_size_bytes(PG_FUNCTION_ARGS) { Numeric mul_num; - mul_num = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - Int64GetDatum(multiplier))); + mul_num = int64_to_numeric(multiplier); num = DatumGetNumeric(DirectFunctionCall2(numeric_mul, NumericGetDatum(mul_num), diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 7d09537d82b95..3bb01cdb65ab1 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1381,10 +1381,12 @@ parse_format(FormatNode *node, const char *str, const KeyWord *kw, { int chlen; - if (flags & STD_FLAG) + if ((flags & STD_FLAG) && *str != '"') { /* - * Standard mode, allow only following separators: "-./,':; " + * Standard mode, allow only following separators: "-./,':; ". + * However, we support double quotes even in standard mode + * (see below). This is our extension of standard mode. */ if (strchr("-./,':; ", *str) == NULL) ereport(ERROR, @@ -3346,7 +3348,19 @@ DCH_from_char(FormatNode *node, const char *in, TmFromChar *out, } else { - s += pg_mblen(s); + int chlen = pg_mblen(s); + + /* + * Standard mode requires strict match of format characters. + */ + if (std && n->type == NODE_TYPE_CHAR && + strncmp(s, n->character, chlen) != 0) + RETURN_ERROR(ereport(ERROR, + (errcode(ERRCODE_INVALID_DATETIME_FORMAT), + errmsg("unmatched format character \"%s\"", + n->character)))); + + s += chlen; } continue; } @@ -4555,8 +4569,11 @@ do_to_timestamp(text *date_txt, text *fmt, Oid collid, bool std, { /* If a 4-digit year is provided, we use that and ignore CC. 
*/ tm->tm_year = tmfc.year; - if (tmfc.bc && tm->tm_year > 0) - tm->tm_year = -(tm->tm_year - 1); + if (tmfc.bc) + tm->tm_year = -tm->tm_year; + /* correct for our representation of BC years */ + if (tm->tm_year < 0) + tm->tm_year++; } fmask |= DTK_M(YEAR); } @@ -6070,10 +6087,8 @@ numeric_to_number(PG_FUNCTION_ARGS) if (IS_MULTI(&Num)) { Numeric x; - Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(10))); - Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(-Num.multi))); + Numeric a = int64_to_numeric(10); + Numeric b = int64_to_numeric(-Num.multi); x = DatumGetNumeric(DirectFunctionCall2(numeric_power, NumericGetDatum(a), @@ -6162,10 +6177,8 @@ numeric_to_char(PG_FUNCTION_ARGS) if (IS_MULTI(&Num)) { - Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(10))); - Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(Num.multi))); + Numeric a = int64_to_numeric(10); + Numeric b = int64_to_numeric(Num.multi); x = DatumGetNumeric(DirectFunctionCall2(numeric_power, NumericGetDatum(a), @@ -6339,11 +6352,8 @@ int8_to_char(PG_FUNCTION_ARGS) else if (IS_EEEE(&Num)) { /* to avoid loss of precision, must go via numeric not float8 */ - Numeric val; - - val = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - Int64GetDatum(value))); - orgnum = numeric_out_sci(val, Num.post); + orgnum = numeric_out_sci(int64_to_numeric(value), + Num.post); /* * numeric_out_sci() does not emit a sign for positive numbers. 
We diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c index 3c0dc38a7f84e..31d9d92d14ed5 100644 --- a/src/backend/utils/adt/jsonpath.c +++ b/src/backend/utils/adt/jsonpath.c @@ -660,7 +660,7 @@ printJsonPathItem(StringInfo buf, JsonPathItem *v, bool inKey, else if (v->content.anybounds.first == v->content.anybounds.last) { if (v->content.anybounds.first == PG_UINT32_MAX) - appendStringInfo(buf, "**{last}"); + appendStringInfoString(buf, "**{last}"); else appendStringInfo(buf, "**{%u}", v->content.anybounds.first); diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index f146767bfc3a0..1059f34130aee 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -35,7 +35,7 @@ * executeItemOptUnwrapTarget() function have 'unwrap' argument, which indicates * whether unwrapping of array is needed. When unwrap == true, each of array * members is passed to executeItemOptUnwrapTarget() again but with unwrap == false - * in order to evade subsequent array unwrapping. + * in order to avoid subsequent array unwrapping. * * All boolean expressions (predicates) are evaluated by executeBoolItem() * function, which returns tri-state JsonPathBool. When error is occurred @@ -842,9 +842,7 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, lastjbv = hasNext ? 
&tmpjbv : palloc(sizeof(*lastjbv)); lastjbv->type = jbvNumeric; - lastjbv->val.numeric = - DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(last))); + lastjbv->val.numeric = int64_to_numeric(last); res = executeNextItem(cxt, jsp, &elem, lastjbv, found, hasNext); @@ -1012,9 +1010,7 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp, jb = palloc(sizeof(*jb)); jb->type = jbvNumeric; - jb->val.numeric = - DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(size))); + jb->val.numeric = int64_to_numeric(size); res = executeNextItem(cxt, jsp, NULL, jb, found, false); } @@ -1837,16 +1833,22 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, /* * According to SQL/JSON standard enumerate ISO formats for: date, * timetz, time, timestamptz, timestamp. + * + * We also support ISO 8601 for timestamps, because to_json[b]() + * functions use this format. */ static const char *fmt_str[] = { "yyyy-mm-dd", - "HH24:MI:SS TZH:TZM", - "HH24:MI:SS TZH", + "HH24:MI:SSTZH:TZM", + "HH24:MI:SSTZH", "HH24:MI:SS", - "yyyy-mm-dd HH24:MI:SS TZH:TZM", - "yyyy-mm-dd HH24:MI:SS TZH", - "yyyy-mm-dd HH24:MI:SS" + "yyyy-mm-dd HH24:MI:SSTZH:TZM", + "yyyy-mm-dd HH24:MI:SSTZH", + "yyyy-mm-dd HH24:MI:SS", + "yyyy-mm-dd\"T\"HH24:MI:SSTZH:TZM", + "yyyy-mm-dd\"T\"HH24:MI:SSTZH", + "yyyy-mm-dd\"T\"HH24:MI:SS" }; /* cache for format texts */ @@ -1979,8 +1981,7 @@ executeKeyValueMethod(JsonPathExecContext *cxt, JsonPathItem *jsp, id += (int64) cxt->baseObject.id * INT64CONST(10000000000); idval.type = jbvNumeric; - idval.val.numeric = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - Int64GetDatum(id))); + idval.val.numeric = int64_to_numeric(id); it = JsonbIteratorInit(jbc); @@ -2587,9 +2588,9 @@ checkTimezoneIsUsedForCast(bool useTz, const char *type1, const char *type2) if (!useTz) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot convert value from %s to %s without timezone usage", + errmsg("cannot convert 
value from %s to %s without time zone usage", type1, type2), - errhint("Use *_tz() function for timezone support."))); + errhint("Use *_tz() function for time zone support."))); } /* Convert time datum to timetz datum */ @@ -2601,93 +2602,36 @@ castTimeToTimeTz(Datum time, bool useTz) return DirectFunctionCall1(time_timetz, time); } -/*--- - * Compares 'ts1' and 'ts2' timestamp, assuming that ts1 might be overflowed - * during cast from another datatype. - * - * 'overflow1' specifies overflow of 'ts1' value: - * 0 - no overflow, - * -1 - exceed lower boundary, - * 1 - exceed upper boundary. - */ -static int -cmpTimestampWithOverflow(Timestamp ts1, int overflow1, Timestamp ts2) -{ - /* - * All the timestamps we deal with in jsonpath are produced by - * to_datetime() method. So, they should be valid. - */ - Assert(IS_VALID_TIMESTAMP(ts2)); - - /* - * Timestamp, which exceed lower (upper) bound, is always lower (higher) - * than any valid timestamp except minus (plus) infinity. - */ - if (overflow1) - { - if (overflow1 < 0) - { - if (TIMESTAMP_IS_NOBEGIN(ts2)) - return 1; - else - return -1; - } - if (overflow1 > 0) - { - if (TIMESTAMP_IS_NOEND(ts2)) - return -1; - else - return 1; - } - } - - return timestamp_cmp_internal(ts1, ts2); -} - /* - * Compare date to timestamptz without throwing overflow error during cast. + * Compare date to timestamp. + * Note that this doesn't involve any timezone considerations. */ static int cmpDateToTimestamp(DateADT date1, Timestamp ts2, bool useTz) { - TimestampTz ts1; - int overflow = 0; - - ts1 = date2timestamp_opt_overflow(date1, &overflow); - - return cmpTimestampWithOverflow(ts1, overflow, ts2); + return date_cmp_timestamp_internal(date1, ts2); } /* - * Compare date to timestamptz without throwing overflow error during cast. + * Compare date to timestamptz. 
*/ static int cmpDateToTimestampTz(DateADT date1, TimestampTz tstz2, bool useTz) { - TimestampTz tstz1; - int overflow = 0; - checkTimezoneIsUsedForCast(useTz, "date", "timestamptz"); - tstz1 = date2timestamptz_opt_overflow(date1, &overflow); - - return cmpTimestampWithOverflow(tstz1, overflow, tstz2); + return date_cmp_timestamptz_internal(date1, tstz2); } /* - * Compare timestamp to timestamptz without throwing overflow error during cast. + * Compare timestamp to timestamptz. */ static int cmpTimestampToTimestampTz(Timestamp ts1, TimestampTz tstz2, bool useTz) { - TimestampTz tstz1; - int overflow = 0; - checkTimezoneIsUsedForCast(useTz, "timestamp", "timestamptz"); - tstz1 = timestamp2timestamptz_opt_overflow(ts1, &overflow); - - return cmpTimestampWithOverflow(tstz1, overflow, tstz2); + return timestamp_cmp_timestamptz_internal(ts1, tstz2); } /* diff --git a/src/backend/utils/adt/jsonpath_gram.y b/src/backend/utils/adt/jsonpath_gram.y index 88ef9550e9db0..53f422260c382 100644 --- a/src/backend/utils/adt/jsonpath_gram.y +++ b/src/backend/utils/adt/jsonpath_gram.y @@ -441,7 +441,7 @@ makeItemList(List *list) while (end->next) end = end->next; - for_each_cell(cell, list, list_second_cell(list)) + for_each_from(cell, list, 1) { JsonPathParseItem *c = (JsonPathParseItem *) lfirst(cell); diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 37c23c9155afb..b2bf9fa8cbcfd 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -416,12 +416,16 @@ pg_get_keywords(PG_FUNCTION_ARGS) funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); - tupdesc = CreateTemplateTupleDesc(3); + tupdesc = CreateTemplateTupleDesc(5); TupleDescInitEntry(tupdesc, (AttrNumber) 1, "word", TEXTOID, -1, 0); TupleDescInitEntry(tupdesc, (AttrNumber) 2, "catcode", CHAROID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "catdesc", + TupleDescInitEntry(tupdesc, (AttrNumber) 3, "barelabel", + BOOLOID, -1, 
0); + TupleDescInitEntry(tupdesc, (AttrNumber) 4, "catdesc", + TEXTOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 5, "baredesc", TEXTOID, -1, 0); funcctx->attinmeta = TupleDescGetAttInMetadata(tupdesc); @@ -433,7 +437,7 @@ pg_get_keywords(PG_FUNCTION_ARGS) if (funcctx->call_cntr < ScanKeywords.num_keywords) { - char *values[3]; + char *values[5]; HeapTuple tuple; /* cast-away-const is ugly but alternatives aren't much better */ @@ -445,26 +449,37 @@ pg_get_keywords(PG_FUNCTION_ARGS) { case UNRESERVED_KEYWORD: values[1] = "U"; - values[2] = _("unreserved"); + values[3] = _("unreserved"); break; case COL_NAME_KEYWORD: values[1] = "C"; - values[2] = _("unreserved (cannot be function or type name)"); + values[3] = _("unreserved (cannot be function or type name)"); break; case TYPE_FUNC_NAME_KEYWORD: values[1] = "T"; - values[2] = _("reserved (can be function or type name)"); + values[3] = _("reserved (can be function or type name)"); break; case RESERVED_KEYWORD: values[1] = "R"; - values[2] = _("reserved"); + values[3] = _("reserved"); break; default: /* shouldn't be possible */ values[1] = NULL; - values[2] = NULL; + values[3] = NULL; break; } + if (ScanKeywordBareLabel[funcctx->call_cntr]) + { + values[2] = "true"; + values[4] = _("can be bare label"); + } + else + { + values[2] = "false"; + values[4] = _("requires AS"); + } + tuple = BuildTupleFromCStrings(funcctx->attinmeta, values); SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 69d313dd52b1b..20c9cac2fa2e0 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -592,7 +592,8 @@ static void round_var(NumericVar *var, int rscale); static void trunc_var(NumericVar *var, int rscale); static void strip_var(NumericVar *var); static void compute_bucket(Numeric operand, Numeric bound1, Numeric bound2, - const NumericVar *count_var, NumericVar *result_var); + const NumericVar *count_var, bool 
reversed_bounds, + NumericVar *result_var); static void accum_sum_add(NumericSumAccum *accum, const NumericVar *var1); static void accum_sum_rescale(NumericSumAccum *accum, const NumericVar *val); @@ -1724,10 +1725,11 @@ width_bucket_numeric(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), errmsg("operand, lower bound, and upper bound cannot be NaN"))); - else + /* We allow "operand" to be infinite; cmp_numerics will cope */ + if (NUMERIC_IS_INF(bound1) || NUMERIC_IS_INF(bound2)) ereport(ERROR, (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - errmsg("operand, lower bound, and upper bound cannot be infinity"))); + errmsg("lower and upper bounds must be finite"))); } init_var(&result_var); @@ -1751,8 +1753,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS) else if (cmp_numerics(operand, bound2) >= 0) add_var(&count_var, &const_one, &result_var); else - compute_bucket(operand, bound1, bound2, - &count_var, &result_var); + compute_bucket(operand, bound1, bound2, &count_var, false, + &result_var); break; /* bound1 > bound2 */ @@ -1762,8 +1764,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS) else if (cmp_numerics(operand, bound2) <= 0) add_var(&count_var, &const_one, &result_var); else - compute_bucket(operand, bound1, bound2, - &count_var, &result_var); + compute_bucket(operand, bound1, bound2, &count_var, true, + &result_var); break; } @@ -1782,11 +1784,13 @@ width_bucket_numeric(PG_FUNCTION_ARGS) /* * If 'operand' is not outside the bucket range, determine the correct * bucket for it to go. The calculations performed by this function - * are derived directly from the SQL2003 spec. + * are derived directly from the SQL2003 spec. Note however that we + * multiply by count before dividing, to avoid unnecessary roundoff error. 
*/ static void compute_bucket(Numeric operand, Numeric bound1, Numeric bound2, - const NumericVar *count_var, NumericVar *result_var) + const NumericVar *count_var, bool reversed_bounds, + NumericVar *result_var) { NumericVar bound1_var; NumericVar bound2_var; @@ -1796,23 +1800,21 @@ compute_bucket(Numeric operand, Numeric bound1, Numeric bound2, init_var_from_num(bound2, &bound2_var); init_var_from_num(operand, &operand_var); - if (cmp_var(&bound1_var, &bound2_var) < 0) + if (!reversed_bounds) { sub_var(&operand_var, &bound1_var, &operand_var); sub_var(&bound2_var, &bound1_var, &bound2_var); - div_var(&operand_var, &bound2_var, result_var, - select_div_scale(&operand_var, &bound2_var), true); } else { sub_var(&bound1_var, &operand_var, &operand_var); - sub_var(&bound1_var, &bound2_var, &bound1_var); - div_var(&operand_var, &bound1_var, result_var, - select_div_scale(&operand_var, &bound1_var), true); + sub_var(&bound1_var, &bound2_var, &bound2_var); } - mul_var(result_var, count_var, result_var, - result_var->dscale + count_var->dscale); + mul_var(&operand_var, count_var, &operand_var, + operand_var.dscale + count_var->dscale); + div_var(&operand_var, &bound2_var, result_var, + select_div_scale(&operand_var, &bound2_var), true); add_var(result_var, &const_one, result_var); floor_var(result_var, result_var); @@ -4073,23 +4075,29 @@ numeric_trim_scale(PG_FUNCTION_ARGS) * ---------------------------------------------------------------------- */ - -Datum -int4_numeric(PG_FUNCTION_ARGS) +Numeric +int64_to_numeric(int64 val) { - int32 val = PG_GETARG_INT32(0); Numeric res; NumericVar result; init_var(&result); - int64_to_numericvar((int64) val, &result); + int64_to_numericvar(val, &result); res = make_result(&result); free_var(&result); - PG_RETURN_NUMERIC(res); + return res; +} + +Datum +int4_numeric(PG_FUNCTION_ARGS) +{ + int32 val = PG_GETARG_INT32(0); + + PG_RETURN_NUMERIC(int64_to_numeric(val)); } int32 @@ -4174,18 +4182,8 @@ Datum int8_numeric(PG_FUNCTION_ARGS) { 
int64 val = PG_GETARG_INT64(0); - Numeric res; - NumericVar result; - - init_var(&result); - - int64_to_numericvar(val, &result); - - res = make_result(&result); - - free_var(&result); - PG_RETURN_NUMERIC(res); + PG_RETURN_NUMERIC(int64_to_numeric(val)); } @@ -4224,18 +4222,8 @@ Datum int2_numeric(PG_FUNCTION_ARGS) { int16 val = PG_GETARG_INT16(0); - Numeric res; - NumericVar result; - init_var(&result); - - int64_to_numericvar((int64) val, &result); - - res = make_result(&result); - - free_var(&result); - - PG_RETURN_NUMERIC(res); + PG_RETURN_NUMERIC(int64_to_numeric(val)); } @@ -5290,11 +5278,7 @@ int2_accum(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 do_int128_accum(state, (int128) PG_GETARG_INT16(1)); #else - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int2_numeric, - PG_GETARG_DATUM(1))); - do_numeric_accum(state, newval); + do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT16(1))); #endif } @@ -5317,11 +5301,7 @@ int4_accum(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 do_int128_accum(state, (int128) PG_GETARG_INT32(1)); #else - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - PG_GETARG_DATUM(1))); - do_numeric_accum(state, newval); + do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT32(1))); #endif } @@ -5340,13 +5320,7 @@ int8_accum(PG_FUNCTION_ARGS) state = makeNumericAggState(fcinfo, true); if (!PG_ARGISNULL(1)) - { - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - PG_GETARG_DATUM(1))); - do_numeric_accum(state, newval); - } + do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT64(1))); PG_RETURN_POINTER(state); } @@ -5570,11 +5544,7 @@ int8_avg_accum(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 do_int128_accum(state, (int128) PG_GETARG_INT64(1)); #else - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - PG_GETARG_DATUM(1))); - do_numeric_accum(state, newval); + do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT64(1))); #endif } @@ -5767,13 +5737,8 
@@ int2_accum_inv(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 do_int128_discard(state, (int128) PG_GETARG_INT16(1)); #else - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int2_numeric, - PG_GETARG_DATUM(1))); - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, newval)) + if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT16(1)))) elog(ERROR, "do_numeric_discard failed unexpectedly"); #endif } @@ -5797,13 +5762,8 @@ int4_accum_inv(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 do_int128_discard(state, (int128) PG_GETARG_INT32(1)); #else - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - PG_GETARG_DATUM(1))); - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, newval)) + if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT32(1)))) elog(ERROR, "do_numeric_discard failed unexpectedly"); #endif } @@ -5824,13 +5784,8 @@ int8_accum_inv(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - PG_GETARG_DATUM(1))); - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, newval)) + if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT64(1)))) elog(ERROR, "do_numeric_discard failed unexpectedly"); } @@ -5853,13 +5808,8 @@ int8_avg_accum_inv(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 do_int128_discard(state, (int128) PG_GETARG_INT64(1)); #else - Numeric newval; - - newval = DatumGetNumeric(DirectFunctionCall1(int8_numeric, - PG_GETARG_DATUM(1))); - /* Should never fail, all inputs have dscale 0 */ - if (!do_numeric_discard(state, newval)) + if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT64(1)))) elog(ERROR, "do_numeric_discard failed unexpectedly"); #endif } @@ -5914,8 +5864,7 @@ numeric_poly_avg(PG_FUNCTION_ARGS) int128_to_numericvar(state->sumX, &result); - countd = DirectFunctionCall1(int8_numeric, - Int64GetDatumFast(state->N)); + countd = 
NumericGetDatum(int64_to_numeric(state->N)); sumd = NumericGetDatum(make_result(&result)); free_var(&result); @@ -5951,7 +5900,7 @@ numeric_avg(PG_FUNCTION_ARGS) if (state->nInfcount > 0) PG_RETURN_NUMERIC(make_result(&const_ninf)); - N_datum = DirectFunctionCall1(int8_numeric, Int64GetDatum(state->N)); + N_datum = NumericGetDatum(int64_to_numeric(state->N)); init_var(&sumX_var); accum_sum_final(&state->sumX, &sumX_var); @@ -6411,7 +6360,6 @@ Datum int8_sum(PG_FUNCTION_ARGS) { Numeric oldsum; - Datum newval; if (PG_ARGISNULL(0)) { @@ -6419,8 +6367,7 @@ int8_sum(PG_FUNCTION_ARGS) if (PG_ARGISNULL(1)) PG_RETURN_NULL(); /* still no non-null */ /* This is the first non-null input. */ - newval = DirectFunctionCall1(int8_numeric, PG_GETARG_DATUM(1)); - PG_RETURN_DATUM(newval); + PG_RETURN_NUMERIC(int64_to_numeric(PG_GETARG_INT64(1))); } /* @@ -6436,10 +6383,9 @@ int8_sum(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(oldsum); /* OK to do the addition. */ - newval = DirectFunctionCall1(int8_numeric, PG_GETARG_DATUM(1)); - PG_RETURN_DATUM(DirectFunctionCall2(numeric_add, - NumericGetDatum(oldsum), newval)); + NumericGetDatum(oldsum), + NumericGetDatum(int64_to_numeric(PG_GETARG_INT64(1))))); } @@ -6618,10 +6564,8 @@ int8_avg(PG_FUNCTION_ARGS) if (transdata->count == 0) PG_RETURN_NULL(); - countd = DirectFunctionCall1(int8_numeric, - Int64GetDatumFast(transdata->count)); - sumd = DirectFunctionCall1(int8_numeric, - Int64GetDatumFast(transdata->sum)); + countd = NumericGetDatum(int64_to_numeric(transdata->count)); + sumd = NumericGetDatum(int64_to_numeric(transdata->sum)); PG_RETURN_DATUM(DirectFunctionCall2(numeric_div, sumd, countd)); } diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 95738a4e34eec..472fa596e1f8a 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -1697,6 +1697,42 @@ pg_stat_get_buf_alloc(PG_FUNCTION_ARGS) PG_RETURN_INT64(pgstat_fetch_global()->buf_alloc); } +/* + * Returns 
statistics of WAL activity + */ +Datum +pg_stat_get_wal(PG_FUNCTION_ARGS) +{ +#define PG_STAT_GET_WAL_COLS 2 + TupleDesc tupdesc; + Datum values[PG_STAT_GET_WAL_COLS]; + bool nulls[PG_STAT_GET_WAL_COLS]; + PgStat_WalStats *wal_stats; + + /* Initialise values and NULL flags arrays */ + MemSet(values, 0, sizeof(values)); + MemSet(nulls, 0, sizeof(nulls)); + + /* Initialise attributes information in the tuple descriptor */ + tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_WAL_COLS); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "wal_buffers_full", + INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "stats_reset", + TIMESTAMPTZOID, -1, 0); + + BlessTupleDesc(tupdesc); + + /* Get statistics about WAL activity */ + wal_stats = pgstat_fetch_stat_wal(); + + /* Fill values and NULLs */ + values[0] = Int64GetDatum(wal_stats->wal_buffers_full); + values[1] = TimestampTzGetDatum(wal_stats->stat_reset_timestamp); + + /* Returns the record as Datum */ + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); +} + /* * Returns statistics of SLRU caches. */ @@ -2033,6 +2069,20 @@ pg_stat_reset_slru(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* Reset replication slots stats (a specific one or all of them). 
*/ +Datum +pg_stat_reset_replication_slot(PG_FUNCTION_ARGS) +{ + char *target = NULL; + + if (!PG_ARGISNULL(0)) + target = text_to_cstring(PG_GETARG_TEXT_PP(0)); + + pgstat_reset_replslot_counter(target); + + PG_RETURN_VOID(); +} + Datum pg_stat_get_archiver(PG_FUNCTION_ARGS) { @@ -2098,3 +2148,69 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS) /* Returns the record as Datum */ PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); } + +/* Get the statistics for the replication slots */ +Datum +pg_stat_get_replication_slots(PG_FUNCTION_ARGS) +{ +#define PG_STAT_GET_REPLICATION_SLOT_COLS 5 + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate *tupstore; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + PgStat_ReplSlotStats *slotstats; + int nstats; + int i; + + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("materialize mode required, but it is not allowed in this context"))); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); + + tupstore = tuplestore_begin_heap(true, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + MemoryContextSwitchTo(oldcontext); + + slotstats = pgstat_fetch_replslot(&nstats); + for (i = 0; i < nstats; i++) + { + Datum values[PG_STAT_GET_REPLICATION_SLOT_COLS]; + bool nulls[PG_STAT_GET_REPLICATION_SLOT_COLS]; + PgStat_ReplSlotStats *s = 
&(slotstats[i]); + + MemSet(values, 0, sizeof(values)); + MemSet(nulls, 0, sizeof(nulls)); + + values[0] = PointerGetDatum(cstring_to_text(s->slotname)); + values[1] = Int64GetDatum(s->spill_txns); + values[2] = Int64GetDatum(s->spill_count); + values[3] = Int64GetDatum(s->spill_bytes); + + if (s->stat_reset_timestamp == 0) + nulls[4] = true; + else + values[4] = TimestampTzGetDatum(s->stat_reset_timestamp); + + tuplestore_putvalues(tupstore, tupdesc, values, nulls); + } + + tuplestore_donestoring(tupstore); + + return (Datum) 0; +} diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 06cf16d9d7164..7e2b2e3dd6468 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -1663,7 +1663,7 @@ RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel) appendStringInfo(&querybuf, ") WHERE %s AND (", constraintDef); else - appendStringInfo(&querybuf, ") WHERE ("); + appendStringInfoString(&querybuf, ") WHERE ("); sep = ""; for (i = 0; i < riinfo->nkeys; i++) diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 60dd80c23c87d..6c656586e8578 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -5250,7 +5250,7 @@ get_select_query_def(Query *query, deparse_context *context, appendContextKeyword(context, " FETCH FIRST ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); get_rule_expr(query->limitCount, context, false); - appendStringInfo(buf, " ROWS WITH TIES"); + appendStringInfoString(buf, " ROWS WITH TIES"); } else { @@ -8113,7 +8113,7 @@ get_rule_expr(Node *node, deparse_context *context, { BoolExpr *expr = (BoolExpr *) node; Node *first_arg = linitial(expr->args); - ListCell *arg = list_second_cell(expr->args); + ListCell *arg; switch (expr->boolop) { @@ -8122,12 +8122,11 @@ get_rule_expr(Node *node, deparse_context *context, appendStringInfoChar(buf, '('); get_rule_expr_paren(first_arg, context, false, node); - while 
(arg) + for_each_from(arg, expr->args, 1) { appendStringInfoString(buf, " AND "); get_rule_expr_paren((Node *) lfirst(arg), context, false, node); - arg = lnext(expr->args, arg); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); @@ -8138,12 +8137,11 @@ get_rule_expr(Node *node, deparse_context *context, appendStringInfoChar(buf, '('); get_rule_expr_paren(first_arg, context, false, node); - while (arg) + for_each_from(arg, expr->args, 1) { appendStringInfoString(buf, " OR "); get_rule_expr_paren((Node *) lfirst(arg), context, false, node); - arg = lnext(expr->args, arg); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); @@ -8192,7 +8190,12 @@ get_rule_expr(Node *node, deparse_context *context, AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; ListCell *lc; - /* As above, this can only happen during EXPLAIN */ + /* + * This case cannot be reached in normal usage, since no + * AlternativeSubPlan can appear either in parsetrees or + * finished plan trees. We keep it just in case somebody + * wants to use this code to print planner data structures. + */ appendStringInfoString(buf, "(alternatives: "); foreach(lc, asplan->subplans) { @@ -9198,35 +9201,14 @@ get_oper_expr(OpExpr *expr, deparse_context *context) } else { - /* unary operator --- but which side? 
*/ + /* prefix operator */ Node *arg = (Node *) linitial(args); - HeapTuple tp; - Form_pg_operator optup; - - tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); - if (!HeapTupleIsValid(tp)) - elog(ERROR, "cache lookup failed for operator %u", opno); - optup = (Form_pg_operator) GETSTRUCT(tp); - switch (optup->oprkind) - { - case 'l': - appendStringInfo(buf, "%s ", - generate_operator_name(opno, - InvalidOid, - exprType(arg))); - get_rule_expr_paren(arg, context, true, (Node *) expr); - break; - case 'r': - get_rule_expr_paren(arg, context, true, (Node *) expr); - appendStringInfo(buf, " %s", - generate_operator_name(opno, - exprType(arg), - InvalidOid)); - break; - default: - elog(ERROR, "bogus oprkind: %d", optup->oprkind); - } - ReleaseSysCache(tp); + + appendStringInfo(buf, "%s ", + generate_operator_name(opno, + InvalidOid, + exprType(arg))); + get_rule_expr_paren(arg, context, true, (Node *) expr); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); @@ -11087,10 +11069,6 @@ generate_operator_name(Oid operid, Oid arg1, Oid arg2) p_result = left_oper(NULL, list_make1(makeString(oprname)), arg2, true, -1); break; - case 'r': - p_result = right_oper(NULL, list_make1(makeString(oprname)), arg1, - true, -1); - break; default: elog(ERROR, "unrecognized oprkind: %d", operform->oprkind); p_result = NULL; /* keep compiler quiet */ @@ -11384,7 +11362,7 @@ get_range_partbound_string(List *bound_datums) memset(&context, 0, sizeof(deparse_context)); context.buf = buf; - appendStringInfoString(buf, "("); + appendStringInfoChar(buf, '('); sep = ""; foreach(cell, bound_datums) { diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 00c7afc66fc27..bec357fcef042 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -3519,7 +3519,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, * for remaining Vars on other rels. 
*/ relvarinfos = lappend(relvarinfos, varinfo1); - for_each_cell(l, varinfos, list_second_cell(varinfos)) + for_each_from(l, varinfos, 1) { GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l); diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index 5fe304cea7530..ea0ada704f207 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -556,17 +556,21 @@ make_timestamp_internal(int year, int month, int day, TimeOffset date; TimeOffset time; int dterr; + bool bc = false; Timestamp result; tm.tm_year = year; tm.tm_mon = month; tm.tm_mday = day; - /* - * Note: we'll reject zero or negative year values. Perhaps negatives - * should be allowed to represent BC years? - */ - dterr = ValidateDate(DTK_DATE_M, false, false, false, &tm); + /* Handle negative years as BC */ + if (tm.tm_year < 0) + { + bc = true; + tm.tm_year = -tm.tm_year; + } + + dterr = ValidateDate(DTK_DATE_M, false, false, bc, &tm); if (dterr != 0) ereport(ERROR, @@ -2152,16 +2156,34 @@ timestamp_hash_extended(PG_FUNCTION_ARGS) * Cross-type comparison functions for timestamp vs timestamptz */ +int32 +timestamp_cmp_timestamptz_internal(Timestamp timestampVal, TimestampTz dt2) +{ + TimestampTz dt1; + int overflow; + + dt1 = timestamp2timestamptz_opt_overflow(timestampVal, &overflow); + if (overflow > 0) + { + /* dt1 is larger than any finite timestamp, but less than infinity */ + return TIMESTAMP_IS_NOEND(dt2) ? -1 : +1; + } + if (overflow < 0) + { + /* dt1 is less than any finite timestamp, but more than -infinity */ + return TIMESTAMP_IS_NOBEGIN(dt2) ? 
+1 : -1; + } + + return timestamptz_cmp_internal(dt1, dt2); +} + Datum timestamp_eq_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt2) == 0); } Datum @@ -2169,11 +2191,8 @@ timestamp_ne_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt2) != 0); } Datum @@ -2181,11 +2200,8 @@ timestamp_lt_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - dt1 = timestamp2timestamptz(timestampVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt2) < 0); } Datum @@ -2193,11 +2209,8 @@ timestamp_gt_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt2) > 0); } Datum @@ -2205,11 +2218,8 @@ timestamp_le_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - - dt1 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt2) <= 0); } Datum @@ -2217,11 +2227,8 @@ timestamp_ge_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = 
PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - dt1 = timestamp2timestamptz(timestampVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt2) >= 0); } Datum @@ -2229,11 +2236,8 @@ timestamp_cmp_timestamptz(PG_FUNCTION_ARGS) { Timestamp timestampVal = PG_GETARG_TIMESTAMP(0); TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1); - TimestampTz dt1; - dt1 = timestamp2timestamptz(timestampVal); - - PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); + PG_RETURN_INT32(timestamp_cmp_timestamptz_internal(timestampVal, dt2)); } Datum @@ -2241,11 +2245,8 @@ timestamptz_eq_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - dt2 = timestamp2timestamptz(timestampVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) == 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt1) == 0); } Datum @@ -2253,11 +2254,8 @@ timestamptz_ne_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - dt2 = timestamp2timestamptz(timestampVal); - - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) != 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt1) != 0); } Datum @@ -2265,11 +2263,8 @@ timestamptz_lt_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - - dt2 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) < 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt1) > 0); } Datum @@ -2277,11 +2272,8 @@ timestamptz_gt_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - - dt2 = timestamp2timestamptz(timestampVal); - 
PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) > 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt1) < 0); } Datum @@ -2289,11 +2281,8 @@ timestamptz_le_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - - dt2 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) <= 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt1) >= 0); } Datum @@ -2301,11 +2290,8 @@ timestamptz_ge_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - - dt2 = timestamp2timestamptz(timestampVal); - PG_RETURN_BOOL(timestamp_cmp_internal(dt1, dt2) >= 0); + PG_RETURN_BOOL(timestamp_cmp_timestamptz_internal(timestampVal, dt1) <= 0); } Datum @@ -2313,11 +2299,8 @@ timestamptz_cmp_timestamp(PG_FUNCTION_ARGS) { TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0); Timestamp timestampVal = PG_GETARG_TIMESTAMP(1); - TimestampTz dt2; - dt2 = timestamp2timestamptz(timestampVal); - - PG_RETURN_INT32(timestamp_cmp_internal(dt1, dt2)); + PG_RETURN_INT32(-timestamp_cmp_timestamptz_internal(timestampVal, dt1)); } @@ -5174,9 +5157,12 @@ timestamp_timestamptz(PG_FUNCTION_ARGS) /* * Convert timestamp to timestamp with time zone. * - * On overflow error is thrown if 'overflow' is NULL. Otherwise, '*overflow' - * is set to -1 (+1) when result value exceed lower (upper) boundary and zero - * returned. + * On successful conversion, *overflow is set to zero if it's not NULL. + * + * If the timestamp is finite but out of the valid range for timestamptz, then: + * if overflow is NULL, we throw an out-of-range error. + * if overflow is not NULL, we store +1 or -1 there to indicate the sign + * of the overflow, and return the appropriate timestamptz infinity. 
*/ TimestampTz timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow) @@ -5187,10 +5173,14 @@ timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow) fsec_t fsec; int tz; + if (overflow) + *overflow = 0; + if (TIMESTAMP_NOT_FINITE(timestamp)) return timestamp; - if (!timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL)) + /* We don't expect this to fail, but check it pro forma */ + if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) == 0) { tz = DetermineTimeZoneOffset(tm, session_timezone); @@ -5203,13 +5193,16 @@ timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow) else if (overflow) { if (result < MIN_TIMESTAMP) + { *overflow = -1; + TIMESTAMP_NOBEGIN(result); + } else { - Assert(result >= END_TIMESTAMP); *overflow = 1; + TIMESTAMP_NOEND(result); } - return (TimestampTz) 0; + return result; } } @@ -5221,7 +5214,7 @@ timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow) } /* - * Single-argument version of timestamp2timestamptz_opt_overflow(). + * Promote timestamp to timestamptz, throwing error for overflow. */ static TimestampTz timestamp2timestamptz(Timestamp timestamp) diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 96ecad02ddb19..9061af81a3e3a 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -1243,14 +1243,6 @@ RelationBuildDesc(Oid targetRelId, bool insertIt) if (insertIt) RelationCacheInsert(relation, true); - /* - * For RelationNeedsWAL() to answer correctly on parallel workers, restore - * rd_firstRelfilenodeSubid. No subtransactions start or end while in - * parallel mode, so the specific SubTransactionId does not matter. 
- */ - if (IsParallelWorker() && RelFileNodeSkippingWAL(relation->rd_node)) - relation->rd_firstRelfilenodeSubid = TopSubTransactionId; - /* It's fully valid */ relation->rd_isvalid = true; @@ -1273,6 +1265,8 @@ RelationBuildDesc(Oid targetRelId, bool insertIt) static void RelationInitPhysicalAddr(Relation relation) { + Oid oldnode = relation->rd_node.relNode; + /* these relations kinds never have storage */ if (!RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) return; @@ -1330,6 +1324,19 @@ RelationInitPhysicalAddr(Relation relation) elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u", RelationGetRelationName(relation), relation->rd_id); } + + /* + * For RelationNeedsWAL() to answer correctly on parallel workers, restore + * rd_firstRelfilenodeSubid. No subtransactions start or end while in + * parallel mode, so the specific SubTransactionId does not matter. + */ + if (IsParallelWorker() && oldnode != relation->rd_node.relNode) + { + if (RelFileNodeSkippingWAL(relation->rd_node)) + relation->rd_firstRelfilenodeSubid = TopSubTransactionId; + else + relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId; + } } /* diff --git a/src/backend/utils/error/assert.c b/src/backend/utils/error/assert.c index 4dfa3269238f0..a8c0a8ec487c9 100644 --- a/src/backend/utils/error/assert.c +++ b/src/backend/utils/error/assert.c @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * assert.c - * Assert code. + * Assert support code. 
* * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -10,9 +10,6 @@ * IDENTIFICATION * src/backend/utils/error/assert.c * - * NOTE - * This should eventually work with elog() - * *------------------------------------------------------------------------- */ #include "postgres.h" @@ -24,6 +21,10 @@ /* * ExceptionalCondition - Handles the failure of an Assert() + * + * We intentionally do not go through elog() here, on the grounds of + * wanting to minimize the amount of infrastructure that has to be + * working to report an assertion failure. */ void ExceptionalCondition(const char *conditionName, @@ -31,20 +32,21 @@ ExceptionalCondition(const char *conditionName, const char *fileName, int lineNumber) { + /* Report the failure on stderr (or local equivalent) */ if (!PointerIsValid(conditionName) || !PointerIsValid(fileName) || !PointerIsValid(errorType)) - write_stderr("TRAP: ExceptionalCondition: bad arguments\n"); + write_stderr("TRAP: ExceptionalCondition: bad arguments in PID %d\n", + (int) getpid()); else - { - write_stderr("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n", + write_stderr("TRAP: %s(\"%s\", File: \"%s\", Line: %d, PID: %d)\n", errorType, conditionName, - fileName, lineNumber); - } + fileName, lineNumber, (int) getpid()); /* Usually this shouldn't be needed, but make sure the msg went out */ fflush(stderr); + /* If we have support for it, dump a simple backtrace */ #ifdef HAVE_BACKTRACE_SYMBOLS { void *buf[100]; @@ -55,12 +57,12 @@ ExceptionalCondition(const char *conditionName, } #endif -#ifdef SLEEP_ON_ASSERT - /* - * It would be nice to use pg_usleep() here, but only does 2000 sec or 33 - * minutes, which seems too short. + * If configured to do so, sleep indefinitely to allow user to attach a + * debugger. It would be nice to use pg_usleep() here, but that can sleep + * at most 2G usec or ~33 minutes, which seems too short. 
*/ +#ifdef SLEEP_ON_ASSERT sleep(1000000); #endif diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index d0b368530e7e3..1ba47c194b2f7 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -711,10 +711,7 @@ errcode_for_socket_access(void) switch (edata->saved_errno) { /* Loss of connection */ - case EPIPE: -#ifdef ECONNRESET - case ECONNRESET: -#endif + case ALL_CONNECTION_FAILURE_ERRNOS: edata->sqlerrcode = ERRCODE_CONNECTION_FAILURE; break; diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index 03c614b234a33..2681b7fbc6035 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -2000,7 +2000,7 @@ get_fn_opclass_options(FmgrInfo *flinfo) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("opclass options info is absent in function call context"))); + errmsg("operator class options info is absent in function call context"))); return NULL; } diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index 78ed85720385c..b9efa7729152e 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -1233,7 +1233,8 @@ get_func_trftypes(HeapTuple procTup, * are set to NULL. You don't get anything if proargnames is NULL. 
*/ int -get_func_input_arg_names(Datum proargnames, Datum proargmodes, +get_func_input_arg_names(char prokind, + Datum proargnames, Datum proargmodes, char ***arg_names) { ArrayType *arr; @@ -1291,6 +1292,7 @@ get_func_input_arg_names(Datum proargnames, Datum proargmodes, if (argmodes == NULL || argmodes[i] == PROARGMODE_IN || argmodes[i] == PROARGMODE_INOUT || + (argmodes[i] == PROARGMODE_OUT && prokind == PROKIND_PROCEDURE) || argmodes[i] == PROARGMODE_VARIADIC) { char *pname = TextDatumGetCString(argnames[i]); diff --git a/src/backend/utils/generate-errcodes.pl b/src/backend/utils/generate-errcodes.pl index 868a163578d7d..1a071fbb1f43c 100644 --- a/src/backend/utils/generate-errcodes.pl +++ b/src/backend/utils/generate-errcodes.pl @@ -3,8 +3,8 @@ # Generate the errcodes.h header from errcodes.txt # Copyright (c) 2000-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; print "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n"; diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index f4fbccdd7e444..d14d875c9341d 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -122,7 +122,6 @@ #define DEF_SEGSIZE 256 #define DEF_SEGSIZE_SHIFT 8 /* must be log2(DEF_SEGSIZE) */ #define DEF_DIRSIZE 256 -#define DEF_FFACTOR 1 /* default fill factor */ /* Number of freelists to be used for a partitioned hash table. 
*/ #define NUM_FREELISTS 32 @@ -191,7 +190,6 @@ struct HASHHDR Size keysize; /* hash key length in bytes */ Size entrysize; /* total user element size in bytes */ long num_partitions; /* # partitions (must be power of 2), or 0 */ - long ffactor; /* target fill factor */ long max_dsize; /* 'dsize' limit if directory is fixed size */ long ssize; /* segment size --- must be power of 2 */ int sshift; /* segment shift = log2(ssize) */ @@ -497,8 +495,6 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags) /* ssize had better be a power of 2 */ Assert(hctl->ssize == (1L << hctl->sshift)); } - if (flags & HASH_FFACTOR) - hctl->ffactor = info->ffactor; /* * SHM hash tables have fixed directory size passed by the caller. @@ -603,8 +599,6 @@ hdefault(HTAB *hashp) hctl->num_partitions = 0; /* not partitioned */ - hctl->ffactor = DEF_FFACTOR; - /* table has no fixed maximum size */ hctl->max_dsize = NO_MAX_DSIZE; @@ -670,11 +664,10 @@ init_htab(HTAB *hashp, long nelem) SpinLockInit(&(hctl->freeList[i].mutex)); /* - * Divide number of elements by the fill factor to determine a desired - * number of buckets. Allocate space for the next greater power of two - * number of buckets + * Allocate space for the next greater power of two number of buckets, + * assuming a desired maximum load factor of 1. 
*/ - nbuckets = next_pow2_int((nelem - 1) / hctl->ffactor + 1); + nbuckets = next_pow2_int(nelem); /* * In a partitioned table, nbuckets must be at least equal to @@ -733,7 +726,6 @@ init_htab(HTAB *hashp, long nelem) "DIRECTORY SIZE ", hctl->dsize, "SEGMENT SIZE ", hctl->ssize, "SEGMENT SHIFT ", hctl->sshift, - "FILL FACTOR ", hctl->ffactor, "MAX BUCKET ", hctl->max_bucket, "HIGH MASK ", hctl->high_mask, "LOW MASK ", hctl->low_mask, @@ -761,7 +753,7 @@ hash_estimate_size(long num_entries, Size entrysize) elementAllocCnt; /* estimate number of buckets wanted */ - nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1); + nBuckets = next_pow2_long(num_entries); /* # of segments needed for nBuckets */ nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1); /* directory entries */ @@ -804,7 +796,7 @@ hash_select_dirsize(long num_entries) nDirEntries; /* estimate number of buckets wanted */ - nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1); + nBuckets = next_pow2_long(num_entries); /* # of segments needed for nBuckets */ nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1); /* directory entries */ @@ -971,11 +963,10 @@ hash_search_with_hash_value(HTAB *hashp, { /* * Can't split if running in partitioned mode, nor if frozen, nor if - * table is the subject of any active hash_seq_search scans. Strange - * order of these tests is to try to check cheaper conditions first. + * table is the subject of any active hash_seq_search scans. 
*/ - if (!IS_PARTITIONED(hctl) && !hashp->frozen && - hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor && + if (hctl->freeList[0].nentries > (long) hctl->max_bucket && + !IS_PARTITIONED(hctl) && !hashp->frozen && !has_seq_scans(hashp)) (void) expand_table(hashp); } diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index cf8f9579c345f..ed2ab4b5b29a8 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -32,10 +32,12 @@ #include "catalog/pg_authid.h" #include "common/file_perm.h" #include "libpq/libpq.h" +#include "libpq/pqsignal.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "pgstat.h" #include "postmaster/autovacuum.h" +#include "postmaster/interrupt.h" #include "postmaster/postmaster.h" #include "storage/fd.h" #include "storage/ipc.h" @@ -133,6 +135,23 @@ InitPostmasterChild(void) elog(FATAL, "setsid() failed: %m"); #endif + /* In EXEC_BACKEND case we will not have inherited BlockSig etc values */ +#ifdef EXEC_BACKEND + pqinitmask(); +#endif + + /* + * Every postmaster child process is expected to respond promptly to + * SIGQUIT at all times. Therefore we centrally remove SIGQUIT from + * BlockSig and install a suitable signal handler. (Client-facing + * processes may choose to replace this default choice of handler with + * quickdie().) All other blockable signals remain blocked for now. + */ + pqsignal(SIGQUIT, SignalHandlerForCrashExit); + + sigdelset(&BlockSig, SIGQUIT); + PG_SETMASK(&BlockSig); + /* Request a signal if the postmaster dies, if possible. */ PostmasterDeathSignalInit(); } @@ -155,6 +174,13 @@ InitStandaloneProcess(const char *argv0) InitLatch(MyLatch); InitializeLatchWaitSet(); + /* + * For consistency with InitPostmasterChild, initialize signal mask here. + * But we don't unblock SIGQUIT or provide a default handler for it. 
+ */ + pqinitmask(); + PG_SETMASK(&BlockSig); + /* Compute paths, no postmaster to inherit from */ if (my_exec_path[0] == '\0') { diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l index 268b745528383..c98e2202951ce 100644 --- a/src/backend/utils/misc/guc-file.l +++ b/src/backend/utils/misc/guc-file.l @@ -55,7 +55,6 @@ static void record_config_file_error(const char *errmsg, ConfigVariable **tail_p); static int GUC_flex_fatal(const char *msg); -static char *GUC_scanstr(const char *s); /* LCOV_EXCL_START */ @@ -797,7 +796,7 @@ ParseConfigFp(FILE *fp, const char *config_file, int depth, int elevel, token != GUC_UNQUOTED_STRING) goto parse_error; if (token == GUC_STRING) /* strip quotes and escapes */ - opt_value = GUC_scanstr(yytext); + opt_value = DeescapeQuotedString(yytext); else opt_value = pstrdup(yytext); @@ -1132,22 +1131,25 @@ FreeConfigVariable(ConfigVariable *item) /* - * scanstr + * DeescapeQuotedString * * Strip the quotes surrounding the given string, and collapse any embedded * '' sequences and backslash escapes. * - * the string returned is palloc'd and should eventually be pfree'd by the + * The string returned is palloc'd and should eventually be pfree'd by the * caller. + * + * This is exported because it is also used by the bootstrap scanner. */ -static char * -GUC_scanstr(const char *s) +char * +DeescapeQuotedString(const char *s) { char *newStr; int len, i, j; + /* We just Assert that there are leading and trailing quotes */ Assert(s != NULL && s[0] == '\''); len = strlen(s); Assert(len >= 2); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index de87ad6ef7028..a62d64eaa4794 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -2837,7 +2837,7 @@ static struct config_int ConfigureNamesInt[] = gettext_noop("Sets the minimum execution time above which " "a sample of statements will be logged." 
" Sampling is determined by log_statement_sample_rate."), - gettext_noop("Zero log a sample of all queries. -1 turns this feature off."), + gettext_noop("Zero logs a sample of all queries. -1 turns this feature off."), GUC_UNIT_MS }, &log_min_duration_sample, @@ -3140,7 +3140,7 @@ static struct config_int ConfigureNamesInt[] = }, { {"autovacuum_vacuum_insert_threshold", PGC_SIGHUP, AUTOVACUUM, - gettext_noop("Minimum number of tuple inserts prior to vacuum, or -1 to disable insert vacuums"), + gettext_noop("Minimum number of tuple inserts prior to vacuum, or -1 to disable insert vacuums."), NULL }, &autovacuum_vac_ins_thresh, @@ -3700,7 +3700,7 @@ static struct config_string ConfigureNamesString[] = { {"restore_command", PGC_POSTMASTER, WAL_ARCHIVE_RECOVERY, - gettext_noop("Sets the shell command that will retrieve an archived WAL file."), + gettext_noop("Sets the shell command that will be called to retrieve an archived WAL file."), NULL }, &recoveryRestoreCommand, @@ -7222,6 +7222,10 @@ set_config_option(const char *name, const char *value, if (prohibitValueChange) { + /* Release newextra, unless it's reset_extra */ + if (newextra && !extra_field_used(&conf->gen, newextra)) + free(newextra); + if (*conf->variable != newval) { record->status |= GUC_PENDING_RESTART; @@ -7312,6 +7316,10 @@ set_config_option(const char *name, const char *value, if (prohibitValueChange) { + /* Release newextra, unless it's reset_extra */ + if (newextra && !extra_field_used(&conf->gen, newextra)) + free(newextra); + if (*conf->variable != newval) { record->status |= GUC_PENDING_RESTART; @@ -7402,6 +7410,10 @@ set_config_option(const char *name, const char *value, if (prohibitValueChange) { + /* Release newextra, unless it's reset_extra */ + if (newextra && !extra_field_used(&conf->gen, newextra)) + free(newextra); + if (*conf->variable != newval) { record->status |= GUC_PENDING_RESTART; @@ -7508,9 +7520,21 @@ set_config_option(const char *name, const char *value, if 
(prohibitValueChange) { + bool newval_different; + /* newval shouldn't be NULL, so we're a bit sloppy here */ - if (*conf->variable == NULL || newval == NULL || - strcmp(*conf->variable, newval) != 0) + newval_different = (*conf->variable == NULL || + newval == NULL || + strcmp(*conf->variable, newval) != 0); + + /* Release newval, unless it's reset_val */ + if (newval && !string_field_used(conf, newval)) + free(newval); + /* Release newextra, unless it's reset_extra */ + if (newextra && !extra_field_used(&conf->gen, newextra)) + free(newextra); + + if (newval_different) { record->status |= GUC_PENDING_RESTART; ereport(elevel, @@ -7605,6 +7629,10 @@ set_config_option(const char *name, const char *value, if (prohibitValueChange) { + /* Release newextra, unless it's reset_extra */ + if (newextra && !extra_field_used(&conf->gen, newextra)) + free(newextra); + if (*conf->variable != newval) { record->status |= GUC_PENDING_RESTART; diff --git a/src/backend/utils/sort/gen_qsort_tuple.pl b/src/backend/utils/sort/gen_qsort_tuple.pl index eb0f7c5814f4b..4c305806c7ce3 100644 --- a/src/backend/utils/sort/gen_qsort_tuple.pl +++ b/src/backend/utils/sort/gen_qsort_tuple.pl @@ -115,7 +115,8 @@ sub emit_qsort_boilerplate { do { - SortTuple t = *a; + SortTuple t = *a; + *a++ = *b; *b++ = t; } while (--n > 0); @@ -143,9 +144,9 @@ sub emit_qsort_implementation { return cmp_$SUFFIX(a, b$CMPPARAMS) < 0 ? (cmp_$SUFFIX(b, c$CMPPARAMS) < 0 ? b : - (cmp_$SUFFIX(a, c$CMPPARAMS) < 0 ? c : a)) + (cmp_$SUFFIX(a, c$CMPPARAMS) < 0 ? c : a)) : (cmp_$SUFFIX(b, c$CMPPARAMS) > 0 ? b : - (cmp_$SUFFIX(a, c$CMPPARAMS) < 0 ? a : c)); + (cmp_$SUFFIX(a, c$CMPPARAMS) < 0 ? 
a : c)); } static void diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c index bbb01f6d33739..28905124f965a 100644 --- a/src/backend/utils/sort/logtape.c +++ b/src/backend/utils/sort/logtape.c @@ -212,6 +212,7 @@ struct LogicalTapeSet long *freeBlocks; /* resizable array holding minheap */ long nFreeBlocks; /* # of currently free blocks */ Size freeBlocksLen; /* current allocated length of freeBlocks[] */ + bool enable_prealloc; /* preallocate write blocks? */ /* The array of logical tapes. */ int nTapes; /* # of logical tapes in set */ @@ -220,6 +221,7 @@ struct LogicalTapeSet static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer); static void ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer); +static long ltsGetBlock(LogicalTapeSet *lts, LogicalTape *lt); static long ltsGetFreeBlock(LogicalTapeSet *lts); static long ltsGetPreallocBlock(LogicalTapeSet *lts, LogicalTape *lt); static void ltsReleaseBlock(LogicalTapeSet *lts, long blocknum); @@ -242,12 +244,8 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer) * that's past the current end of file, fill the space between the current * end of file and the target block with zeros. * - * This should happen rarely, otherwise you are not writing very - * sequentially. In current use, this only happens when the sort ends - * writing a run, and switches to another tape. The last block of the - * previous tape isn't flushed to disk until the end of the sort, so you - * get one-block hole, where the last block of the previous tape will - * later go. + * This can happen either when tapes preallocate blocks; or for the last + * block of a tape which might not have been flushed. * * Note that BufFile concatenation can leave "holes" in BufFile between * worker-owned block ranges. 
These are tracked for reporting purposes @@ -373,8 +371,20 @@ parent_offset(unsigned long i) } /* - * Select the lowest currently unused block by taking the first element from - * the freelist min heap. + * Get the next block for writing. + */ +static long +ltsGetBlock(LogicalTapeSet *lts, LogicalTape *lt) +{ + if (lts->enable_prealloc) + return ltsGetPreallocBlock(lts, lt); + else + return ltsGetFreeBlock(lts); +} + +/* + * Select the lowest currently unused block from the tape set's global free + * list min heap. */ static long ltsGetFreeBlock(LogicalTapeSet *lts) @@ -430,7 +440,8 @@ ltsGetFreeBlock(LogicalTapeSet *lts) /* * Return the lowest free block number from the tape's preallocation list. - * Refill the preallocation list if necessary. + * Refill the preallocation list with blocks from the tape set's free list if + * necessary. */ static long ltsGetPreallocBlock(LogicalTapeSet *lts, LogicalTape *lt) @@ -671,8 +682,8 @@ ltsInitReadBuffer(LogicalTapeSet *lts, LogicalTape *lt) * infrastructure that may be lifted in the future. 
*/ LogicalTapeSet * -LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset, - int worker) +LogicalTapeSetCreate(int ntapes, bool preallocate, TapeShare *shared, + SharedFileSet *fileset, int worker) { LogicalTapeSet *lts; int i; @@ -689,6 +700,7 @@ LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset, lts->freeBlocksLen = 32; /* reasonable initial guess */ lts->freeBlocks = (long *) palloc(lts->freeBlocksLen * sizeof(long)); lts->nFreeBlocks = 0; + lts->enable_prealloc = preallocate; lts->nTapes = ntapes; lts->tapes = (LogicalTape *) palloc(ntapes * sizeof(LogicalTape)); @@ -782,7 +794,7 @@ LogicalTapeWrite(LogicalTapeSet *lts, int tapenum, Assert(lt->firstBlockNumber == -1); Assert(lt->pos == 0); - lt->curBlockNumber = ltsGetPreallocBlock(lts, lt); + lt->curBlockNumber = ltsGetBlock(lts, lt); lt->firstBlockNumber = lt->curBlockNumber; TapeBlockGetTrailer(lt->buffer)->prev = -1L; @@ -806,7 +818,7 @@ LogicalTapeWrite(LogicalTapeSet *lts, int tapenum, * First allocate the next block, so that we can store it in the * 'next' pointer of this block. */ - nextBlockNumber = ltsGetPreallocBlock(lts, lt); + nextBlockNumber = ltsGetBlock(lts, lt); /* set the next-pointer and dump the current block. */ TapeBlockGetTrailer(lt->buffer)->next = nextBlockNumber; @@ -1252,9 +1264,19 @@ LogicalTapeTell(LogicalTapeSet *lts, int tapenum, /* * Obtain total disk space currently used by a LogicalTapeSet, in blocks. + * + * This should not be called while there are open write buffers; otherwise it + * may not account for buffered data. 
*/ long LogicalTapeSetBlocks(LogicalTapeSet *lts) { - return lts->nBlocksAllocated - lts->nHoleBlocks; +#ifdef USE_ASSERT_CHECKING + for (int i = 0; i < lts->nTapes; i++) + { + LogicalTape *lt = <s->tapes[i]; + Assert(!lt->writing || lt->buffer == NULL); + } +#endif + return lts->nBlocksWritten - lts->nHoleBlocks; } diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c index b83fb50dac8f3..fe298ce92ed5c 100644 --- a/src/backend/utils/sort/sharedtuplestore.c +++ b/src/backend/utils/sort/sharedtuplestore.c @@ -566,7 +566,7 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data) if (BufFileSeekBlock(accessor->read_file, read_page) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not seek block %u in shared tuplestore temporary file", + errmsg("could not seek to block %u in shared tuplestore temporary file", read_page))); nread = BufFileRead(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE); diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c index fcfe6e831a19b..c436fbb4ce1e8 100644 --- a/src/backend/utils/sort/sortsupport.c +++ b/src/backend/utils/sort/sortsupport.c @@ -15,6 +15,7 @@ #include "postgres.h" +#include "access/gist.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "fmgr.h" @@ -175,3 +176,36 @@ PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy, FinishSortSupportFunction(opfamily, opcintype, ssup); } + +/* + * Fill in SortSupport given a GiST index relation + * + * Caller must previously have zeroed the SortSupportData structure and then + * filled in ssup_cxt, ssup_attno, ssup_collation, and ssup_nulls_first. This + * will fill in ssup_reverse (always false for GiST index build), as well as + * the comparator function pointer. 
+ */ +void +PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup) +{ + Oid opfamily = indexRel->rd_opfamily[ssup->ssup_attno - 1]; + Oid opcintype = indexRel->rd_opcintype[ssup->ssup_attno - 1]; + Oid sortSupportFunction; + + Assert(ssup->comparator == NULL); + + if (indexRel->rd_rel->relam != GIST_AM_OID) + elog(ERROR, "unexpected non-gist AM: %u", indexRel->rd_rel->relam); + ssup->ssup_reverse = false; + + /* + * Look up the sort support function. This is simpler than for B-tree + * indexes because we don't support the old-style btree comparators. + */ + sortSupportFunction = get_opfamily_proc(opfamily, opcintype, opcintype, + GIST_SORTSUPPORT_PROC); + if (!OidIsValid(sortSupportFunction)) + elog(ERROR, "missing support function %d(%u,%u) in opfamily %u", + GIST_SORTSUPPORT_PROC, opcintype, opcintype, opfamily); + OidFunctionCall1(sortSupportFunction, PointerGetDatum(ssup)); +} diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 3c49476483b1f..d0cc04a878a14 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1167,6 +1167,63 @@ tuplesort_begin_index_hash(Relation heapRel, return state; } +Tuplesortstate * +tuplesort_begin_index_gist(Relation heapRel, + Relation indexRel, + int workMem, + SortCoordinate coordinate, + bool randomAccess) +{ + Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate, + randomAccess); + MemoryContext oldcontext; + int i; + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin index sort: workMem = %d, randomAccess = %c", + workMem, randomAccess ? 
't' : 'f'); +#endif + + state->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel); + + state->comparetup = comparetup_index_btree; + state->copytup = copytup_index; + state->writetup = writetup_index; + state->readtup = readtup_index; + + state->heapRel = heapRel; + state->indexRel = indexRel; + + /* Prepare SortSupport data for each column */ + state->sortKeys = (SortSupport) palloc0(state->nKeys * + sizeof(SortSupportData)); + + for (i = 0; i < state->nKeys; i++) + { + SortSupport sortKey = state->sortKeys + i; + + sortKey->ssup_cxt = CurrentMemoryContext; + sortKey->ssup_collation = indexRel->rd_indcollation[i]; + sortKey->ssup_nulls_first = false; + sortKey->ssup_attno = i + 1; + /* Convey if abbreviation optimization is applicable in principle */ + sortKey->abbreviate = (i == 0); + + AssertState(sortKey->ssup_attno != 0); + + /* Look for a sort support function */ + PrepareSortSupportFromGistIndexRel(indexRel, sortKey); + } + + MemoryContextSwitchTo(oldcontext); + + return state; +} + Tuplesortstate * tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, @@ -2591,7 +2648,7 @@ inittapes(Tuplesortstate *state, bool mergeruns) /* Create the tape set and allocate the per-tape data arrays */ inittapestate(state, maxTapes); state->tapeset = - LogicalTapeSetCreate(maxTapes, NULL, + LogicalTapeSetCreate(maxTapes, false, NULL, state->shared ? &state->shared->fileset : NULL, state->worker); @@ -4657,8 +4714,9 @@ leader_takeover_tapes(Tuplesortstate *state) * randomAccess is disallowed for parallel sorts. 
*/ inittapestate(state, nParticipants + 1); - state->tapeset = LogicalTapeSetCreate(nParticipants + 1, shared->tapes, - &shared->fileset, state->worker); + state->tapeset = LogicalTapeSetCreate(nParticipants + 1, false, + shared->tapes, &shared->fileset, + state->worker); /* mergeruns() relies on currentRun for # of runs (in one-pass cases) */ state->currentRun = nParticipants; diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index 22cf3ebaf4728..8c41483e87c52 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -64,6 +64,7 @@ #include "storage/spin.h" #include "utils/builtins.h" #include "utils/memutils.h" +#include "utils/old_snapshot.h" #include "utils/rel.h" #include "utils/resowner_private.h" #include "utils/snapmgr.h" @@ -76,59 +77,7 @@ */ int old_snapshot_threshold; /* number of minutes, -1 disables */ -/* - * Structure for dealing with old_snapshot_threshold implementation. - */ -typedef struct OldSnapshotControlData -{ - /* - * Variables for old snapshot handling are shared among processes and are - * only allowed to move forward. - */ - slock_t mutex_current; /* protect current_timestamp */ - TimestampTz current_timestamp; /* latest snapshot timestamp */ - slock_t mutex_latest_xmin; /* protect latest_xmin and next_map_update */ - TransactionId latest_xmin; /* latest snapshot xmin */ - TimestampTz next_map_update; /* latest snapshot valid up to */ - slock_t mutex_threshold; /* protect threshold fields */ - TimestampTz threshold_timestamp; /* earlier snapshot is old */ - TransactionId threshold_xid; /* earlier xid may be gone */ - - /* - * Keep one xid per minute for old snapshot error handling. - * - * Use a circular buffer with a head offset, a count of entries currently - * used, and a timestamp corresponding to the xid at the head offset. 
A - * count_used value of zero means that there are no times stored; a - * count_used value of OLD_SNAPSHOT_TIME_MAP_ENTRIES means that the buffer - * is full and the head must be advanced to add new entries. Use - * timestamps aligned to minute boundaries, since that seems less - * surprising than aligning based on the first usage timestamp. The - * latest bucket is effectively stored within latest_xmin. The circular - * buffer is updated when we get a new xmin value that doesn't fall into - * the same interval. - * - * It is OK if the xid for a given time slot is from earlier than - * calculated by adding the number of minutes corresponding to the - * (possibly wrapped) distance from the head offset to the time of the - * head entry, since that just results in the vacuuming of old tuples - * being slightly less aggressive. It would not be OK for it to be off in - * the other direction, since it might result in vacuuming tuples that are - * still expected to be there. - * - * Use of an SLRU was considered but not chosen because it is more - * heavyweight than is needed for this, and would probably not be any less - * code to implement. - * - * Persistence is not needed. - */ - int head_offset; /* subscript of oldest tracked time */ - TimestampTz head_timestamp; /* time corresponding to head xid */ - int count_used; /* how many slots are in use */ - TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER]; -} OldSnapshotControlData; - -static volatile OldSnapshotControlData *oldSnapshotControl; +volatile OldSnapshotControlData *oldSnapshotControl; /* @@ -1855,7 +1804,7 @@ TransactionIdLimitedForOldSnapshots(TransactionId recentXmin, if (ts == threshold_timestamp) { /* - * Current timestamp is in same bucket as the the last limit that + * Current timestamp is in same bucket as the last limit that * was applied. Reuse. 
*/ xlimit = threshold_xid; @@ -2000,10 +1949,32 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) else { /* We need a new bucket, but it might not be the very next one. */ - int advance = ((ts - oldSnapshotControl->head_timestamp) - / USECS_PER_MINUTE); + int distance_to_new_tail; + int distance_to_current_tail; + int advance; - oldSnapshotControl->head_timestamp = ts; + /* + * Our goal is for the new "tail" of the mapping, that is, the entry + * which is newest and thus furthest from the "head" entry, to + * correspond to "ts". Since there's one entry per minute, the + * distance between the current head and the new tail is just the + * number of minutes of difference between ts and the current + * head_timestamp. + * + * The distance from the current head to the current tail is one + * less than the number of entries in the mapping, because the + * entry at the head_offset is for 0 minutes after head_timestamp. + * + * The difference between these two values is the number of minutes + * by which we need to advance the mapping, either adding new entries + * or rotating old ones out. 
+ */ + distance_to_new_tail = + (ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE; + distance_to_current_tail = + oldSnapshotControl->count_used - 1; + advance = distance_to_new_tail - distance_to_current_tail; + Assert(advance > 0); if (advance >= OLD_SNAPSHOT_TIME_MAP_ENTRIES) { @@ -2011,6 +1982,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) oldSnapshotControl->head_offset = 0; oldSnapshotControl->count_used = 1; oldSnapshotControl->xid_by_minute[0] = xmin; + oldSnapshotControl->head_timestamp = ts; } else { @@ -2029,6 +2001,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) else oldSnapshotControl->head_offset = old_head + 1; oldSnapshotControl->xid_by_minute[old_head] = xmin; + oldSnapshotControl->head_timestamp += USECS_PER_MINUTE; } else { diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 37e0d7ceab93c..ee3bfa82f481e 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -331,12 +331,9 @@ escape_quotes(const char *src) /* * Escape a field value to be inserted into the BKI data. - * Here, we first run the value through escape_quotes (which - * will be inverted by the backend's scanstr() function) and - * then overlay special processing of double quotes, which - * bootscanner.l will only accept as data if converted to octal - * representation ("\042"). We always wrap the value in double - * quotes, even if that isn't strictly necessary. + * Run the value through escape_quotes (which will be inverted + * by the backend's DeescapeQuotedString() function), then wrap + * the value in single quotes, even if that isn't strictly necessary. 
*/ static char * escape_quotes_bki(const char *src) @@ -345,30 +342,13 @@ escape_quotes_bki(const char *src) char *data = escape_quotes(src); char *resultp; char *datap; - int nquotes = 0; - /* count double quotes in data */ - datap = data; - while ((datap = strchr(datap, '"')) != NULL) - { - nquotes++; - datap++; - } - - result = (char *) pg_malloc(strlen(data) + 3 + nquotes * 3); + result = (char *) pg_malloc(strlen(data) + 3); resultp = result; - *resultp++ = '"'; + *resultp++ = '\''; for (datap = data; *datap; datap++) - { - if (*datap == '"') - { - strcpy(resultp, "\\042"); - resultp += 4; - } - else - *resultp++ = *datap; - } - *resultp++ = '"'; + *resultp++ = *datap; + *resultp++ = '\''; *resultp = '\0'; free(data); @@ -486,7 +466,7 @@ readfile(const char *path) result = (char **) pg_malloc(maxlines * sizeof(char *)); n = 0; - while (pg_get_line_append(infile, &line)) + while (pg_get_line_buf(infile, &line)) { /* make sure there will be room for a trailing NULL pointer */ if (n >= maxlines - 1) @@ -496,8 +476,6 @@ readfile(const char *path) } result[n++] = pg_strdup(line.data); - - resetStringInfo(&line); } result[n] = NULL; diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c index e454bae767de2..12338e3bb2c26 100644 --- a/src/bin/pg_archivecleanup/pg_archivecleanup.c +++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c @@ -302,7 +302,7 @@ main(int argc, char **argv) switch (c) { case 'd': /* Debug mode */ - pg_logging_set_level(PG_LOG_DEBUG); + pg_logging_increase_verbosity(); break; case 'n': /* Dry-Run mode */ dryrun = true; diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c index cd05f5fede18f..cddc896390da9 100644 --- a/src/bin/pg_basebackup/pg_receivewal.c +++ b/src/bin/pg_basebackup/pg_receivewal.c @@ -269,8 +269,8 @@ FindStreamingStart(uint32 *tli) if (statbuf.st_size != WalSegSz) { - pg_log_warning("segment file \"%s\" has incorrect size %d, skipping", - 
dirent->d_name, (int) statbuf.st_size); + pg_log_warning("segment file \"%s\" has incorrect size %lld, skipping", + dirent->d_name, (long long int) statbuf.st_size); continue; } } diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c index d3f99d89c5c85..dc97c7e89c4d6 100644 --- a/src/bin/pg_basebackup/receivelog.c +++ b/src/bin/pg_basebackup/receivelog.c @@ -46,8 +46,7 @@ static bool ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, i XLogRecPtr *blockpos); static PGresult *HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *copybuf, XLogRecPtr blockpos, XLogRecPtr *stoppos); -static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos, - XLogRecPtr *stoppos); +static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos); static long CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout, TimestampTz last_status); @@ -747,7 +746,7 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream, /* * Check if we should continue streaming, or abort at this point. */ - if (!CheckCopyStreamStop(conn, stream, blockpos, stoppos)) + if (!CheckCopyStreamStop(conn, stream, blockpos)) goto error; now = feGetCurrentTimestamp(); @@ -825,7 +824,7 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream, * Check if we should continue streaming, or abort at this * point. */ - if (!CheckCopyStreamStop(conn, stream, blockpos, stoppos)) + if (!CheckCopyStreamStop(conn, stream, blockpos)) goto error; } else @@ -1203,8 +1202,7 @@ HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *copybuf, * Check if we should continue streaming, or abort at this point. 
*/ static bool -CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos, - XLogRecPtr *stoppos) +CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos) { if (still_sending && stream->stream_stop(blockpos, stream->timeline, false)) { diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c index 1cdc3ebaa338d..fc07f1aba6e77 100644 --- a/src/bin/pg_ctl/pg_ctl.c +++ b/src/bin/pg_ctl/pg_ctl.c @@ -1778,7 +1778,7 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser Advapi32Handle = LoadLibrary("ADVAPI32.DLL"); if (Advapi32Handle != NULL) { - _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken"); + _CreateRestrictedToken = (__CreateRestrictedToken) (pg_funcptr_t) GetProcAddress(Advapi32Handle, "CreateRestrictedToken"); } if (_CreateRestrictedToken == NULL) @@ -1852,11 +1852,11 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser Kernel32Handle = LoadLibrary("KERNEL32.DLL"); if (Kernel32Handle != NULL) { - _IsProcessInJob = (__IsProcessInJob) GetProcAddress(Kernel32Handle, "IsProcessInJob"); - _CreateJobObject = (__CreateJobObject) GetProcAddress(Kernel32Handle, "CreateJobObjectA"); - _SetInformationJobObject = (__SetInformationJobObject) GetProcAddress(Kernel32Handle, "SetInformationJobObject"); - _AssignProcessToJobObject = (__AssignProcessToJobObject) GetProcAddress(Kernel32Handle, "AssignProcessToJobObject"); - _QueryInformationJobObject = (__QueryInformationJobObject) GetProcAddress(Kernel32Handle, "QueryInformationJobObject"); + _IsProcessInJob = (__IsProcessInJob) (pg_funcptr_t) GetProcAddress(Kernel32Handle, "IsProcessInJob"); + _CreateJobObject = (__CreateJobObject) (pg_funcptr_t) GetProcAddress(Kernel32Handle, "CreateJobObjectA"); + _SetInformationJobObject = (__SetInformationJobObject) (pg_funcptr_t) GetProcAddress(Kernel32Handle, "SetInformationJobObject"); + _AssignProcessToJobObject = 
(__AssignProcessToJobObject) (pg_funcptr_t) GetProcAddress(Kernel32Handle, "AssignProcessToJobObject"); + _QueryInformationJobObject = (__QueryInformationJobObject) (pg_funcptr_t) GetProcAddress(Kernel32Handle, "QueryInformationJobObject"); } /* Verify that we found all functions */ diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index 08239dde4f92f..634ca86cfb788 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -719,6 +719,9 @@ buildIndexArray(void *objArray, int numObjs, Size objSize) DumpableObject **ptrs; int i; + if (numObjs <= 0) + return NULL; + ptrs = (DumpableObject **) pg_malloc(numObjs * sizeof(DumpableObject *)); for (i = 0; i < numObjs; i++) ptrs[i] = (DumpableObject *) ((char *) objArray + i * objSize); diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c index 287d485d5f712..2d6ea13c4554a 100644 --- a/src/bin/pg_dump/dumputils.c +++ b/src/bin/pg_dump/dumputils.c @@ -685,7 +685,7 @@ AddAcl(PQExpBuffer aclbuf, const char *keyword, const char *subname) * keep this file free of assumptions about how to deal with SQL errors.) 
*/ void -buildShSecLabelQuery(PGconn *conn, const char *catalog_name, Oid objectId, +buildShSecLabelQuery(const char *catalog_name, Oid objectId, PQExpBuffer sql) { appendPQExpBuffer(sql, diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h index cb1d98d873ee2..d35d9d34d28c1 100644 --- a/src/bin/pg_dump/dumputils.h +++ b/src/bin/pg_dump/dumputils.h @@ -46,7 +46,7 @@ extern bool buildDefaultACLCommands(const char *type, const char *nspname, const char *owner, int remoteVersion, PQExpBuffer sql); -extern void buildShSecLabelQuery(PGconn *conn, const char *catalog_name, +extern void buildShSecLabelQuery(const char *catalog_name, Oid objectId, PQExpBuffer sql); extern void emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer, const char *objtype, const char *objname); diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c index f0587f41e4925..b51cc76c7dc21 100644 --- a/src/bin/pg_dump/parallel.c +++ b/src/bin/pg_dump/parallel.c @@ -130,7 +130,7 @@ typedef struct /* Windows implementation of pipe access */ static int pgpipe(int handles[2]); -static int piperead(int s, char *buf, int len); +#define piperead(a,b,c) recv(a,b,c,0) #define pipewrite(a,b,c) send(a,b,c,0) #else /* !WIN32 */ @@ -229,19 +229,6 @@ static char *readMessageFromPipe(int fd); (strncmp(msg, prefix, strlen(prefix)) == 0) -/* - * Shutdown callback to clean up socket access - */ -#ifdef WIN32 -static void -shutdown_parallel_dump_utils(int code, void *unused) -{ - /* Call the cleanup function only from the main thread */ - if (mainThreadId == GetCurrentThreadId()) - WSACleanup(); -} -#endif - /* * Initialize parallel dump support --- should be called early in process * startup. (Currently, this is called whether or not we intend parallel @@ -267,8 +254,7 @@ init_parallel_dump_utils(void) pg_log_error("WSAStartup failed: %d", err); exit_nicely(1); } - /* ... 
and arrange to shut it down at exit */ - on_exit_nicely(shutdown_parallel_dump_utils, NULL); + parallel_init_done = true; } #endif @@ -1817,20 +1803,4 @@ pgpipe(int handles[2]) return 0; } -/* - * Windows implementation of reading from a pipe. - */ -static int -piperead(int s, char *buf, int len) -{ - int ret = recv(s, buf, len, 0); - - if (ret < 0 && WSAGetLastError() == WSAECONNRESET) - { - /* EOF on the pipe! */ - ret = 0; - } - return ret; -} - #endif /* WIN32 */ diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index 1017abbbe58cf..a6a8e6f2fd864 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -58,6 +58,20 @@ typedef enum _teSection SECTION_POST_DATA /* stuff to be processed after data */ } teSection; +/* Parameters needed by ConnectDatabase; same for dump and restore */ +typedef struct _connParams +{ + /* These fields record the actual command line parameters */ + char *dbname; /* this may be a connstring! */ + char *pgport; + char *pghost; + char *username; + trivalue promptPassword; + /* If not NULL, this overrides the dbname obtained from command line */ + /* (but *only* the DB name, not anything else in the connstring) */ + char *override_dbname; +} ConnParams; + typedef struct _restoreOptions { int createDB; /* Issue commands to create the database */ @@ -107,12 +121,9 @@ typedef struct _restoreOptions SimpleStringList tableNames; int useDB; - char *dbname; /* subject to expand_dbname */ - char *pgport; - char *pghost; - char *username; + ConnParams cparams; /* parameters to use if useDB */ + int noDataForFailedTables; - trivalue promptPassword; int exit_on_error; int compression; int suppressDumpWarnings; /* Suppress output of WARNING entries @@ -127,10 +138,7 @@ typedef struct _restoreOptions typedef struct _dumpOptions { - const char *dbname; /* subject to expand_dbname */ - const char *pghost; - const char *pgport; - const char *username; + ConnParams cparams; int binary_upgrade; @@ -247,12 +255,9 @@ 
typedef void (*SetupWorkerPtrType) (Archive *AH); * Main archiver interface. */ -extern void ConnectDatabase(Archive *AH, - const char *dbname, - const char *pghost, - const char *pgport, - const char *username, - trivalue prompt_password); +extern void ConnectDatabase(Archive *AHX, + const ConnParams *cparams, + bool isReconnect); extern void DisconnectDatabase(Archive *AHX); extern PGconn *GetConnection(Archive *AHX); diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index c05a1fd6af0d7..b961a24b36dd1 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -30,8 +30,10 @@ #include #endif +#include "common/string.h" #include "dumputils.h" #include "fe_utils/string_utils.h" +#include "lib/stringinfo.h" #include "libpq/libpq-fs.h" #include "parallel.h" #include "pg_backup_archiver.h" @@ -70,8 +72,7 @@ typedef struct _parallelReadyList static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt, const int compression, bool dosync, ArchiveMode mode, SetupWorkerPtrType setupWorkerPtr); -static void _getObjectDescription(PQExpBuffer buf, TocEntry *te, - ArchiveHandle *AH); +static void _getObjectDescription(PQExpBuffer buf, TocEntry *te); static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData); static char *sanitize_line(const char *str, bool want_hyphen); static void _doSetFixedOutputState(ArchiveHandle *AH); @@ -91,7 +92,7 @@ static bool _tocEntryIsACL(TocEntry *te); static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te); static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te); static void buildTocEntryArrays(ArchiveHandle *AH); -static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te); +static void _moveBefore(TocEntry *pos, TocEntry *te); static int _discoverArchiveFormat(ArchiveHandle *AH); static int RestoringToDB(ArchiveHandle *AH); @@ -121,8 +122,7 @@ static int TocEntrySizeCompare(const void *p1, const 
void *p2); static void move_to_ready_list(TocEntry *pending_list, ParallelReadyList *ready_list, RestorePass pass); -static TocEntry *pop_next_work_item(ArchiveHandle *AH, - ParallelReadyList *ready_list, +static TocEntry *pop_next_work_item(ParallelReadyList *ready_list, ParallelState *pstate); static void mark_dump_job_done(ArchiveHandle *AH, TocEntry *te, @@ -165,6 +165,7 @@ InitDumpOptions(DumpOptions *opts) memset(opts, 0, sizeof(DumpOptions)); /* set any fields that shouldn't default to zeroes */ opts->include_everything = true; + opts->cparams.promptPassword = TRI_DEFAULT; opts->dumpSections = DUMP_UNSECTIONED; } @@ -178,6 +179,11 @@ dumpOptionsFromRestoreOptions(RestoreOptions *ropt) DumpOptions *dopt = NewDumpOptions(); /* this is the inverse of what's at the end of pg_dump.c's main() */ + dopt->cparams.dbname = ropt->cparams.dbname ? pg_strdup(ropt->cparams.dbname) : NULL; + dopt->cparams.pgport = ropt->cparams.pgport ? pg_strdup(ropt->cparams.pgport) : NULL; + dopt->cparams.pghost = ropt->cparams.pghost ? pg_strdup(ropt->cparams.pghost) : NULL; + dopt->cparams.username = ropt->cparams.username ? 
pg_strdup(ropt->cparams.username) : NULL; + dopt->cparams.promptPassword = ropt->cparams.promptPassword; dopt->outputClean = ropt->dropSchema; dopt->dataOnly = ropt->dataOnly; dopt->schemaOnly = ropt->schemaOnly; @@ -410,9 +416,7 @@ RestoreArchive(Archive *AHX) AHX->minRemoteVersion = 0; AHX->maxRemoteVersion = 9999999; - ConnectDatabase(AHX, ropt->dbname, - ropt->pghost, ropt->pgport, ropt->username, - ropt->promptPassword); + ConnectDatabase(AHX, &ropt->cparams, false); /* * If we're talking to the DB directly, don't send comments since they @@ -832,16 +836,8 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel) if (strcmp(te->desc, "DATABASE") == 0 || strcmp(te->desc, "DATABASE PROPERTIES") == 0) { - PQExpBufferData connstr; - - initPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, te->tag); - /* Abandon struct, but keep its buffer until process exit. */ - pg_log_info("connecting to new database \"%s\"", te->tag); _reconnectToDB(AH, te->tag); - ropt->dbname = connstr.data; } } @@ -973,7 +969,7 @@ NewRestoreOptions(void) /* set any fields that shouldn't default to zeroes */ opts->format = archUnknown; - opts->promptPassword = TRI_DEFAULT; + opts->cparams.promptPassword = TRI_DEFAULT; opts->dumpSections = DUMP_UNSECTIONED; return opts; @@ -1369,8 +1365,7 @@ SortTocFromFile(Archive *AHX) ArchiveHandle *AH = (ArchiveHandle *) AHX; RestoreOptions *ropt = AH->public.ropt; FILE *fh; - char buf[100]; - bool incomplete_line; + StringInfoData linebuf; /* Allocate space for the 'wanted' array, and init it */ ropt->idWanted = (bool *) pg_malloc0(sizeof(bool) * AH->maxDumpId); @@ -1380,45 +1375,33 @@ SortTocFromFile(Archive *AHX) if (!fh) fatal("could not open TOC file \"%s\": %m", ropt->tocFile); - incomplete_line = false; - while (fgets(buf, sizeof(buf), fh) != NULL) + initStringInfo(&linebuf); + + while (pg_get_line_buf(fh, &linebuf)) { - bool prev_incomplete_line = incomplete_line; - int buflen; char *cmnt; 
char *endptr; DumpId id; TocEntry *te; - /* - * Some lines in the file might be longer than sizeof(buf). This is - * no problem, since we only care about the leading numeric ID which - * can be at most a few characters; but we have to skip continuation - * bufferloads when processing a long line. - */ - buflen = strlen(buf); - if (buflen > 0 && buf[buflen - 1] == '\n') - incomplete_line = false; - else - incomplete_line = true; - if (prev_incomplete_line) - continue; - /* Truncate line at comment, if any */ - cmnt = strchr(buf, ';'); + cmnt = strchr(linebuf.data, ';'); if (cmnt != NULL) + { cmnt[0] = '\0'; + linebuf.len = cmnt - linebuf.data; + } /* Ignore if all blank */ - if (strspn(buf, " \t\r\n") == strlen(buf)) + if (strspn(linebuf.data, " \t\r\n") == linebuf.len) continue; /* Get an ID, check it's valid and not already seen */ - id = strtol(buf, &endptr, 10); - if (endptr == buf || id <= 0 || id > AH->maxDumpId || + id = strtol(linebuf.data, &endptr, 10); + if (endptr == linebuf.data || id <= 0 || id > AH->maxDumpId || ropt->idWanted[id - 1]) { - pg_log_warning("line ignored: %s", buf); + pg_log_warning("line ignored: %s", linebuf.data); continue; } @@ -1442,9 +1425,11 @@ SortTocFromFile(Archive *AHX) * side-effects on the order in which restorable items actually get * restored. 
*/ - _moveBefore(AH, AH->toc, te); + _moveBefore(AH->toc, te); } + pg_free(linebuf.data); + if (fclose(fh) != 0) fatal("could not close TOC file: %m"); } @@ -1652,16 +1637,17 @@ dump_lo_buf(ArchiveHandle *AH) { if (AH->connection) { - size_t res; + int res; res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used); - pg_log_debug(ngettext("wrote %lu byte of large object data (result = %lu)", - "wrote %lu bytes of large object data (result = %lu)", + pg_log_debug(ngettext("wrote %zu byte of large object data (result = %d)", + "wrote %zu bytes of large object data (result = %d)", AH->lo_buf_used), - (unsigned long) AH->lo_buf_used, (unsigned long) res); + AH->lo_buf_used, res); + /* We assume there are no short writes, only errors */ if (res != AH->lo_buf_used) - fatal("could not write to large object (result: %lu, expected: %lu)", - (unsigned long) res, (unsigned long) AH->lo_buf_used); + warn_or_exit_horribly(AH, "could not write to large object: %s", + PQerrorMessage(AH->connection)); } else { @@ -1804,7 +1790,7 @@ _moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) #endif static void -_moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) +_moveBefore(TocEntry *pos, TocEntry *te) { /* Unlink te from list */ te->prev->next = te->next; @@ -2278,7 +2264,8 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, { ArchiveHandle *AH; - pg_log_debug("allocating AH for %s, format %d", FileSpec, fmt); + pg_log_debug("allocating AH for %s, format %d", + FileSpec ? FileSpec : "(stdio)", fmt); AH = (ArchiveHandle *) pg_malloc0(sizeof(ArchiveHandle)); @@ -2355,8 +2342,6 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, else AH->format = fmt; - AH->promptPassword = TRI_DEFAULT; - switch (AH->format) { case archCustom: @@ -3217,27 +3202,20 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user) * If we're currently restoring right into a database, this will * actually establish a connection. 
Otherwise it puts a \connect into * the script output. - * - * NULL dbname implies reconnecting to the current DB (pretty useless). */ static void _reconnectToDB(ArchiveHandle *AH, const char *dbname) { if (RestoringToDB(AH)) - ReconnectToServer(AH, dbname, NULL); + ReconnectToServer(AH, dbname); else { - if (dbname) - { - PQExpBufferData connectbuf; + PQExpBufferData connectbuf; - initPQExpBuffer(&connectbuf); - appendPsqlMetaConnect(&connectbuf, dbname); - ahprintf(AH, "%s\n", connectbuf.data); - termPQExpBuffer(&connectbuf); - } - else - ahprintf(AH, "%s\n", "\\connect -\n"); + initPQExpBuffer(&connectbuf); + appendPsqlMetaConnect(&connectbuf, dbname); + ahprintf(AH, "%s\n", connectbuf.data); + termPQExpBuffer(&connectbuf); } /* @@ -3464,7 +3442,7 @@ _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam) * This is used for ALTER ... OWNER TO. */ static void -_getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH) +_getObjectDescription(PQExpBuffer buf, TocEntry *te) { const char *type = te->desc; @@ -3673,7 +3651,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData) PQExpBuffer temp = createPQExpBuffer(); appendPQExpBufferStr(temp, "ALTER "); - _getObjectDescription(temp, te, AH); + _getObjectDescription(temp, te); appendPQExpBuffer(temp, " OWNER TO %s;", fmtId(te->owner)); ahprintf(AH, "%s\n\n", temp->data); destroyPQExpBuffer(temp); @@ -4077,7 +4055,7 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate, for (;;) { /* Look for an item ready to be dispatched to a worker */ - next_work_item = pop_next_work_item(AH, &ready_list, pstate); + next_work_item = pop_next_work_item(&ready_list, pstate); if (next_work_item != NULL) { /* If not to be restored, don't waste time launching a worker */ @@ -4169,9 +4147,7 @@ restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list) /* * Now reconnect the single parent connection. 
*/ - ConnectDatabase((Archive *) AH, ropt->dbname, - ropt->pghost, ropt->pgport, ropt->username, - ropt->promptPassword); + ConnectDatabase((Archive *) AH, &ropt->cparams, true); /* re-establish fixed state */ _doSetFixedOutputState(AH); @@ -4383,7 +4359,7 @@ move_to_ready_list(TocEntry *pending_list, * no remaining dependencies, but we have to check for lock conflicts. */ static TocEntry * -pop_next_work_item(ArchiveHandle *AH, ParallelReadyList *ready_list, +pop_next_work_item(ParallelReadyList *ready_list, ParallelState *pstate) { /* @@ -4833,54 +4809,15 @@ CloneArchive(ArchiveHandle *AH) clone->public.n_errors = 0; /* - * Connect our new clone object to the database: In parallel restore the - * parent is already disconnected, because we can connect the worker - * processes independently to the database (no snapshot sync required). In - * parallel backup we clone the parent's existing connection. + * Connect our new clone object to the database, using the same connection + * parameters used for the original connection. */ - if (AH->mode == archModeRead) - { - RestoreOptions *ropt = AH->public.ropt; - - Assert(AH->connection == NULL); + ConnectDatabase((Archive *) clone, &clone->public.ropt->cparams, true); - /* this also sets clone->connection */ - ConnectDatabase((Archive *) clone, ropt->dbname, - ropt->pghost, ropt->pgport, ropt->username, - ropt->promptPassword); - - /* re-establish fixed state */ + /* re-establish fixed state */ + if (AH->mode == archModeRead) _doSetFixedOutputState(clone); - } - else - { - PQExpBufferData connstr; - char *pghost; - char *pgport; - char *username; - - Assert(AH->connection != NULL); - - /* - * Even though we are technically accessing the parent's database - * object here, these functions are fine to be called like that - * because all just return a pointer and do not actually send/receive - * any data to/from the database. 
- */ - initPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, PQdb(AH->connection)); - pghost = PQhost(AH->connection); - pgport = PQport(AH->connection); - username = PQuser(AH->connection); - - /* this also sets clone->connection */ - ConnectDatabase((Archive *) clone, connstr.data, - pghost, pgport, username, TRI_NO); - - termPQExpBuffer(&connstr); - /* setupDumpWorker will fix up connection state */ - } + /* in write case, setupDumpWorker will fix up connection state */ /* Let the format-specific code have a chance too */ clone->ClonePtr(clone); diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h index f9e6b42752af6..fb8d226d487d3 100644 --- a/src/bin/pg_dump/pg_backup_archiver.h +++ b/src/bin/pg_dump/pg_backup_archiver.h @@ -303,7 +303,6 @@ struct _archiveHandle /* Stuff for direct DB connection */ char *archdbname; /* DB name *read* from archive */ - trivalue promptPassword; char *savedPassword; /* password for ropt->username, if known */ char *use_role; PGconn *connection; @@ -471,7 +470,7 @@ extern void InitArchiveFmt_Tar(ArchiveHandle *AH); extern bool isValidTarHeader(char *header); -extern void ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *newUser); +extern void ReconnectToServer(ArchiveHandle *AH, const char *dbname); extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid); void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH); diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c index 12899e26e292d..5ba43441f50aa 100644 --- a/src/bin/pg_dump/pg_backup_db.c +++ b/src/bin/pg_dump/pg_backup_db.c @@ -27,7 +27,6 @@ #include "pg_backup_utils.h" static void _check_database_version(ArchiveHandle *AH); -static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser); static void notice_processor(void *arg, const char *message); static void @@ -73,211 +72,100 @@ 
_check_database_version(ArchiveHandle *AH) /* * Reconnect to the server. If dbname is not NULL, use that database, - * else the one associated with the archive handle. If username is - * not NULL, use that user name, else the one from the handle. + * else the one associated with the archive handle. */ void -ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *username) +ReconnectToServer(ArchiveHandle *AH, const char *dbname) { - PGconn *newConn; - const char *newdbname; - const char *newusername; - - if (!dbname) - newdbname = PQdb(AH->connection); - else - newdbname = dbname; - - if (!username) - newusername = PQuser(AH->connection); - else - newusername = username; - - newConn = _connectDB(AH, newdbname, newusername); - - /* Update ArchiveHandle's connCancel before closing old connection */ - set_archive_cancel_info(AH, newConn); - - PQfinish(AH->connection); - AH->connection = newConn; - - /* Start strict; later phases may override this. */ - PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH, - ALWAYS_SECURE_SEARCH_PATH_SQL)); -} - -/* - * Connect to the db again. - * - * Note: it's not really all that sensible to use a single-entry password - * cache if the username keeps changing. In current usage, however, the - * username never does change, so one savedPassword is sufficient. We do - * update the cache on the off chance that the password has changed since the - * start of the run. 
- */ -static PGconn * -_connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) -{ - PQExpBufferData connstr; - PGconn *newConn; - const char *newdb; - const char *newuser; - char *password; - bool new_pass; - - if (!reqdb) - newdb = PQdb(AH->connection); - else - newdb = reqdb; - - if (!requser || strlen(requser) == 0) - newuser = PQuser(AH->connection); - else - newuser = requser; - - pg_log_info("connecting to database \"%s\" as user \"%s\"", - newdb, newuser); - - password = AH->savedPassword; - - if (AH->promptPassword == TRI_YES && password == NULL) - password = simple_prompt("Password: ", false); - - initPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, newdb); - - do - { - const char *keywords[7]; - const char *values[7]; - - keywords[0] = "host"; - values[0] = PQhost(AH->connection); - keywords[1] = "port"; - values[1] = PQport(AH->connection); - keywords[2] = "user"; - values[2] = newuser; - keywords[3] = "password"; - values[3] = password; - keywords[4] = "dbname"; - values[4] = connstr.data; - keywords[5] = "fallback_application_name"; - values[5] = progname; - keywords[6] = NULL; - values[6] = NULL; - - new_pass = false; - newConn = PQconnectdbParams(keywords, values, true); - - if (!newConn) - fatal("could not reconnect to database"); - - if (PQstatus(newConn) == CONNECTION_BAD) - { - if (!PQconnectionNeedsPassword(newConn)) - fatal("could not reconnect to database: %s", - PQerrorMessage(newConn)); - PQfinish(newConn); - - if (password) - fprintf(stderr, "Password incorrect\n"); - - fprintf(stderr, "Connecting to %s as %s\n", - newdb, newuser); - - if (AH->promptPassword != TRI_NO) - { - if (password && password != AH->savedPassword) - free(password); - password = simple_prompt("Password: ", false); - } - else - fatal("connection needs password"); - - new_pass = true; - } - } while (new_pass); - - if (password && password != AH->savedPassword) - free(password); + PGconn *oldConn = 
AH->connection; + RestoreOptions *ropt = AH->public.ropt; /* - * We want to remember connection's actual password, whether or not we got - * it by prompting. So we don't just store the password variable. + * Save the dbname, if given, in override_dbname so that it will also + * affect any later reconnection attempt. */ - if (PQconnectionUsedPassword(newConn)) - { - if (AH->savedPassword) - free(AH->savedPassword); - AH->savedPassword = pg_strdup(PQpass(newConn)); - } - - termPQExpBuffer(&connstr); + if (dbname) + ropt->cparams.override_dbname = pg_strdup(dbname); - /* check for version mismatch */ - _check_database_version(AH); + /* + * Note: we want to establish the new connection, and in particular update + * ArchiveHandle's connCancel, before closing old connection. Otherwise + * an ill-timed SIGINT could try to access a dead connection. + */ + AH->connection = NULL; /* dodge error check in ConnectDatabase */ - PQsetNoticeProcessor(newConn, notice_processor, NULL); + ConnectDatabase((Archive *) AH, &ropt->cparams, true); - return newConn; + PQfinish(oldConn); } - /* - * Make a database connection with the given parameters. The - * connection handle is returned, the parameters are stored in AHX. - * An interactive password prompt is automatically issued if required. + * Make, or remake, a database connection with the given parameters. + * + * The resulting connection handle is stored in AHX->connection. * + * An interactive password prompt is automatically issued if required. + * We store the results of that in AHX->savedPassword. * Note: it's not really all that sensible to use a single-entry password * cache if the username keeps changing. In current usage, however, the * username never does change, so one savedPassword is sufficient. 
*/ void ConnectDatabase(Archive *AHX, - const char *dbname, - const char *pghost, - const char *pgport, - const char *username, - trivalue prompt_password) + const ConnParams *cparams, + bool isReconnect) { ArchiveHandle *AH = (ArchiveHandle *) AHX; + trivalue prompt_password; char *password; bool new_pass; if (AH->connection) fatal("already connected to a database"); + /* Never prompt for a password during a reconnection */ + prompt_password = isReconnect ? TRI_NO : cparams->promptPassword; + password = AH->savedPassword; if (prompt_password == TRI_YES && password == NULL) password = simple_prompt("Password: ", false); - AH->promptPassword = prompt_password; - /* * Start the connection. Loop until we have a password if requested by * backend. */ do { - const char *keywords[7]; - const char *values[7]; - - keywords[0] = "host"; - values[0] = pghost; - keywords[1] = "port"; - values[1] = pgport; - keywords[2] = "user"; - values[2] = username; - keywords[3] = "password"; - values[3] = password; - keywords[4] = "dbname"; - values[4] = dbname; - keywords[5] = "fallback_application_name"; - values[5] = progname; - keywords[6] = NULL; - values[6] = NULL; + const char *keywords[8]; + const char *values[8]; + int i = 0; + + /* + * If dbname is a connstring, its entries can override the other + * values obtained from cparams; but in turn, override_dbname can + * override the dbname component of it. 
+ */ + keywords[i] = "host"; + values[i++] = cparams->pghost; + keywords[i] = "port"; + values[i++] = cparams->pgport; + keywords[i] = "user"; + values[i++] = cparams->username; + keywords[i] = "password"; + values[i++] = password; + keywords[i] = "dbname"; + values[i++] = cparams->dbname; + if (cparams->override_dbname) + { + keywords[i] = "dbname"; + values[i++] = cparams->override_dbname; + } + keywords[i] = "fallback_application_name"; + values[i++] = progname; + keywords[i] = NULL; + values[i++] = NULL; + Assert(i <= lengthof(keywords)); new_pass = false; AH->connection = PQconnectdbParams(keywords, values, true); @@ -298,9 +186,16 @@ ConnectDatabase(Archive *AHX, /* check to see that the backend connection was successfully made */ if (PQstatus(AH->connection) == CONNECTION_BAD) - fatal("connection to database \"%s\" failed: %s", - PQdb(AH->connection) ? PQdb(AH->connection) : "", - PQerrorMessage(AH->connection)); + { + if (isReconnect) + fatal("reconnection to database \"%s\" failed: %s", + PQdb(AH->connection) ? PQdb(AH->connection) : "", + PQerrorMessage(AH->connection)); + else + fatal("connection to database \"%s\" failed: %s", + PQdb(AH->connection) ? PQdb(AH->connection) : "", + PQerrorMessage(AH->connection)); + } /* Start strict; later phases may override this. */ PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH, diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index c601ec07012aa..54e708875cd1f 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -107,7 +107,7 @@ static void tarClose(ArchiveHandle *AH, TAR_MEMBER *TH); #ifdef __NOT_USED__ static char *tarGets(char *buf, size_t len, TAR_MEMBER *th); #endif -static int tarPrintf(ArchiveHandle *AH, TAR_MEMBER *th, const char *fmt,...) pg_attribute_printf(3, 4); +static int tarPrintf(TAR_MEMBER *th, const char *fmt,...) 
pg_attribute_printf(2, 3); static void _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th); static TAR_MEMBER *_tarPositionTo(ArchiveHandle *AH, const char *filename); @@ -851,7 +851,7 @@ _CloseArchive(ArchiveHandle *AH) */ th = tarOpen(AH, "restore.sql", 'w'); - tarPrintf(AH, th, "--\n" + tarPrintf(th, "--\n" "-- NOTE:\n" "--\n" "-- File paths need to be edited. Search for $$PATH$$ and\n" @@ -964,7 +964,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) sprintf(fname, "blob_%u.dat%s", oid, sfx); - tarPrintf(AH, ctx->blobToc, "%u %s\n", oid, fname); + tarPrintf(ctx->blobToc, "%u %s\n", oid, fname); tctx->TH = tarOpen(AH, fname, 'w'); } @@ -1008,7 +1008,7 @@ _EndBlobs(ArchiveHandle *AH, TocEntry *te) */ static int -tarPrintf(ArchiveHandle *AH, TAR_MEMBER *th, const char *fmt,...) +tarPrintf(TAR_MEMBER *th, const char *fmt,...) { int save_errno = errno; char *p; diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 784bceaec3948..ff45e3fb8c379 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -157,7 +157,7 @@ static void expand_table_name_patterns(Archive *fout, SimpleStringList *patterns, SimpleOidList *oids, bool strict_names); -static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid); +static NamespaceInfo *findNamespace(Oid nsoid); static void dumpTableData(Archive *fout, TableDataInfo *tdinfo); static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo); static void guessConstraintInheritance(TableInfo *tblinfo, int numTables); @@ -250,9 +250,8 @@ static char *format_function_arguments_old(Archive *fout, char **argnames); static char *format_function_signature(Archive *fout, FuncInfo *finfo, bool honor_quotes); -static char *convertRegProcReference(Archive *fout, - const char *proc); -static char *getFormattedOperatorName(Archive *fout, const char *oproid); +static char *convertRegProcReference(const char *proc); +static char *getFormattedOperatorName(const char *oproid); static char 
*convertTSFunction(Archive *fout, Oid funcOid); static Oid findLastBuiltinOid_V71(Archive *fout); static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts); @@ -315,7 +314,6 @@ main(int argc, char **argv) char *use_role = NULL; long rowsPerInsert; int numWorkers = 1; - trivalue prompt_password = TRI_DEFAULT; int compressLevel = -1; int plainText = 0; ArchiveFormat archiveFormat = archUnknown; @@ -445,7 +443,7 @@ main(int argc, char **argv) break; case 'd': /* database name */ - dopt.dbname = pg_strdup(optarg); + dopt.cparams.dbname = pg_strdup(optarg); break; case 'E': /* Dump encoding */ @@ -461,7 +459,7 @@ main(int argc, char **argv) break; case 'h': /* server host */ - dopt.pghost = pg_strdup(optarg); + dopt.cparams.pghost = pg_strdup(optarg); break; case 'j': /* number of dump jobs */ @@ -482,7 +480,7 @@ main(int argc, char **argv) break; case 'p': /* server port */ - dopt.pgport = pg_strdup(optarg); + dopt.cparams.pgport = pg_strdup(optarg); break; case 'R': @@ -507,20 +505,20 @@ main(int argc, char **argv) break; case 'U': - dopt.username = pg_strdup(optarg); + dopt.cparams.username = pg_strdup(optarg); break; case 'v': /* verbose */ g_verbose = true; - pg_logging_set_level(PG_LOG_INFO); + pg_logging_increase_verbosity(); break; case 'w': - prompt_password = TRI_NO; + dopt.cparams.promptPassword = TRI_NO; break; case 'W': - prompt_password = TRI_YES; + dopt.cparams.promptPassword = TRI_YES; break; case 'x': /* skip ACL dump */ @@ -614,8 +612,8 @@ main(int argc, char **argv) * Non-option argument specifies database name as long as it wasn't * already specified with -d / --dbname */ - if (optind < argc && dopt.dbname == NULL) - dopt.dbname = argv[optind++]; + if (optind < argc && dopt.cparams.dbname == NULL) + dopt.cparams.dbname = argv[optind++]; /* Complain if any arguments remain */ if (optind < argc) @@ -741,7 +739,7 @@ main(int argc, char **argv) * Open the database using the Archiver, so it knows about it. Errors mean * death. 
*/ - ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password); + ConnectDatabase(fout, &dopt.cparams, false); setup_connection(fout, dumpencoding, dumpsnapshot, use_role); /* @@ -919,6 +917,11 @@ main(int argc, char **argv) ropt->filename = filename; /* if you change this list, see dumpOptionsFromRestoreOptions */ + ropt->cparams.dbname = dopt.cparams.dbname ? pg_strdup(dopt.cparams.dbname) : NULL; + ropt->cparams.pgport = dopt.cparams.pgport ? pg_strdup(dopt.cparams.pgport) : NULL; + ropt->cparams.pghost = dopt.cparams.pghost ? pg_strdup(dopt.cparams.pghost) : NULL; + ropt->cparams.username = dopt.cparams.username ? pg_strdup(dopt.cparams.username) : NULL; + ropt->cparams.promptPassword = dopt.cparams.promptPassword; ropt->dropSchema = dopt.outputClean; ropt->dataOnly = dopt.dataOnly; ropt->schemaOnly = dopt.schemaOnly; @@ -1372,8 +1375,8 @@ expand_foreign_server_name_patterns(Archive *fout, for (cell = patterns->head; cell; cell = cell->next) { - appendPQExpBuffer(query, - "SELECT oid FROM pg_catalog.pg_foreign_server s\n"); + appendPQExpBufferStr(query, + "SELECT oid FROM pg_catalog.pg_foreign_server s\n"); processSQLNamePattern(GetConnection(fout), query, cell->val, false, false, NULL, "s.srvname", NULL, NULL); @@ -2095,8 +2098,6 @@ dumpTableData_insert(Archive *fout, void *dcontext) if (nfields == 0) continue; - Assert(tbinfo->attgenerated); - /* Emit a row heading */ if (rows_per_statement == 1) archputs(" (", fout); @@ -2257,6 +2258,9 @@ dumpTableData(Archive *fout, TableDataInfo *tdinfo) char *copyStmt; const char *copyFrom; + /* We had better have loaded per-column details about this table */ + Assert(tbinfo->interesting); + if (dopt->dump_inserts == 0) { /* Dump/restore using COPY */ @@ -2448,6 +2452,9 @@ makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo) addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId); tbinfo->dataObj = tdinfo; + + /* Make sure that we'll collect per-column info for this table. 
*/ + tbinfo->interesting = true; } /* @@ -2974,7 +2981,7 @@ dumpDatabase(Archive *fout) seclabelQry = createPQExpBuffer(); - buildShSecLabelQuery(conn, "pg_database", dbCatId.oid, seclabelQry); + buildShSecLabelQuery("pg_database", dbCatId.oid, seclabelQry); shres = ExecuteSqlQuery(fout, seclabelQry->data, PGRES_TUPLES_OK); resetPQExpBuffer(seclabelQry); emitShSecLabels(conn, shres, seclabelQry, "DATABASE", datname); @@ -4243,23 +4250,19 @@ getSubscriptions(Archive *fout) username_subquery); if (fout->remoteVersion >= 140000) - appendPQExpBuffer(query, - " s.subbinary,\n"); + appendPQExpBufferStr(query, " s.subbinary,\n"); else - appendPQExpBuffer(query, - " false AS subbinary,\n"); + appendPQExpBufferStr(query, " false AS subbinary,\n"); if (fout->remoteVersion >= 140000) - appendPQExpBuffer(query, - " s.substream\n"); + appendPQExpBufferStr(query, " s.substream\n"); else - appendPQExpBuffer(query, - " false AS substream\n"); + appendPQExpBufferStr(query, " false AS substream\n"); - appendPQExpBuffer(query, - "FROM pg_subscription s\n" - "WHERE s.subdbid = (SELECT oid FROM pg_database\n" - " WHERE datname = current_database())"); + appendPQExpBufferStr(query, + "FROM pg_subscription s\n" + "WHERE s.subdbid = (SELECT oid FROM pg_database\n" + " WHERE datname = current_database())"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -4369,10 +4372,10 @@ dumpSubscription(Archive *fout, SubscriptionInfo *subinfo) appendPQExpBufferStr(query, "NONE"); if (strcmp(subinfo->subbinary, "t") == 0) - appendPQExpBuffer(query, ", binary = true"); + appendPQExpBufferStr(query, ", binary = true"); if (strcmp(subinfo->substream, "f") != 0) - appendPQExpBuffer(query, ", streaming = on"); + appendPQExpBufferStr(query, ", streaming = on"); if (strcmp(subinfo->subsynccommit, "off") != 0) appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit)); @@ -4811,7 +4814,7 @@ getNamespaces(Archive *fout, int *numNamespaces) * given a namespace OID, look 
up the info read by getNamespaces */ static NamespaceInfo * -findNamespace(Archive *fout, Oid nsoid) +findNamespace(Oid nsoid) { NamespaceInfo *nsinfo; @@ -5069,8 +5072,7 @@ getTypes(Archive *fout, int *numTypes) AssignDumpId(&tyinfo[i].dobj); tyinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_typname)); tyinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_typnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_typnamespace))); tyinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); tyinfo[i].typacl = pg_strdup(PQgetvalue(res, i, i_typacl)); tyinfo[i].rtypacl = pg_strdup(PQgetvalue(res, i, i_rtypacl)); @@ -5214,8 +5216,7 @@ getOperators(Archive *fout, int *numOprs) AssignDumpId(&oprinfo[i].dobj); oprinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oprname)); oprinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_oprnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace))); oprinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0]; oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode)); @@ -5300,8 +5301,7 @@ getCollations(Archive *fout, int *numCollations) AssignDumpId(&collinfo[i].dobj); collinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_collname)); collinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_collnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_collnamespace))); collinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); /* Decide whether we want to dump it */ @@ -5373,8 +5373,7 @@ getConversions(Archive *fout, int *numConversions) AssignDumpId(&convinfo[i].dobj); convinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname)); convinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_connamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_connamespace))); convinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); /* Decide whether we 
want to dump it */ @@ -5518,8 +5517,7 @@ getOpclasses(Archive *fout, int *numOpclasses) AssignDumpId(&opcinfo[i].dobj); opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname)); opcinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_opcnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace))); opcinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); /* Decide whether we want to dump it */ @@ -5602,8 +5600,7 @@ getOpfamilies(Archive *fout, int *numOpfamilies) AssignDumpId(&opfinfo[i].dobj); opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname)); opfinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_opfnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace))); opfinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); /* Decide whether we want to dump it */ @@ -5778,8 +5775,7 @@ getAggregates(Archive *fout, int *numAggs) AssignDumpId(&agginfo[i].aggfn.dobj); agginfo[i].aggfn.dobj.name = pg_strdup(PQgetvalue(res, i, i_aggname)); agginfo[i].aggfn.dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_aggnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace))); agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); if (strlen(agginfo[i].aggfn.rolname) == 0) pg_log_warning("owner of aggregate function \"%s\" appears to be invalid", @@ -6013,8 +6009,7 @@ getFuncs(Archive *fout, int *numFuncs) AssignDumpId(&finfo[i].dobj); finfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_proname)); finfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_pronamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_pronamespace))); finfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang)); finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype)); @@ -6751,8 +6746,7 @@ getTables(Archive *fout, int *numTables) AssignDumpId(&tblinfo[i].dobj); 
tblinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_relname)); tblinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_relnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_relnamespace))); tblinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); tblinfo[i].relacl = pg_strdup(PQgetvalue(res, i, i_relacl)); tblinfo[i].rrelacl = pg_strdup(PQgetvalue(res, i, i_rrelacl)); @@ -7413,8 +7407,7 @@ getExtendedStatistics(Archive *fout) AssignDumpId(&statsextinfo[i].dobj); statsextinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_stxname)); statsextinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_stxnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_stxnamespace))); statsextinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); statsextinfo[i].stattarget = atoi(PQgetvalue(res, i, i_stattarget)); @@ -8644,9 +8637,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) PQclear(res); /* - * Get info about column defaults + * Get info about column defaults. This is skipped for a data-only + * dump, as it is only needed for table schemas. */ - if (hasdefaults) + if (!dopt->dataOnly && hasdefaults) { AttrDefInfo *attrdefs; int numDefaults; @@ -8731,9 +8725,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) } /* - * Get info about table CHECK constraints + * Get info about table CHECK constraints. This is skipped for a + * data-only dump, as it is only needed for table schemas. 
*/ - if (tbinfo->ncheck > 0) + if (tbinfo->ncheck > 0 && !dopt->dataOnly) { ConstraintInfo *constrs; int numConstrs; @@ -8950,8 +8945,7 @@ getTSParsers(Archive *fout, int *numTSParsers) AssignDumpId(&prsinfo[i].dobj); prsinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_prsname)); prsinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_prsnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_prsnamespace))); prsinfo[i].prsstart = atooid(PQgetvalue(res, i, i_prsstart)); prsinfo[i].prstoken = atooid(PQgetvalue(res, i, i_prstoken)); prsinfo[i].prsend = atooid(PQgetvalue(res, i, i_prsend)); @@ -9033,8 +9027,7 @@ getTSDictionaries(Archive *fout, int *numTSDicts) AssignDumpId(&dictinfo[i].dobj); dictinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_dictname)); dictinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_dictnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_dictnamespace))); dictinfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); dictinfo[i].dicttemplate = atooid(PQgetvalue(res, i, i_dicttemplate)); if (PQgetisnull(res, i, i_dictinitoption)) @@ -9113,8 +9106,7 @@ getTSTemplates(Archive *fout, int *numTSTemplates) AssignDumpId(&tmplinfo[i].dobj); tmplinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_tmplname)); tmplinfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_tmplnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_tmplnamespace))); tmplinfo[i].tmplinit = atooid(PQgetvalue(res, i, i_tmplinit)); tmplinfo[i].tmpllexize = atooid(PQgetvalue(res, i, i_tmpllexize)); @@ -9190,8 +9182,7 @@ getTSConfigurations(Archive *fout, int *numTSConfigs) AssignDumpId(&cfginfo[i].dobj); cfginfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_cfgname)); cfginfo[i].dobj.namespace = - findNamespace(fout, - atooid(PQgetvalue(res, i, i_cfgnamespace))); + findNamespace(atooid(PQgetvalue(res, i, i_cfgnamespace))); cfginfo[i].rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); 
cfginfo[i].cfgparser = atooid(PQgetvalue(res, i, i_cfgparser)); @@ -9638,7 +9629,7 @@ getDefaultACLs(Archive *fout, int *numDefaultACLs) daclinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_defaclobjtype)); if (nspid != InvalidOid) - daclinfo[i].dobj.namespace = findNamespace(fout, nspid); + daclinfo[i].dobj.namespace = findNamespace(nspid); else daclinfo[i].dobj.namespace = NULL; @@ -11850,26 +11841,26 @@ dumpFunc(Archive *fout, FuncInfo *finfo) asPart = createPQExpBuffer(); /* Fetch function-specific details */ - appendPQExpBuffer(query, - "SELECT\n" - "proretset,\n" - "prosrc,\n" - "probin,\n" - "provolatile,\n" - "proisstrict,\n" - "prosecdef,\n" - "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname,\n"); + appendPQExpBufferStr(query, + "SELECT\n" + "proretset,\n" + "prosrc,\n" + "probin,\n" + "provolatile,\n" + "proisstrict,\n" + "prosecdef,\n" + "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) AS lanname,\n"); if (fout->remoteVersion >= 80300) - appendPQExpBuffer(query, - "proconfig,\n" - "procost,\n" - "prorows,\n"); + appendPQExpBufferStr(query, + "proconfig,\n" + "procost,\n" + "prorows,\n"); else - appendPQExpBuffer(query, - "null AS proconfig,\n" - "0 AS procost,\n" - "0 AS prorows,\n"); + appendPQExpBufferStr(query, + "null AS proconfig,\n" + "0 AS procost,\n" + "0 AS prorows,\n"); if (fout->remoteVersion >= 80400) { @@ -11877,56 +11868,56 @@ dumpFunc(Archive *fout, FuncInfo *finfo) * In 8.4 and up we rely on pg_get_function_arguments and * pg_get_function_result instead of examining proallargtypes etc. 
*/ - appendPQExpBuffer(query, - "pg_catalog.pg_get_function_arguments(oid) AS funcargs,\n" - "pg_catalog.pg_get_function_identity_arguments(oid) AS funciargs,\n" - "pg_catalog.pg_get_function_result(oid) AS funcresult,\n"); + appendPQExpBufferStr(query, + "pg_catalog.pg_get_function_arguments(oid) AS funcargs,\n" + "pg_catalog.pg_get_function_identity_arguments(oid) AS funciargs,\n" + "pg_catalog.pg_get_function_result(oid) AS funcresult,\n"); } else if (fout->remoteVersion >= 80100) - appendPQExpBuffer(query, - "proallargtypes,\n" - "proargmodes,\n" - "proargnames,\n"); + appendPQExpBufferStr(query, + "proallargtypes,\n" + "proargmodes,\n" + "proargnames,\n"); else - appendPQExpBuffer(query, - "null AS proallargtypes,\n" - "null AS proargmodes,\n" - "proargnames,\n"); + appendPQExpBufferStr(query, + "null AS proallargtypes,\n" + "null AS proargmodes,\n" + "proargnames,\n"); if (fout->remoteVersion >= 90200) - appendPQExpBuffer(query, - "proleakproof,\n"); + appendPQExpBufferStr(query, + "proleakproof,\n"); else - appendPQExpBuffer(query, - "false AS proleakproof,\n"); + appendPQExpBufferStr(query, + "false AS proleakproof,\n"); if (fout->remoteVersion >= 90500) - appendPQExpBuffer(query, - "array_to_string(protrftypes, ' ') AS protrftypes,\n"); + appendPQExpBufferStr(query, + "array_to_string(protrftypes, ' ') AS protrftypes,\n"); if (fout->remoteVersion >= 90600) - appendPQExpBuffer(query, - "proparallel,\n"); + appendPQExpBufferStr(query, + "proparallel,\n"); else - appendPQExpBuffer(query, - "'u' AS proparallel,\n"); + appendPQExpBufferStr(query, + "'u' AS proparallel,\n"); if (fout->remoteVersion >= 110000) - appendPQExpBuffer(query, - "prokind,\n"); + appendPQExpBufferStr(query, + "prokind,\n"); else if (fout->remoteVersion >= 80400) - appendPQExpBuffer(query, - "CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind,\n"); + appendPQExpBufferStr(query, + "CASE WHEN proiswindow THEN 'w' ELSE 'f' END AS prokind,\n"); else - appendPQExpBuffer(query, - "'f' AS 
prokind,\n"); + appendPQExpBufferStr(query, + "'f' AS prokind,\n"); if (fout->remoteVersion >= 120000) - appendPQExpBuffer(query, - "prosupport\n"); + appendPQExpBufferStr(query, + "prosupport\n"); else - appendPQExpBuffer(query, - "'-' AS prosupport\n"); + appendPQExpBufferStr(query, + "'-' AS prosupport\n"); appendPQExpBuffer(query, "FROM pg_catalog.pg_proc " @@ -12648,7 +12639,12 @@ dumpOpr(Archive *fout, OprInfo *oprinfo) oprcanmerge = PQgetvalue(res, 0, i_oprcanmerge); oprcanhash = PQgetvalue(res, 0, i_oprcanhash); - oprregproc = convertRegProcReference(fout, oprcode); + /* In PG14 upwards postfix operator support does not exist anymore. */ + if (strcmp(oprkind, "r") == 0) + pg_log_warning("postfix operators are not supported anymore (operator \"%s\")", + oprcode); + + oprregproc = convertRegProcReference(oprcode); if (oprregproc) { appendPQExpBuffer(details, " FUNCTION = %s", oprregproc); @@ -12660,7 +12656,8 @@ dumpOpr(Archive *fout, OprInfo *oprinfo) /* * right unary means there's a left arg and left unary means there's a - * right arg + * right arg. (Although the "r" case is dead code for PG14 and later, + * continue to support it in case we're dumping from an old server.) 
*/ if (strcmp(oprkind, "r") == 0 || strcmp(oprkind, "b") == 0) @@ -12680,14 +12677,14 @@ dumpOpr(Archive *fout, OprInfo *oprinfo) else appendPQExpBufferStr(oprid, ", NONE)"); - oprref = getFormattedOperatorName(fout, oprcom); + oprref = getFormattedOperatorName(oprcom); if (oprref) { appendPQExpBuffer(details, ",\n COMMUTATOR = %s", oprref); free(oprref); } - oprref = getFormattedOperatorName(fout, oprnegate); + oprref = getFormattedOperatorName(oprnegate); if (oprref) { appendPQExpBuffer(details, ",\n NEGATOR = %s", oprref); @@ -12700,14 +12697,14 @@ dumpOpr(Archive *fout, OprInfo *oprinfo) if (strcmp(oprcanhash, "t") == 0) appendPQExpBufferStr(details, ",\n HASHES"); - oprregproc = convertRegProcReference(fout, oprrest); + oprregproc = convertRegProcReference(oprrest); if (oprregproc) { appendPQExpBuffer(details, ",\n RESTRICT = %s", oprregproc); free(oprregproc); } - oprregproc = convertRegProcReference(fout, oprjoin); + oprregproc = convertRegProcReference(oprjoin); if (oprregproc) { appendPQExpBuffer(details, ",\n JOIN = %s", oprregproc); @@ -12762,7 +12759,7 @@ dumpOpr(Archive *fout, OprInfo *oprinfo) * part. */ static char * -convertRegProcReference(Archive *fout, const char *proc) +convertRegProcReference(const char *proc) { char *name; char *paren; @@ -12803,7 +12800,7 @@ convertRegProcReference(Archive *fout, const char *proc) * are in different schemas. 
*/ static char * -getFormattedOperatorName(Archive *fout, const char *oproid) +getFormattedOperatorName(const char *oproid) { OprInfo *oprInfo; @@ -13890,71 +13887,71 @@ dumpAgg(Archive *fout, AggInfo *agginfo) details = createPQExpBuffer(); /* Get aggregate-specific details */ - appendPQExpBuffer(query, - "SELECT\n" - "aggtransfn,\n" - "aggfinalfn,\n" - "aggtranstype::pg_catalog.regtype,\n" - "agginitval,\n"); + appendPQExpBufferStr(query, + "SELECT\n" + "aggtransfn,\n" + "aggfinalfn,\n" + "aggtranstype::pg_catalog.regtype,\n" + "agginitval,\n"); if (fout->remoteVersion >= 80100) - appendPQExpBuffer(query, - "aggsortop,\n"); + appendPQExpBufferStr(query, + "aggsortop,\n"); else - appendPQExpBuffer(query, - "0 AS aggsortop,\n"); + appendPQExpBufferStr(query, + "0 AS aggsortop,\n"); if (fout->remoteVersion >= 80400) - appendPQExpBuffer(query, - "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n" - "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n"); + appendPQExpBufferStr(query, + "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs,\n" + "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs,\n"); if (fout->remoteVersion >= 90400) - appendPQExpBuffer(query, - "aggkind,\n" - "aggmtransfn,\n" - "aggminvtransfn,\n" - "aggmfinalfn,\n" - "aggmtranstype::pg_catalog.regtype,\n" - "aggfinalextra,\n" - "aggmfinalextra,\n" - "aggtransspace,\n" - "aggmtransspace,\n" - "aggminitval,\n"); + appendPQExpBufferStr(query, + "aggkind,\n" + "aggmtransfn,\n" + "aggminvtransfn,\n" + "aggmfinalfn,\n" + "aggmtranstype::pg_catalog.regtype,\n" + "aggfinalextra,\n" + "aggmfinalextra,\n" + "aggtransspace,\n" + "aggmtransspace,\n" + "aggminitval,\n"); else - appendPQExpBuffer(query, - "'n' AS aggkind,\n" - "'-' AS aggmtransfn,\n" - "'-' AS aggminvtransfn,\n" - "'-' AS aggmfinalfn,\n" - "0 AS aggmtranstype,\n" - "false AS aggfinalextra,\n" - "false AS aggmfinalextra,\n" - "0 AS aggtransspace,\n" - "0 AS aggmtransspace,\n" - "NULL AS 
aggminitval,\n"); + appendPQExpBufferStr(query, + "'n' AS aggkind,\n" + "'-' AS aggmtransfn,\n" + "'-' AS aggminvtransfn,\n" + "'-' AS aggmfinalfn,\n" + "0 AS aggmtranstype,\n" + "false AS aggfinalextra,\n" + "false AS aggmfinalextra,\n" + "0 AS aggtransspace,\n" + "0 AS aggmtransspace,\n" + "NULL AS aggminitval,\n"); if (fout->remoteVersion >= 90600) - appendPQExpBuffer(query, - "aggcombinefn,\n" - "aggserialfn,\n" - "aggdeserialfn,\n" - "proparallel,\n"); + appendPQExpBufferStr(query, + "aggcombinefn,\n" + "aggserialfn,\n" + "aggdeserialfn,\n" + "proparallel,\n"); else - appendPQExpBuffer(query, - "'-' AS aggcombinefn,\n" - "'-' AS aggserialfn,\n" - "'-' AS aggdeserialfn,\n" - "'u' AS proparallel,\n"); + appendPQExpBufferStr(query, + "'-' AS aggcombinefn,\n" + "'-' AS aggserialfn,\n" + "'-' AS aggdeserialfn,\n" + "'u' AS proparallel,\n"); if (fout->remoteVersion >= 110000) - appendPQExpBuffer(query, - "aggfinalmodify,\n" - "aggmfinalmodify\n"); + appendPQExpBufferStr(query, + "aggfinalmodify,\n" + "aggmfinalmodify\n"); else - appendPQExpBuffer(query, - "'0' AS aggfinalmodify,\n" - "'0' AS aggmfinalmodify\n"); + appendPQExpBufferStr(query, + "'0' AS aggfinalmodify,\n" + "'0' AS aggmfinalmodify\n"); appendPQExpBuffer(query, "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p " @@ -14113,7 +14110,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) } } - aggsortconvop = getFormattedOperatorName(fout, aggsortop); + aggsortconvop = getFormattedOperatorName(aggsortop); if (aggsortconvop) { appendPQExpBuffer(details, ",\n SORTOP = %s", @@ -15541,10 +15538,12 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo) int j, k; + /* We had better have loaded per-column details about this table */ + Assert(tbinfo->interesting); + qrelname = pg_strdup(fmtId(tbinfo->dobj.name)); qualrelname = pg_strdup(fmtQualifiedDumpable(tbinfo)); - if (tbinfo->hasoids) pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")", qrelname); @@ -17915,8 +17914,6 @@ 
processExtensionTables(Archive *fout, ExtensionInfo extinfo[], configtbl->dataObj->filtercond = pg_strdup(extconditionarray[j]); } } - - configtbl->interesting = dumpobj; } } if (extconfigarray) diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 97d2b8dac1c6f..2fa11745cc330 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -283,7 +283,7 @@ main(int argc, char *argv[]) case 'v': verbose = true; - pg_logging_set_level(PG_LOG_INFO); + pg_logging_increase_verbosity(); appendPQExpBufferStr(pgdumpopts, " -v"); break; @@ -1614,7 +1614,7 @@ buildShSecLabels(PGconn *conn, const char *catalog_name, Oid objectId, PQExpBuffer sql = createPQExpBuffer(); PGresult *res; - buildShSecLabelQuery(conn, catalog_name, objectId, sql); + buildShSecLabelQuery(catalog_name, objectId, sql); res = executeQuery(conn, sql->data); emitShSecLabels(conn, res, buffer, objtype, objname); diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index 544ae3bc5cdf5..589b4aed53988 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -163,7 +163,7 @@ main(int argc, char **argv) opts->createDB = 1; break; case 'd': - opts->dbname = pg_strdup(optarg); + opts->cparams.dbname = pg_strdup(optarg); break; case 'e': opts->exit_on_error = true; @@ -177,7 +177,7 @@ main(int argc, char **argv) break; case 'h': if (strlen(optarg) != 0) - opts->pghost = pg_strdup(optarg); + opts->cparams.pghost = pg_strdup(optarg); break; case 'j': /* number of restore jobs */ @@ -206,7 +206,7 @@ main(int argc, char **argv) case 'p': if (strlen(optarg) != 0) - opts->pgport = pg_strdup(optarg); + opts->cparams.pgport = pg_strdup(optarg); break; case 'R': /* no-op, still accepted for backwards compatibility */ @@ -240,20 +240,20 @@ main(int argc, char **argv) break; case 'U': - opts->username = pg_strdup(optarg); + opts->cparams.username = pg_strdup(optarg); break; case 'v': /* verbose */ opts->verbose = 1; - 
pg_logging_set_level(PG_LOG_INFO); + pg_logging_increase_verbosity(); break; case 'w': - opts->promptPassword = TRI_NO; + opts->cparams.promptPassword = TRI_NO; break; case 'W': - opts->promptPassword = TRI_YES; + opts->cparams.promptPassword = TRI_YES; break; case 'x': /* skip ACL dump */ @@ -303,14 +303,14 @@ main(int argc, char **argv) } /* Complain if neither -f nor -d was specified (except if dumping TOC) */ - if (!opts->dbname && !opts->filename && !opts->tocSummary) + if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary) { pg_log_error("one of -d/--dbname and -f/--file must be specified"); exit_nicely(1); } /* Should get at most one of -d and -f, else user is confused */ - if (opts->dbname) + if (opts->cparams.dbname) { if (opts->filename) { diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index 23fc749e44515..0ec52cb032790 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -181,7 +181,7 @@ main(int argc, char **argv) case 3: debug = true; - pg_logging_set_level(PG_LOG_DEBUG); + pg_logging_increase_verbosity(); break; case 'D': /* -D or --target-pgdata */ diff --git a/src/bin/pg_test_fsync/.gitignore b/src/bin/pg_test_fsync/.gitignore index f3b5932498596..5eb5085f4524a 100644 --- a/src/bin/pg_test_fsync/.gitignore +++ b/src/bin/pg_test_fsync/.gitignore @@ -1 +1,3 @@ /pg_test_fsync + +/tmp_check/ diff --git a/src/bin/pg_test_fsync/Makefile b/src/bin/pg_test_fsync/Makefile index 7632c94eb7f62..631d0f38a8e09 100644 --- a/src/bin/pg_test_fsync/Makefile +++ b/src/bin/pg_test_fsync/Makefile @@ -22,8 +22,15 @@ install: all installdirs installdirs: $(MKDIR_P) '$(DESTDIR)$(bindir)' +check: + $(prove_check) + +installcheck: + $(prove_installcheck) + uninstall: rm -f '$(DESTDIR)$(bindir)/pg_test_fsync$(X)' clean distclean maintainer-clean: rm -f pg_test_fsync$(X) $(OBJS) + rm -rf tmp_check diff --git a/src/bin/pg_test_fsync/pg_test_fsync.c b/src/bin/pg_test_fsync/pg_test_fsync.c index 
6e47293123318..3eddd983c63ba 100644 --- a/src/bin/pg_test_fsync/pg_test_fsync.c +++ b/src/bin/pg_test_fsync/pg_test_fsync.c @@ -5,6 +5,7 @@ #include "postgres_fe.h" +#include #include #include #include @@ -62,7 +63,7 @@ do { \ static const char *progname; -static int secs_per_test = 5; +static unsigned int secs_per_test = 5; static int needs_unlink = 0; static char full_buf[DEFAULT_XLOG_SEG_SIZE], *buf, @@ -148,6 +149,8 @@ handle_args(int argc, char *argv[]) int option; /* Command line option */ int optindex = 0; /* used by getopt_long */ + unsigned long optval; /* used for option parsing */ + char *endptr; if (argc > 1) { @@ -173,7 +176,24 @@ handle_args(int argc, char *argv[]) break; case 's': - secs_per_test = atoi(optarg); + errno = 0; + optval = strtoul(optarg, &endptr, 10); + + if (endptr == optarg || *endptr != '\0' || + errno != 0 || optval != (unsigned int) optval) + { + pg_log_error("invalid argument for option %s", "--secs-per-test"); + fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); + exit(1); + } + + secs_per_test = (unsigned int) optval; + if (secs_per_test == 0) + { + pg_log_error("%s must be in range %u..%u", + "--secs-per-test", 1, UINT_MAX); + exit(1); + } break; default: @@ -193,8 +213,8 @@ handle_args(int argc, char *argv[]) exit(1); } - printf(ngettext("%d second per test\n", - "%d seconds per test\n", + printf(ngettext("%u second per test\n", + "%u seconds per test\n", secs_per_test), secs_per_test); #if PG_O_DIRECT != 0 diff --git a/src/bin/pg_test_fsync/t/001_basic.pl b/src/bin/pg_test_fsync/t/001_basic.pl new file mode 100644 index 0000000000000..fe9c295c4976d --- /dev/null +++ b/src/bin/pg_test_fsync/t/001_basic.pl @@ -0,0 +1,25 @@ +use strict; +use warnings; + +use Config; +use TestLib; +use Test::More tests => 12; + +######################################### +# Basic checks + +program_help_ok('pg_test_fsync'); +program_version_ok('pg_test_fsync'); +program_options_handling_ok('pg_test_fsync'); + 
+######################################### +# Test invalid option combinations + +command_fails_like( + [ 'pg_test_fsync', '--secs-per-test', 'a' ], + qr/\Qpg_test_fsync: error: invalid argument for option --secs-per-test\E/, + 'pg_test_fsync: invalid argument for option --secs-per-test'); +command_fails_like( + [ 'pg_test_fsync', '--secs-per-test', '0' ], + qr/\Qpg_test_fsync: error: --secs-per-test must be in range 1..4294967295\E/, + 'pg_test_fsync: --secs-per-test must be in range'); diff --git a/src/bin/pg_test_timing/.gitignore b/src/bin/pg_test_timing/.gitignore index f6c664c765764..e5aac2ab120f2 100644 --- a/src/bin/pg_test_timing/.gitignore +++ b/src/bin/pg_test_timing/.gitignore @@ -1 +1,3 @@ /pg_test_timing + +/tmp_check/ diff --git a/src/bin/pg_test_timing/Makefile b/src/bin/pg_test_timing/Makefile index 334d6ff5c00db..84d84c38aa86d 100644 --- a/src/bin/pg_test_timing/Makefile +++ b/src/bin/pg_test_timing/Makefile @@ -22,8 +22,15 @@ install: all installdirs installdirs: $(MKDIR_P) '$(DESTDIR)$(bindir)' +check: + $(prove_check) + +installcheck: + $(prove_installcheck) + uninstall: rm -f '$(DESTDIR)$(bindir)/pg_test_timing$(X)' clean distclean maintainer-clean: rm -f pg_test_timing$(X) $(OBJS) + rm -rf tmp_check diff --git a/src/bin/pg_test_timing/pg_test_timing.c b/src/bin/pg_test_timing/pg_test_timing.c index e14802372bd6a..c29d6f8762947 100644 --- a/src/bin/pg_test_timing/pg_test_timing.c +++ b/src/bin/pg_test_timing/pg_test_timing.c @@ -6,15 +6,17 @@ #include "postgres_fe.h" +#include + #include "getopt_long.h" #include "portability/instr_time.h" static const char *progname; -static int32 test_duration = 3; +static unsigned int test_duration = 3; static void handle_args(int argc, char *argv[]); -static uint64 test_timing(int32); +static uint64 test_timing(unsigned int duration); static void output(uint64 loop_count); /* record duration in powers of 2 microseconds */ @@ -47,6 +49,8 @@ handle_args(int argc, char *argv[]) int option; /* Command line 
option */ int optindex = 0; /* used by getopt_long */ + unsigned long optval; /* used for option parsing */ + char *endptr; if (argc > 1) { @@ -68,7 +72,25 @@ handle_args(int argc, char *argv[]) switch (option) { case 'd': - test_duration = atoi(optarg); + errno = 0; + optval = strtoul(optarg, &endptr, 10); + + if (endptr == optarg || *endptr != '\0' || + errno != 0 || optval != (unsigned int) optval) + { + fprintf(stderr, _("%s: invalid argument for option %s\n"), + progname, "--duration"); + fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); + exit(1); + } + + test_duration = (unsigned int) optval; + if (test_duration == 0) + { + fprintf(stderr, _("%s: %s must be in range %u..%u\n"), + progname, "--duration", 1, UINT_MAX); + exit(1); + } break; default: @@ -89,26 +111,15 @@ handle_args(int argc, char *argv[]) exit(1); } - if (test_duration > 0) - { - printf(ngettext("Testing timing overhead for %d second.\n", - "Testing timing overhead for %d seconds.\n", - test_duration), - test_duration); - } - else - { - fprintf(stderr, - _("%s: duration must be a positive integer (duration is \"%d\")\n"), - progname, test_duration); - fprintf(stderr, _("Try \"%s --help\" for more information.\n"), - progname); - exit(1); - } + + printf(ngettext("Testing timing overhead for %u second.\n", + "Testing timing overhead for %u seconds.\n", + test_duration), + test_duration); } static uint64 -test_timing(int32 duration) +test_timing(unsigned int duration) { uint64 total_time; int64 time_elapsed = 0; diff --git a/src/bin/pg_test_timing/t/001_basic.pl b/src/bin/pg_test_timing/t/001_basic.pl new file mode 100644 index 0000000000000..8bad19c7fad97 --- /dev/null +++ b/src/bin/pg_test_timing/t/001_basic.pl @@ -0,0 +1,25 @@ +use strict; +use warnings; + +use Config; +use TestLib; +use Test::More tests => 12; + +######################################### +# Basic checks + +program_help_ok('pg_test_timing'); +program_version_ok('pg_test_timing'); 
+program_options_handling_ok('pg_test_timing'); + +######################################### +# Test invalid option combinations + +command_fails_like( + [ 'pg_test_timing', '--duration', 'a' ], + qr/\Qpg_test_timing: invalid argument for option --duration\E/, + 'pg_test_timing: invalid argument for option --duration'); +command_fails_like( + [ 'pg_test_timing', '--duration', '0' ], + qr/\Qpg_test_timing: --duration must be in range 1..4294967295\E/, + 'pg_test_timing: --duration must be in range'); diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 00aef855dc076..05e6bf7f2c349 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -22,10 +22,12 @@ static void check_is_install_user(ClusterInfo *cluster); static void check_proper_datallowconn(ClusterInfo *cluster); static void check_for_prepared_transactions(ClusterInfo *cluster); static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster); +static void check_for_user_defined_postfix_ops(ClusterInfo *cluster); static void check_for_tables_with_oids(ClusterInfo *cluster); static void check_for_reg_data_type_usage(ClusterInfo *cluster); static void check_for_jsonb_9_4_usage(ClusterInfo *cluster); static void check_for_pg_role_prefix(ClusterInfo *cluster); +static void check_for_new_tablespace_dir(ClusterInfo *new_cluster); static char *get_canonical_locale_name(int category, const char *locale); @@ -100,6 +102,13 @@ check_and_dump_old_cluster(bool live_check) check_for_reg_data_type_usage(&old_cluster); check_for_isn_and_int8_passing_mismatch(&old_cluster); + /* + * Pre-PG 14 allowed user defined postfix operators, which are not + * supported anymore. Verify there are none, iff applicable. + */ + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300) + check_for_user_defined_postfix_ops(&old_cluster); + /* * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not * supported anymore. Verify there are none, iff applicable. 
@@ -179,6 +188,8 @@ check_new_cluster(void) check_is_install_user(&new_cluster); check_for_prepared_transactions(&new_cluster); + + check_for_new_tablespace_dir(&new_cluster); } @@ -226,18 +237,10 @@ void output_completion_banner(char *analyze_script_file_name, char *deletion_script_file_name) { - /* Did we copy the free space files? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) - pg_log(PG_REPORT, - "Optimizer statistics are not transferred by pg_upgrade so,\n" - "once you start the new server, consider running:\n" - " %s\n\n", analyze_script_file_name); - else - pg_log(PG_REPORT, - "Optimizer statistics and free space information are not transferred\n" - "by pg_upgrade so, once you start the new server, consider running:\n" - " %s\n\n", analyze_script_file_name); - + pg_log(PG_REPORT, + "Optimizer statistics are not transferred by pg_upgrade so,\n" + "once you start the new server, consider running:\n" + " %s\n\n", analyze_script_file_name); if (deletion_script_file_name) pg_log(PG_REPORT, @@ -304,7 +307,7 @@ check_cluster_compatibility(bool live_check) check_control_data(&old_cluster.controldata, &new_cluster.controldata); /* We read the real port number for PG >= 9.1 */ - if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 && + if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) <= 900 && old_cluster.port == DEF_PGUPORT) pg_fatal("When checking a pre-PG 9.1 live old server, " "you must specify the old server's port number.\n"); @@ -502,19 +505,12 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) ECHO_QUOTE, ECHO_QUOTE); fprintf(script, "echo %sthis script and run:%s\n", ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %s \"%s/vacuumdb\" %s--all %s%s\n", ECHO_QUOTE, - new_cluster.bindir, user_specification.data, - /* Did we copy the free space files? */ - (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? 
- "--analyze-only" : "--analyze", ECHO_QUOTE); + fprintf(script, "echo %s \"%s/vacuumdb\" %s--all --analyze-only%s\n", ECHO_QUOTE, + new_cluster.bindir, user_specification.data, ECHO_QUOTE); fprintf(script, "echo%s\n\n", ECHO_BLANK); fprintf(script, "\"%s/vacuumdb\" %s--all --analyze-in-stages\n", new_cluster.bindir, user_specification.data); - /* Did we copy the free space files? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 804) - fprintf(script, "\"%s/vacuumdb\" %s--all\n", new_cluster.bindir, - user_specification.data); fprintf(script, "echo%s\n\n", ECHO_BLANK); fprintf(script, "echo %sDone%s\n", @@ -534,6 +530,44 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) } +/* + * A previous run of pg_upgrade might have failed and the new cluster + * directory recreated, but they might have forgotten to remove + * the new cluster's tablespace directories. Therefore, check that + * new cluster tablespace directories do not already exist. If + * they do, it would cause an error while restoring global objects. + * This allows the failure to be detected at check time, rather than + * during schema restore. + * + * Note, v8.4 has no tablespace_suffix, which is fine so long as the + * version being upgraded *to* has a suffix, since it's not allowed + * to pg_upgrade from a version to the same version if tablespaces are + * in use. 
+ */ +static void +check_for_new_tablespace_dir(ClusterInfo *new_cluster) +{ + int tblnum; + char new_tablespace_dir[MAXPGPATH]; + + prep_status("Checking for new cluster tablespace directories"); + + for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + { + struct stat statbuf; + + snprintf(new_tablespace_dir, MAXPGPATH, "%s%s", + os_info.old_tablespaces[tblnum], + new_cluster->tablespace_suffix); + + if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT) + pg_fatal("new cluster tablespace directory already exists: \"%s\"\n", + new_tablespace_dir); + } + + check_ok(); +} + /* * create_script_for_old_cluster_deletion() * @@ -896,6 +930,104 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) check_ok(); } +/* + * Verify that no user defined postfix operators exist. + */ +static void +check_for_user_defined_postfix_ops(ClusterInfo *cluster) +{ + int dbnum; + FILE *script = NULL; + bool found = false; + char output_path[MAXPGPATH]; + + prep_status("Checking for user-defined postfix operators"); + + snprintf(output_path, sizeof(output_path), + "postfix_ops.txt"); + + /* Find any user defined postfix operators */ + for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + { + PGresult *res; + bool db_used = false; + int ntups; + int rowno; + int i_oproid, + i_oprnsp, + i_oprname, + i_typnsp, + i_typname; + DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(cluster, active_db->db_name); + + /* + * The query below hardcodes FirstNormalObjectId as 16384 rather than + * interpolating that C #define into the query because, if that + * #define is ever changed, the cutoff we want to use is the value + * used by pre-version 14 servers, not that of some future version. 
+ */ + res = executeQueryOrDie(conn, + "SELECT o.oid AS oproid, " + " n.nspname AS oprnsp, " + " o.oprname, " + " tn.nspname AS typnsp, " + " t.typname " + "FROM pg_catalog.pg_operator o, " + " pg_catalog.pg_namespace n, " + " pg_catalog.pg_type t, " + " pg_catalog.pg_namespace tn " + "WHERE o.oprnamespace = n.oid AND " + " o.oprleft = t.oid AND " + " t.typnamespace = tn.oid AND " + " o.oprright = 0 AND " + " o.oid >= 16384"); + ntups = PQntuples(res); + i_oproid = PQfnumber(res, "oproid"); + i_oprnsp = PQfnumber(res, "oprnsp"); + i_oprname = PQfnumber(res, "oprname"); + i_typnsp = PQfnumber(res, "typnsp"); + i_typname = PQfnumber(res, "typname"); + for (rowno = 0; rowno < ntups; rowno++) + { + found = true; + if (script == NULL && + (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("could not open file \"%s\": %s\n", + output_path, strerror(errno)); + if (!db_used) + { + fprintf(script, "In database: %s\n", active_db->db_name); + db_used = true; + } + fprintf(script, " (oid=%s) %s.%s (%s.%s, NONE)\n", + PQgetvalue(res, rowno, i_oproid), + PQgetvalue(res, rowno, i_oprnsp), + PQgetvalue(res, rowno, i_oprname), + PQgetvalue(res, rowno, i_typnsp), + PQgetvalue(res, rowno, i_typname)); + } + + PQclear(res); + + PQfinish(conn); + } + + if (script) + fclose(script); + + if (found) + { + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation contains user-defined postfix operators, which are not\n" + "supported anymore. Consider dropping the postfix operators and replacing\n" + "them with prefix operators or function calls.\n" + "A list of user-defined postfix operators is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); +} /* * Verify that no tables are declared WITH OIDS. 
diff --git a/src/bin/pg_upgrade/controldata.c b/src/bin/pg_upgrade/controldata.c index 00d71e3a8a7c8..39bcaa8fe1a2b 100644 --- a/src/bin/pg_upgrade/controldata.c +++ b/src/bin/pg_upgrade/controldata.c @@ -180,7 +180,7 @@ get_control_data(ClusterInfo *cluster, bool live_check) } /* pg_resetxlog has been renamed to pg_resetwal in version 10 */ - if (GET_MAJOR_VERSION(cluster->bin_version) < 1000) + if (GET_MAJOR_VERSION(cluster->bin_version) <= 906) resetwal_bin = "pg_resetxlog\" -n"; else resetwal_bin = "pg_resetwal\" -n"; diff --git a/src/bin/pg_upgrade/exec.c b/src/bin/pg_upgrade/exec.c index b31cda8fec60e..bdff13bb688fe 100644 --- a/src/bin/pg_upgrade/exec.c +++ b/src/bin/pg_upgrade/exec.c @@ -341,13 +341,13 @@ check_data_dir(ClusterInfo *cluster) check_single_dir(pg_data, "pg_twophase"); /* pg_xlog has been renamed to pg_wal in v10 */ - if (GET_MAJOR_VERSION(cluster->major_version) < 1000) + if (GET_MAJOR_VERSION(cluster->major_version) <= 906) check_single_dir(pg_data, "pg_xlog"); else check_single_dir(pg_data, "pg_wal"); /* pg_clog has been renamed to pg_xact in v10 */ - if (GET_MAJOR_VERSION(cluster->major_version) < 1000) + if (GET_MAJOR_VERSION(cluster->major_version) <= 906) check_single_dir(pg_data, "pg_clog"); else check_single_dir(pg_data, "pg_xact"); @@ -387,7 +387,7 @@ check_bin_dir(ClusterInfo *cluster) get_bin_version(cluster); /* pg_resetxlog has been renamed to pg_resetwal in version 10 */ - if (GET_MAJOR_VERSION(cluster->bin_version) < 1000) + if (GET_MAJOR_VERSION(cluster->bin_version) <= 906) validate_exec(cluster->bindir, "pg_resetxlog"); else validate_exec(cluster->bindir, "pg_resetwal"); diff --git a/src/bin/pg_upgrade/function.c b/src/bin/pg_upgrade/function.c index d163cb2dde712..e0bc368e1e168 100644 --- a/src/bin/pg_upgrade/function.c +++ b/src/bin/pg_upgrade/function.c @@ -90,7 +90,7 @@ get_loadable_libraries(void) * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php * 
http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 901) + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 900) { PGresult *res; @@ -218,7 +218,7 @@ check_loadable_libraries(void) * library name "plpython" in an old PG <= 9.1 cluster must look * for "plpython2" in the new cluster. */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 900 && strcmp(lib, "$libdir/plpython") == 0) { lib = "$libdir/plpython2"; diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c index 70194eb0964f9..1bc86e4205de4 100644 --- a/src/bin/pg_upgrade/pg_upgrade.c +++ b/src/bin/pg_upgrade/pg_upgrade.c @@ -407,7 +407,7 @@ create_new_objects(void) * We don't have minmxids for databases or relations in pre-9.3 clusters, * so set those after we have restored the schema. */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 903) + if (GET_MAJOR_VERSION(old_cluster.major_version) <= 902) set_frozenxids(true); /* update new_cluster info now that we have objects in the databases */ @@ -466,9 +466,9 @@ copy_xact_xlog_xid(void) * Copy old commit logs to new data dir. pg_clog has been renamed to * pg_xact in post-10 clusters. */ - copy_subdir_files(GET_MAJOR_VERSION(old_cluster.major_version) < 1000 ? + copy_subdir_files(GET_MAJOR_VERSION(old_cluster.major_version) <= 906 ? "pg_clog" : "pg_xact", - GET_MAJOR_VERSION(new_cluster.major_version) < 1000 ? + GET_MAJOR_VERSION(new_cluster.major_version) <= 906 ? 
"pg_clog" : "pg_xact"); /* set the next transaction id and epoch of the new cluster */ diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c index af9a021400a8e..f76ddaaf3a161 100644 --- a/src/bin/pg_upgrade/relfilenode.c +++ b/src/bin/pg_upgrade/relfilenode.c @@ -163,16 +163,12 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace) /* transfer primary file */ transfer_relfile(&maps[mapnum], "", vm_must_add_frozenbit); - /* fsm/vm files added in PG 8.4 */ - if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) - { - /* - * Copy/link any fsm and vm files, if they exist - */ - transfer_relfile(&maps[mapnum], "_fsm", vm_must_add_frozenbit); - if (vm_crashsafe_match) - transfer_relfile(&maps[mapnum], "_vm", vm_must_add_frozenbit); - } + /* + * Copy/link any fsm and vm files, if they exist + */ + transfer_relfile(&maps[mapnum], "_fsm", vm_must_add_frozenbit); + if (vm_crashsafe_match) + transfer_relfile(&maps[mapnum], "_vm", vm_must_add_frozenbit); } } } diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c index 7db3c1d51f2e2..713509f54062a 100644 --- a/src/bin/pg_upgrade/server.c +++ b/src/bin/pg_upgrade/server.c @@ -220,7 +220,7 @@ start_postmaster(ClusterInfo *cluster, bool report_and_exit_on_error) snprintf(socket_string + strlen(socket_string), sizeof(socket_string) - strlen(socket_string), " -c %s='%s'", - (GET_MAJOR_VERSION(cluster->major_version) < 903) ? + (GET_MAJOR_VERSION(cluster->major_version) <= 902) ? 
"unix_socket_directory" : "unix_socket_directories", cluster->sockdir); #endif diff --git a/src/bin/pg_upgrade/version.c b/src/bin/pg_upgrade/version.c index 4e5d27f76eb2f..db1934124ee35 100644 --- a/src/bin/pg_upgrade/version.c +++ b/src/bin/pg_upgrade/version.c @@ -158,33 +158,33 @@ check_for_data_type_usage(ClusterInfo *cluster, const char *typename, /* Ranges were introduced in 9.2 */ if (GET_MAJOR_VERSION(cluster->major_version) >= 902) - appendPQExpBuffer(&querybuf, - " UNION ALL " + appendPQExpBufferStr(&querybuf, + " UNION ALL " /* ranges containing any type selected so far */ - " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x " - " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"); + " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x " + " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"); - appendPQExpBuffer(&querybuf, - " ) foo " - ") " + appendPQExpBufferStr(&querybuf, + " ) foo " + ") " /* now look for stored columns of any such type */ - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid IN (SELECT oid FROM oids) AND " - " c.relkind IN (" - CppAsString2(RELKIND_RELATION) ", " - CppAsString2(RELKIND_MATVIEW) ", " - CppAsString2(RELKIND_INDEX) ") AND " - " c.relnamespace = n.oid AND " + "SELECT n.nspname, c.relname, a.attname " + "FROM pg_catalog.pg_class c, " + " pg_catalog.pg_namespace n, " + " pg_catalog.pg_attribute a " + "WHERE c.oid = a.attrelid AND " + " NOT a.attisdropped AND " + " a.atttypid IN (SELECT oid FROM oids) AND " + " c.relkind IN (" + CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_MATVIEW) ", " + CppAsString2(RELKIND_INDEX) ") AND " + " c.relnamespace = n.oid AND " /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " 
+ " n.nspname !~ '^pg_temp_' AND " + " n.nspname !~ '^pg_toast_temp_' AND " /* exclude system catalogs, too */ - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); res = executeQueryOrDie(conn, "%s", querybuf.data); diff --git a/src/bin/pg_verifybackup/parse_manifest.c b/src/bin/pg_verifybackup/parse_manifest.c index faee423c7ece5..608e23538bad2 100644 --- a/src/bin/pg_verifybackup/parse_manifest.c +++ b/src/bin/pg_verifybackup/parse_manifest.c @@ -325,7 +325,7 @@ json_manifest_object_field_start(void *state, char *fname, bool isnull) /* It's not a field we recognize. */ json_manifest_parse_failure(parse->context, - "unknown toplevel field"); + "unrecognized top-level field"); break; case JM_EXPECT_THIS_FILE_FIELD: @@ -358,7 +358,7 @@ json_manifest_object_field_start(void *state, char *fname, bool isnull) parse->wal_range_field = JMWRF_END_LSN; else json_manifest_parse_failure(parse->context, - "unexpected wal range field"); + "unexpected WAL range field"); parse->state = JM_EXPECT_THIS_WAL_RANGE_VALUE; break; @@ -469,10 +469,10 @@ json_manifest_finalize_file(JsonManifestParseState *parse) /* Pathname and size are required. 
*/ if (parse->pathname == NULL && parse->encoded_pathname == NULL) - json_manifest_parse_failure(parse->context, "missing pathname"); + json_manifest_parse_failure(parse->context, "missing path name"); if (parse->pathname != NULL && parse->encoded_pathname != NULL) json_manifest_parse_failure(parse->context, - "both pathname and encoded pathname"); + "both path name and encoded path name"); if (parse->size == NULL) json_manifest_parse_failure(parse->context, "missing size"); if (parse->algorithm == NULL && parse->checksum != NULL) @@ -491,7 +491,7 @@ json_manifest_finalize_file(JsonManifestParseState *parse) parse->encoded_pathname, raw_length)) json_manifest_parse_failure(parse->context, - "unable to decode filename"); + "could not decode file name"); parse->pathname[raw_length] = '\0'; pfree(parse->encoded_pathname); parse->encoded_pathname = NULL; @@ -582,10 +582,10 @@ json_manifest_finalize_wal_range(JsonManifestParseState *parse) "timeline is not an integer"); if (!parse_xlogrecptr(&start_lsn, parse->start_lsn)) json_manifest_parse_failure(parse->context, - "unable to parse start LSN"); + "could not parse start LSN"); if (!parse_xlogrecptr(&end_lsn, parse->end_lsn)) json_manifest_parse_failure(parse->context, - "unable to parse end LSN"); + "could not parse end LSN"); /* Invoke the callback with the details we've gathered. 
*/ context->perwalrange_cb(context, tli, start_lsn, end_lsn); diff --git a/src/bin/pg_verifybackup/pg_verifybackup.c b/src/bin/pg_verifybackup/pg_verifybackup.c index 70b6ffdec00b5..bb3733b57e204 100644 --- a/src/bin/pg_verifybackup/pg_verifybackup.c +++ b/src/bin/pg_verifybackup/pg_verifybackup.c @@ -411,8 +411,8 @@ parse_manifest_file(char *manifest_path, manifest_files_hash **ht_p, report_fatal_error("could not read file \"%s\": %m", manifest_path); else - report_fatal_error("could not read file \"%s\": read %d of %zu", - manifest_path, rc, (size_t) statbuf.st_size); + report_fatal_error("could not read file \"%s\": read %d of %lld", + manifest_path, rc, (long long int) statbuf.st_size); } /* Close the manifest file. */ @@ -471,7 +471,7 @@ record_manifest_details_for_file(JsonManifestParseContext *context, /* Make a new entry in the hash table for this file. */ m = manifest_files_insert(ht, pathname, &found); if (found) - report_fatal_error("duplicate pathname in backup manifest: \"%s\"", + report_fatal_error("duplicate path name in backup manifest: \"%s\"", pathname); /* Initialize the entry. 
*/ @@ -638,8 +638,8 @@ verify_backup_file(verifier_context *context, char *relpath, char *fullpath) if (m->size != sb.st_size) { report_backup_error(context, - "\"%s\" has size %zu on disk but size %zu in the manifest", - relpath, (size_t) sb.st_size, m->size); + "\"%s\" has size %lld on disk but size %zu in the manifest", + relpath, (long long int) sb.st_size, m->size); m->bad = true; } diff --git a/src/bin/pg_verifybackup/t/005_bad_manifest.pl b/src/bin/pg_verifybackup/t/005_bad_manifest.pl index afd64d1a96b00..5bd5556038c3d 100644 --- a/src/bin/pg_verifybackup/t/005_bad_manifest.pl +++ b/src/bin/pg_verifybackup/t/005_bad_manifest.pl @@ -38,7 +38,7 @@ {"PostgreSQL-Backup-Manifest-Version": 1, "Files": true} EOM -test_parse_error('unknown toplevel field', < 0); fprintf(stderr, "creating %d partitions...\n", partitions); + initPQExpBuffer(&query); + for (int p = 1; p <= partitions; p++) { - char query[256]; - if (partition_method == PART_RANGE) { int64 part_size = (naccounts * (int64) scale + partitions - 1) / partitions; - char minvalue[32], - maxvalue[32]; + + printfPQExpBuffer(&query, + "create%s table pgbench_accounts_%d\n" + " partition of pgbench_accounts\n" + " for values from (", + unlogged_tables ? " unlogged" : "", p); /* * For RANGE, we use open-ended partitions at the beginning and @@ -3662,34 +3657,39 @@ createPartitions(PGconn *con) * scale, it is more generic and the performance is better. 
*/ if (p == 1) - sprintf(minvalue, "minvalue"); + appendPQExpBufferStr(&query, "minvalue"); else - sprintf(minvalue, INT64_FORMAT, (p - 1) * part_size + 1); + appendPQExpBuffer(&query, INT64_FORMAT, (p - 1) * part_size + 1); + + appendPQExpBufferStr(&query, ") to ("); if (p < partitions) - sprintf(maxvalue, INT64_FORMAT, p * part_size + 1); + appendPQExpBuffer(&query, INT64_FORMAT, p * part_size + 1); else - sprintf(maxvalue, "maxvalue"); - - snprintf(query, sizeof(query), - "create%s table pgbench_accounts_%d\n" - " partition of pgbench_accounts\n" - " for values from (%s) to (%s)%s\n", - unlogged_tables ? " unlogged" : "", p, - minvalue, maxvalue, ff); + appendPQExpBufferStr(&query, "maxvalue"); + + appendPQExpBufferChar(&query, ')'); } else if (partition_method == PART_HASH) - snprintf(query, sizeof(query), - "create%s table pgbench_accounts_%d\n" - " partition of pgbench_accounts\n" - " for values with (modulus %d, remainder %d)%s\n", - unlogged_tables ? " unlogged" : "", p, - partitions, p - 1, ff); + printfPQExpBuffer(&query, + "create%s table pgbench_accounts_%d\n" + " partition of pgbench_accounts\n" + " for values with (modulus %d, remainder %d)", + unlogged_tables ? " unlogged" : "", p, + partitions, p - 1); else /* cannot get there */ Assert(0); - executeStatement(con, query); + /* + * Per ddlinfo in initCreateTables, fillfactor is needed on table + * pgbench_accounts. + */ + appendPQExpBuffer(&query, " with (fillfactor=%d)", fillfactor); + + executeStatement(con, query.data); } + + termPQExpBuffer(&query); } /* @@ -3743,63 +3743,50 @@ initCreateTables(PGconn *con) } }; int i; + PQExpBufferData query; fprintf(stderr, "creating tables...\n"); + initPQExpBuffer(&query); + for (i = 0; i < lengthof(DDLs); i++) { - char opts[256]; - char buffer[256]; const struct ddlinfo *ddl = &DDLs[i]; - const char *cols; /* Construct new create table statement. */ - opts[0] = '\0'; + printfPQExpBuffer(&query, "create%s table %s(%s)", + unlogged_tables ? 
" unlogged" : "", + ddl->table, + (scale >= SCALE_32BIT_THRESHOLD) ? ddl->bigcols : ddl->smcols); /* Partition pgbench_accounts table */ if (partition_method != PART_NONE && strcmp(ddl->table, "pgbench_accounts") == 0) - snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts), - " partition by %s (aid)", PARTITION_METHOD[partition_method]); + appendPQExpBuffer(&query, + " partition by %s (aid)", PARTITION_METHOD[partition_method]); else if (ddl->declare_fillfactor) + { /* fillfactor is only expected on actual tables */ - append_fillfactor(opts, sizeof(opts)); + appendPQExpBuffer(&query, " with (fillfactor=%d)", fillfactor); + } if (tablespace != NULL) { char *escape_tablespace; - escape_tablespace = PQescapeIdentifier(con, tablespace, - strlen(tablespace)); - snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts), - " tablespace %s", escape_tablespace); + escape_tablespace = PQescapeIdentifier(con, tablespace, strlen(tablespace)); + appendPQExpBuffer(&query, " tablespace %s", escape_tablespace); PQfreemem(escape_tablespace); } - cols = (scale >= SCALE_32BIT_THRESHOLD) ? ddl->bigcols : ddl->smcols; - - snprintf(buffer, sizeof(buffer), "create%s table %s(%s)%s", - unlogged_tables ? " unlogged" : "", - ddl->table, cols, opts); - - executeStatement(con, buffer); + executeStatement(con, query.data); } + termPQExpBuffer(&query); + if (partition_method != PART_NONE) createPartitions(con); } -/* - * add fillfactor percent option. - * - * XXX - As default is 100, it could be removed in this case. 
- */ -static void -append_fillfactor(char *opts, int len) -{ - snprintf(opts + strlen(opts), len - strlen(opts), - " with (fillfactor=%d)", fillfactor); -} - /* * Truncate away any old data, in one command in case there are foreign keys */ @@ -3819,7 +3806,7 @@ initTruncateTables(PGconn *con) static void initGenerateDataClientSide(PGconn *con) { - char sql[256]; + PQExpBufferData sql; PGresult *res; int i; int64 k; @@ -3845,6 +3832,8 @@ initGenerateDataClientSide(PGconn *con) /* truncate away any old data */ initTruncateTables(con); + initPQExpBuffer(&sql); + /* * fill branches, tellers, accounts in that order in case foreign keys * already exist @@ -3852,19 +3841,19 @@ initGenerateDataClientSide(PGconn *con) for (i = 0; i < nbranches * scale; i++) { /* "filler" column defaults to NULL */ - snprintf(sql, sizeof(sql), - "insert into pgbench_branches(bid,bbalance) values(%d,0)", - i + 1); - executeStatement(con, sql); + printfPQExpBuffer(&sql, + "insert into pgbench_branches(bid,bbalance) values(%d,0)", + i + 1); + executeStatement(con, sql.data); } for (i = 0; i < ntellers * scale; i++) { /* "filler" column defaults to NULL */ - snprintf(sql, sizeof(sql), - "insert into pgbench_tellers(tid,bid,tbalance) values (%d,%d,0)", - i + 1, i / ntellers + 1); - executeStatement(con, sql); + printfPQExpBuffer(&sql, + "insert into pgbench_tellers(tid,bid,tbalance) values (%d,%d,0)", + i + 1, i / ntellers + 1); + executeStatement(con, sql.data); } /* @@ -3885,10 +3874,10 @@ initGenerateDataClientSide(PGconn *con) int64 j = k + 1; /* "filler" column defaults to blank padded empty string */ - snprintf(sql, sizeof(sql), - INT64_FORMAT "\t" INT64_FORMAT "\t%d\t\n", - j, k / naccounts + 1, 0); - if (PQputline(con, sql)) + printfPQExpBuffer(&sql, + INT64_FORMAT "\t" INT64_FORMAT "\t%d\t\n", + j, k / naccounts + 1, 0); + if (PQputline(con, sql.data)) { pg_log_fatal("PQputline failed"); exit(1); @@ -3950,6 +3939,8 @@ initGenerateDataClientSide(PGconn *con) exit(1); } + 
termPQExpBuffer(&sql); + executeStatement(con, "commit"); } @@ -3963,7 +3954,7 @@ initGenerateDataClientSide(PGconn *con) static void initGenerateDataServerSide(PGconn *con) { - char sql[256]; + PQExpBufferData sql; fprintf(stderr, "generating data (server-side)...\n"); @@ -3976,24 +3967,28 @@ initGenerateDataServerSide(PGconn *con) /* truncate away any old data */ initTruncateTables(con); - snprintf(sql, sizeof(sql), - "insert into pgbench_branches(bid,bbalance) " - "select bid, 0 " - "from generate_series(1, %d) as bid", nbranches * scale); - executeStatement(con, sql); - - snprintf(sql, sizeof(sql), - "insert into pgbench_tellers(tid,bid,tbalance) " - "select tid, (tid - 1) / %d + 1, 0 " - "from generate_series(1, %d) as tid", ntellers, ntellers * scale); - executeStatement(con, sql); - - snprintf(sql, sizeof(sql), - "insert into pgbench_accounts(aid,bid,abalance,filler) " - "select aid, (aid - 1) / %d + 1, 0, '' " - "from generate_series(1, " INT64_FORMAT ") as aid", - naccounts, (int64) naccounts * scale); - executeStatement(con, sql); + initPQExpBuffer(&sql); + + printfPQExpBuffer(&sql, + "insert into pgbench_branches(bid,bbalance) " + "select bid, 0 " + "from generate_series(1, %d) as bid", nbranches * scale); + executeStatement(con, sql.data); + + printfPQExpBuffer(&sql, + "insert into pgbench_tellers(tid,bid,tbalance) " + "select tid, (tid - 1) / %d + 1, 0 " + "from generate_series(1, %d) as tid", ntellers, ntellers * scale); + executeStatement(con, sql.data); + + printfPQExpBuffer(&sql, + "insert into pgbench_accounts(aid,bid,abalance,filler) " + "select aid, (aid - 1) / %d + 1, 0, '' " + "from generate_series(1, " INT64_FORMAT ") as aid", + naccounts, (int64) naccounts * scale); + executeStatement(con, sql.data); + + termPQExpBuffer(&sql); executeStatement(con, "commit"); } @@ -4023,13 +4018,15 @@ initCreatePKeys(PGconn *con) "alter table pgbench_accounts add primary key (aid)" }; int i; + PQExpBufferData query; fprintf(stderr, "creating primary 
keys...\n"); + initPQExpBuffer(&query); + for (i = 0; i < lengthof(DDLINDEXes); i++) { - char buffer[256]; - - strlcpy(buffer, DDLINDEXes[i], sizeof(buffer)); + resetPQExpBuffer(&query); + appendPQExpBufferStr(&query, DDLINDEXes[i]); if (index_tablespace != NULL) { @@ -4037,13 +4034,14 @@ initCreatePKeys(PGconn *con) escape_tablespace = PQescapeIdentifier(con, index_tablespace, strlen(index_tablespace)); - snprintf(buffer + strlen(buffer), sizeof(buffer) - strlen(buffer), - " using index tablespace %s", escape_tablespace); + appendPQExpBuffer(&query, " using index tablespace %s", escape_tablespace); PQfreemem(escape_tablespace); } - executeStatement(con, buffer); + executeStatement(con, query.data); } + + termPQExpBuffer(&query); } /* @@ -5522,7 +5520,7 @@ main(int argc, char **argv) pgport = pg_strdup(optarg); break; case 'd': - pg_logging_set_level(PG_LOG_DEBUG); + pg_logging_increase_verbosity(); break; case 'c': benchmarking_option_set = true; diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl index 52009c3524291..61b671d54fd66 100644 --- a/src/bin/pgbench/t/001_pgbench_with_server.pl +++ b/src/bin/pgbench/t/001_pgbench_with_server.pl @@ -287,7 +287,7 @@ sub pgbench [], [ qr{ERROR: invalid input syntax for type json}, - qr{(?!extended query with parameters)} + qr{(?!unnamed portal with parameters)} ], 'server parameter logging', { @@ -314,7 +314,7 @@ sub pgbench [], [ qr{ERROR: division by zero}, - qr{CONTEXT: extended query with parameters: \$1 = '1', \$2 = NULL} + qr{CONTEXT: unnamed portal with parameters: \$1 = '1', \$2 = NULL} ], 'server parameter logging', { @@ -328,7 +328,7 @@ sub pgbench [], [ qr{ERROR: invalid input syntax for type json}, - qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+extended query with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que \.\.\.']m + qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+unnamed portal 
with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que \.\.\.']m ], 'server parameter logging', { @@ -356,7 +356,7 @@ sub pgbench [], [ qr{ERROR: division by zero}, - qr{CONTEXT: extended query with parameters: \$1 = '1', \$2 = NULL} + qr{CONTEXT: unnamed portal with parameters: \$1 = '1', \$2 = NULL} ], 'server parameter logging', { @@ -373,7 +373,7 @@ sub pgbench [], [ qr{ERROR: invalid input syntax for type json}, - qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+extended query with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?']m + qr[CONTEXT: JSON data, line 1: \{ invalid\.\.\.[\r\n]+unnamed portal with parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?']m ], 'server parameter logging', { diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index d4aa0976b5bf2..c7a83d5dfc592 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -3011,34 +3011,13 @@ param_is_newly_set(const char *old_val, const char *new_val) return false; } -/* return whether the connection has 'hostaddr' in its conninfo */ -static bool -has_hostaddr(PGconn *conn) -{ - bool used = false; - PQconninfoOption *ciopt = PQconninfo(conn); - - for (PQconninfoOption *p = ciopt; p->keyword != NULL; p++) - { - if (strcmp(p->keyword, "hostaddr") == 0 && p->val != NULL) - { - used = true; - break; - } - } - - PQconninfoFree(ciopt); - return used; -} - /* * do_connect -- handler for \connect * - * Connects to a database with given parameters. Absent an established - * connection, all parameters are required. Given -reuse-previous=off or a - * connection string without -reuse-previous=on, NULL values will pass through - * to PQconnectdbParams(), so the libpq defaults will be used. 
Otherwise, NULL - * values will be replaced with the ones in the current connection. + * Connects to a database with given parameters. If we are told to re-use + * parameters, parameters from the previous connection are used where the + * command's own options do not supply a value. Otherwise, libpq defaults + * are used. * * In interactive mode, if connection fails with the given parameters, * the old connection will be kept. @@ -3048,28 +3027,26 @@ do_connect(enum trivalue reuse_previous_specification, char *dbname, char *user, char *host, char *port) { PGconn *o_conn = pset.db, - *n_conn; + *n_conn = NULL; + PQconninfoOption *cinfo; + int nconnopts = 0; + bool same_host = false; char *password = NULL; - char *hostaddr = NULL; - bool keep_password; + bool success = true; + bool keep_password = true; bool has_connection_string; bool reuse_previous; - PQExpBufferData connstr; - if (!o_conn && (!dbname || !user || !host || !port)) + has_connection_string = dbname ? + recognized_connection_string(dbname) : false; + + /* Complain if we have additional arguments after a connection string. */ + if (has_connection_string && (user || host || port)) { - /* - * We don't know the supplied connection parameters and don't want to - * connect to the wrong database by using defaults, so require all - * parameters to be specified. - */ - pg_log_error("All connection parameters must be supplied because no " - "database connection exists"); + pg_log_error("Do not give user, host, or port separately when using a connection string"); return false; } - has_connection_string = dbname ? - recognized_connection_string(dbname) : false; switch (reuse_previous_specification) { case TRI_YES: @@ -3083,68 +3060,164 @@ do_connect(enum trivalue reuse_previous_specification, break; } - /* If the old connection does not exist, there is nothing to reuse. */ - if (!o_conn) - reuse_previous = false; - - /* Silently ignore arguments subsequent to a connection string. 
*/ - if (has_connection_string) - { - user = NULL; - host = NULL; - port = NULL; - } - /* - * Grab missing values from the old connection. If we grab host (or host - * is the same as before) and hostaddr was set, grab that too. + * If we intend to re-use connection parameters, collect them out of the + * old connection, then replace individual values as necessary. (We may + * need to resort to looking at pset.dead_conn, if the connection died + * previously.) Otherwise, obtain a PQconninfoOption array containing + * libpq's defaults, and modify that. Note this function assumes that + * PQconninfo, PQconndefaults, and PQconninfoParse will all produce arrays + * containing the same options in the same order. */ if (reuse_previous) { - if (!user) - user = PQuser(o_conn); - if (host && strcmp(host, PQhost(o_conn)) == 0 && - has_hostaddr(o_conn)) - { - hostaddr = PQhostaddr(o_conn); - } - if (!host) + if (o_conn) + cinfo = PQconninfo(o_conn); + else if (pset.dead_conn) + cinfo = PQconninfo(pset.dead_conn); + else { - host = PQhost(o_conn); - if (has_hostaddr(o_conn)) - hostaddr = PQhostaddr(o_conn); + /* This is reachable after a non-interactive \connect failure */ + pg_log_error("No database connection exists to re-use parameters from"); + return false; } - if (!port) - port = PQport(o_conn); } - - /* - * Any change in the parameters read above makes us discard the password. - * We also discard it if we're to use a conninfo rather than the - * positional syntax. - */ - if (has_connection_string) - keep_password = false; else - keep_password = - (user && PQuser(o_conn) && strcmp(user, PQuser(o_conn)) == 0) && - (host && PQhost(o_conn) && strcmp(host, PQhost(o_conn)) == 0) && - (port && PQport(o_conn) && strcmp(port, PQport(o_conn)) == 0); + cinfo = PQconndefaults(); - /* - * Grab missing dbname from old connection. No password discard if this - * changes: passwords aren't (usually) database-specific. 
- */ - if (!dbname && reuse_previous) + if (cinfo) { - initPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, PQdb(o_conn)); - dbname = connstr.data; - /* has_connection_string=true would be a dead store */ + if (has_connection_string) + { + /* Parse the connstring and insert values into cinfo */ + PQconninfoOption *replcinfo; + char *errmsg; + + replcinfo = PQconninfoParse(dbname, &errmsg); + if (replcinfo) + { + PQconninfoOption *ci; + PQconninfoOption *replci; + bool have_password = false; + + for (ci = cinfo, replci = replcinfo; + ci->keyword && replci->keyword; + ci++, replci++) + { + Assert(strcmp(ci->keyword, replci->keyword) == 0); + /* Insert value from connstring if one was provided */ + if (replci->val) + { + /* + * We know that both val strings were allocated by + * libpq, so the least messy way to avoid memory leaks + * is to swap them. + */ + char *swap = replci->val; + + replci->val = ci->val; + ci->val = swap; + + /* + * Check whether connstring provides options affecting + * password re-use. While any change in user, host, + * hostaddr, or port causes us to ignore the old + * connection's password, we don't force that for + * dbname, since passwords aren't database-specific. + */ + if (replci->val == NULL || + strcmp(ci->val, replci->val) != 0) + { + if (strcmp(replci->keyword, "user") == 0 || + strcmp(replci->keyword, "host") == 0 || + strcmp(replci->keyword, "hostaddr") == 0 || + strcmp(replci->keyword, "port") == 0) + keep_password = false; + } + /* Also note whether connstring contains a password. 
*/ + if (strcmp(replci->keyword, "password") == 0) + have_password = true; + } + } + Assert(ci->keyword == NULL && replci->keyword == NULL); + + /* While here, determine how many option slots there are */ + nconnopts = ci - cinfo; + + PQconninfoFree(replcinfo); + + /* + * If the connstring contains a password, tell the loop below + * that we may use it, regardless of other settings (i.e., + * cinfo's password is no longer an "old" password). + */ + if (have_password) + keep_password = true; + + /* Don't let code below try to inject dbname into params. */ + dbname = NULL; + } + else + { + /* PQconninfoParse failed */ + if (errmsg) + { + pg_log_error("%s", errmsg); + PQfreemem(errmsg); + } + else + pg_log_error("out of memory"); + success = false; + } + } + else + { + /* + * If dbname isn't a connection string, then we'll inject it and + * the other parameters into the keyword array below. (We can't + * easily insert them into the cinfo array because of memory + * management issues: PQconninfoFree would misbehave on Windows.) + * However, to avoid dependencies on the order in which parameters + * appear in the array, make a preliminary scan to set + * keep_password and same_host correctly. + * + * While any change in user, host, or port causes us to ignore the + * old connection's password, we don't force that for dbname, + * since passwords aren't database-specific. 
+ */ + PQconninfoOption *ci; + + for (ci = cinfo; ci->keyword; ci++) + { + if (user && strcmp(ci->keyword, "user") == 0) + { + if (!(ci->val && strcmp(user, ci->val) == 0)) + keep_password = false; + } + else if (host && strcmp(ci->keyword, "host") == 0) + { + if (ci->val && strcmp(host, ci->val) == 0) + same_host = true; + else + keep_password = false; + } + else if (port && strcmp(ci->keyword, "port") == 0) + { + if (!(ci->val && strcmp(port, ci->val) == 0)) + keep_password = false; + } + } + + /* While here, determine how many option slots there are */ + nconnopts = ci - cinfo; + } } else - connstr.data = NULL; + { + /* We failed to create the cinfo structure */ + pg_log_error("out of memory"); + success = false; + } /* * If the user asked to be prompted for a password, ask for one now. If @@ -3156,77 +3229,74 @@ do_connect(enum trivalue reuse_previous_specification, * the postmaster's log. But libpq offers no API that would let us obtain * a password and then continue with the first connection attempt. */ - if (pset.getPassword == TRI_YES) + if (pset.getPassword == TRI_YES && success) { /* - * If a connstring or URI is provided, we can't be sure we know which - * username will be used, since we haven't parsed that argument yet. + * If a connstring or URI is provided, we don't know which username + * will be used, since we haven't dug that out of the connstring. * Don't risk issuing a misleading prompt. As in startup.c, it does - * not seem worth working harder, since this getPassword option is + * not seem worth working harder, since this getPassword setting is * normally only used in noninteractive cases. */ password = prompt_for_password(has_connection_string ? 
NULL : user); } - else if (o_conn && keep_password) - { - password = PQpass(o_conn); - if (password && *password) - password = pg_strdup(password); - else - password = NULL; - } - while (true) + /* Loop till we have a connection or fail, which we might've already */ + while (success) { -#define PARAMS_ARRAY_SIZE 9 - const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); - const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); - int paramnum = -1; - - keywords[++paramnum] = "host"; - values[paramnum] = host; - if (hostaddr && *hostaddr) - { - keywords[++paramnum] = "hostaddr"; - values[paramnum] = hostaddr; - } - keywords[++paramnum] = "port"; - values[paramnum] = port; - keywords[++paramnum] = "user"; - values[paramnum] = user; + const char **keywords = pg_malloc((nconnopts + 1) * sizeof(*keywords)); + const char **values = pg_malloc((nconnopts + 1) * sizeof(*values)); + int paramnum = 0; + PQconninfoOption *ci; /* - * Position in the array matters when the dbname is a connection - * string, because settings in a connection string override earlier - * array entries only. Thus, user= in the connection string always - * takes effect, but client_encoding= often will not. + * Copy non-default settings into the PQconnectdbParams parameter + * arrays; but inject any values specified old-style, as well as any + * interactively-obtained password, and a couple of fields we want to + * set forcibly. * - * If you change this code, also change the initial-connection code in - * main(). For no good reason, a connection string password= takes - * precedence in main() but not here. + * If you change this code, see also the initial-connection code in + * main(). 
*/ - keywords[++paramnum] = "dbname"; - values[paramnum] = dbname; - keywords[++paramnum] = "password"; - values[paramnum] = password; - keywords[++paramnum] = "fallback_application_name"; - values[paramnum] = pset.progname; - keywords[++paramnum] = "client_encoding"; - values[paramnum] = (pset.notty || getenv("PGCLIENTENCODING")) ? NULL : "auto"; - + for (ci = cinfo; ci->keyword; ci++) + { + keywords[paramnum] = ci->keyword; + + if (dbname && strcmp(ci->keyword, "dbname") == 0) + values[paramnum++] = dbname; + else if (user && strcmp(ci->keyword, "user") == 0) + values[paramnum++] = user; + else if (host && strcmp(ci->keyword, "host") == 0) + values[paramnum++] = host; + else if (host && !same_host && strcmp(ci->keyword, "hostaddr") == 0) + { + /* If we're changing the host value, drop any old hostaddr */ + values[paramnum++] = NULL; + } + else if (port && strcmp(ci->keyword, "port") == 0) + values[paramnum++] = port; + /* If !keep_password, we unconditionally drop old password */ + else if ((password || !keep_password) && + strcmp(ci->keyword, "password") == 0) + values[paramnum++] = password; + else if (strcmp(ci->keyword, "fallback_application_name") == 0) + values[paramnum++] = pset.progname; + else if (strcmp(ci->keyword, "client_encoding") == 0) + values[paramnum++] = (pset.notty || getenv("PGCLIENTENCODING")) ? 
NULL : "auto"; + else if (ci->val) + values[paramnum++] = ci->val; + /* else, don't bother making libpq parse this keyword */ + } /* add array terminator */ - keywords[++paramnum] = NULL; + keywords[paramnum] = NULL; values[paramnum] = NULL; - n_conn = PQconnectdbParams(keywords, values, true); + /* Note we do not want libpq to re-expand the dbname parameter */ + n_conn = PQconnectdbParams(keywords, values, false); pg_free(keywords); pg_free(values); - /* We can immediately discard the password -- no longer needed */ - if (password) - pg_free(password); - if (PQstatus(n_conn) == CONNECTION_OK) break; @@ -3242,9 +3312,28 @@ do_connect(enum trivalue reuse_previous_specification, */ password = prompt_for_password(PQuser(n_conn)); PQfinish(n_conn); + n_conn = NULL; continue; } + /* + * We'll report the error below ... unless n_conn is NULL, indicating + * that libpq didn't have enough memory to make a PGconn. + */ + if (n_conn == NULL) + pg_log_error("out of memory"); + + success = false; + } /* end retry loop */ + + /* Release locally allocated data, whether we succeeded or not */ + if (password) + pg_free(password); + if (cinfo) + PQconninfoFree(cinfo); + + if (!success) + { /* * Failed to connect to the database. In interactive mode, keep the * previous connection to the DB; in scripting mode, close our @@ -3252,7 +3341,11 @@ do_connect(enum trivalue reuse_previous_specification, */ if (pset.cur_cmd_interactive) { - pg_log_info("%s", PQerrorMessage(n_conn)); + if (n_conn) + { + pg_log_info("%s", PQerrorMessage(n_conn)); + PQfinish(n_conn); + } /* pset.db is left unmodified */ if (o_conn) @@ -3260,27 +3353,39 @@ do_connect(enum trivalue reuse_previous_specification, } else { - pg_log_error("\\connect: %s", PQerrorMessage(n_conn)); + if (n_conn) + { + pg_log_error("\\connect: %s", PQerrorMessage(n_conn)); + PQfinish(n_conn); + } + if (o_conn) { /* - * Transition to having no connection. Keep this bit in sync - * with CheckConnection(). 
+ * Transition to having no connection. + * + * Unlike CheckConnection(), we close the old connection + * immediately to prevent its parameters from being re-used. + * This is so that a script cannot accidentally reuse + * parameters it did not expect to. Otherwise, the state + * cleanup should be the same as in CheckConnection(). */ PQfinish(o_conn); pset.db = NULL; ResetCancelConn(); UnsyncVariables(); } + + /* On the same reasoning, release any dead_conn to prevent reuse */ + if (pset.dead_conn) + { + PQfinish(pset.dead_conn); + pset.dead_conn = NULL; + } } - PQfinish(n_conn); - if (connstr.data) - termPQExpBuffer(&connstr); return false; } - if (connstr.data) - termPQExpBuffer(&connstr); /* * Replace the old connection with the new one, and update @@ -3330,8 +3435,15 @@ do_connect(enum trivalue reuse_previous_specification, PQdb(pset.db), PQuser(pset.db)); } + /* Drop no-longer-needed connection(s) */ if (o_conn) PQfinish(o_conn); + if (pset.dead_conn) + { + PQfinish(pset.dead_conn); + pset.dead_conn = NULL; + } + return true; } diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index 6323a35c91cac..ff673665d869e 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -313,10 +313,14 @@ CheckConnection(void) fprintf(stderr, _("Failed.\n")); /* - * Transition to having no connection. Keep this bit in sync with - * do_connect(). + * Transition to having no connection; but stash away the failed + * connection so that we can still refer to its parameters in a + * later \connect attempt. Keep the state cleanup here in sync + * with do_connect(). 
*/ - PQfinish(pset.db); + if (pset.dead_conn) + PQfinish(pset.dead_conn); + pset.dead_conn = pset.db; pset.db = NULL; ResetCancelConn(); UnsyncVariables(); diff --git a/src/bin/psql/create_help.pl b/src/bin/psql/create_help.pl index ee82e645832e0..60e093bad4902 100644 --- a/src/bin/psql/create_help.pl +++ b/src/bin/psql/create_help.pl @@ -63,11 +63,12 @@ struct _helpStruct { - const char *cmd; /* the command name */ - const char *help; /* the help associated with it */ - const char *docbook_id; /* DocBook XML id (for generating URL) */ - void (*syntaxfunc)(PQExpBuffer); /* function that prints the syntax associated with it */ - int nl_count; /* number of newlines in syntax (for pager) */ + const char *cmd; /* the command name */ + const char *help; /* the help associated with it */ + const char *docbook_id; /* DocBook XML id (for generating URL) */ + void (*syntaxfunc) (PQExpBuffer); /* function that prints the + * syntax associated with it */ + int nl_count; /* number of newlines in syntax (for pager) */ }; extern const struct _helpStruct QL_HELP[]; @@ -190,17 +191,17 @@ { my $id = $_; $id =~ s/ /_/g; - print $cfile_handle " { \"$_\", - N_(\"$entries{$_}{cmddesc}\"), - \"$entries{$_}{cmdid}\", - sql_help_$id, - $entries{$_}{nl_count} }, + print $cfile_handle "\t{\"$_\", +\t\tN_(\"$entries{$_}{cmddesc}\"), +\t\t\"$entries{$_}{cmdid}\", +\t\tsql_help_$id, +\t$entries{$_}{nl_count}}, "; } print $cfile_handle " - { NULL, NULL, NULL } /* End of list marker */ +\t{NULL, NULL, NULL}\t\t\t/* End of list marker */ }; "; @@ -210,7 +211,7 @@ #define QL_MAX_CMD_LEN $maxlen /* largest strlen(cmd) */ -#endif /* $define */ +#endif /* $define */ "; close $cfile_handle; diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 0861d74a6fe0c..07d640021c276 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -799,6 +799,10 @@ describeOperators(const char *pattern, bool verbose, bool showSystem) * anyway, for now, because (1) third-party modules may still 
be following * the old convention, and (2) we'd need to do it anyway when talking to a * pre-9.1 server. + * + * The support for postfix operators in this query is dead code as of + * Postgres 14, but we need to keep it for as long as we support talking + * to pre-v14 servers. */ printfPQExpBuffer(&buf, @@ -2683,8 +2687,13 @@ describeOneTableDetails(const char *schemaname, " a.attnum = s.attnum AND NOT attisdropped)) AS columns,\n" " 'd' = any(stxkind) AS ndist_enabled,\n" " 'f' = any(stxkind) AS deps_enabled,\n" - " 'm' = any(stxkind) AS mcv_enabled\n" - "FROM pg_catalog.pg_statistic_ext stat " + " 'm' = any(stxkind) AS mcv_enabled,\n"); + + if (pset.sversion >= 130000) + appendPQExpBufferStr(&buf, " stxstattarget\n"); + else + appendPQExpBufferStr(&buf, " -1 AS stxstattarget\n"); + appendPQExpBuffer(&buf, "FROM pg_catalog.pg_statistic_ext stat\n" "WHERE stxrelid = '%s'\n" "ORDER BY 1;", oid); @@ -2732,6 +2741,11 @@ describeOneTableDetails(const char *schemaname, PQgetvalue(result, i, 4), PQgetvalue(result, i, 1)); + /* Show the stats target if it's not default */ + if (strcmp(PQgetvalue(result, i, 8), "-1") != 0) + appendPQExpBuffer(&buf, "; STATISTICS %s", + PQgetvalue(result, i, 8)); + printTableAddFooter(&cont, buf.data); } } @@ -6123,17 +6137,16 @@ listOperatorClasses(const char *access_method_pattern, " pg_catalog.pg_get_userbyid(c.opcowner) AS \"%s\"\n", gettext_noop("Operator family"), gettext_noop("Owner")); - appendPQExpBuffer(&buf, - "\nFROM pg_catalog.pg_opclass c\n" - " LEFT JOIN pg_catalog.pg_am am on am.oid = c.opcmethod\n" - " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.opcnamespace\n" - " LEFT JOIN pg_catalog.pg_type t ON t.oid = c.opcintype\n" - " LEFT JOIN pg_catalog.pg_namespace tn ON tn.oid = t.typnamespace\n" - ); + appendPQExpBufferStr(&buf, + "\nFROM pg_catalog.pg_opclass c\n" + " LEFT JOIN pg_catalog.pg_am am on am.oid = c.opcmethod\n" + " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.opcnamespace\n" + " LEFT JOIN 
pg_catalog.pg_type t ON t.oid = c.opcintype\n" + " LEFT JOIN pg_catalog.pg_namespace tn ON tn.oid = t.typnamespace\n"); if (verbose) - appendPQExpBuffer(&buf, - " LEFT JOIN pg_catalog.pg_opfamily of ON of.oid = c.opcfamily\n" - " LEFT JOIN pg_catalog.pg_namespace ofn ON ofn.oid = of.opfnamespace\n"); + appendPQExpBufferStr(&buf, + " LEFT JOIN pg_catalog.pg_opfamily of ON of.oid = c.opcfamily\n" + " LEFT JOIN pg_catalog.pg_namespace ofn ON ofn.oid = of.opfnamespace\n"); if (access_method_pattern) have_where = processSQLNamePattern(pset.db, &buf, access_method_pattern, @@ -6202,11 +6215,10 @@ listOperatorFamilies(const char *access_method_pattern, appendPQExpBuffer(&buf, ",\n pg_catalog.pg_get_userbyid(f.opfowner) AS \"%s\"\n", gettext_noop("Owner")); - appendPQExpBuffer(&buf, - "\nFROM pg_catalog.pg_opfamily f\n" - " LEFT JOIN pg_catalog.pg_am am on am.oid = f.opfmethod\n" - " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = f.opfnamespace\n" - ); + appendPQExpBufferStr(&buf, + "\nFROM pg_catalog.pg_opfamily f\n" + " LEFT JOIN pg_catalog.pg_am am on am.oid = f.opfmethod\n" + " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = f.opfnamespace\n"); if (access_method_pattern) have_where = processSQLNamePattern(pset.db, &buf, access_method_pattern, @@ -6226,7 +6238,7 @@ listOperatorFamilies(const char *access_method_pattern, "tn.nspname", "t.typname", "pg_catalog.format_type(t.oid, NULL)", "pg_catalog.pg_type_is_visible(t.oid)"); - appendPQExpBuffer(&buf, " )\n"); + appendPQExpBufferStr(&buf, " )\n"); } appendPQExpBufferStr(&buf, "ORDER BY 1, 2;"); @@ -6293,14 +6305,14 @@ listOpFamilyOperators(const char *access_method_pattern, appendPQExpBuffer(&buf, ", ofs.opfname AS \"%s\"\n", gettext_noop("Sort opfamily")); - appendPQExpBuffer(&buf, - "FROM pg_catalog.pg_amop o\n" - " LEFT JOIN pg_catalog.pg_opfamily of ON of.oid = o.amopfamily\n" - " LEFT JOIN pg_catalog.pg_am am ON am.oid = of.opfmethod AND am.oid = o.amopmethod\n" - " LEFT JOIN pg_catalog.pg_namespace nsf ON 
of.opfnamespace = nsf.oid\n"); + appendPQExpBufferStr(&buf, + "FROM pg_catalog.pg_amop o\n" + " LEFT JOIN pg_catalog.pg_opfamily of ON of.oid = o.amopfamily\n" + " LEFT JOIN pg_catalog.pg_am am ON am.oid = of.opfmethod AND am.oid = o.amopmethod\n" + " LEFT JOIN pg_catalog.pg_namespace nsf ON of.opfnamespace = nsf.oid\n"); if (verbose) - appendPQExpBuffer(&buf, - " LEFT JOIN pg_catalog.pg_opfamily ofs ON ofs.oid = o.amopsortfamily\n"); + appendPQExpBufferStr(&buf, + " LEFT JOIN pg_catalog.pg_opfamily ofs ON ofs.oid = o.amopsortfamily\n"); if (access_method_pattern) have_where = processSQLNamePattern(pset.db, &buf, access_method_pattern, @@ -6379,12 +6391,12 @@ listOpFamilyFunctions(const char *access_method_pattern, ", ap.amproc::pg_catalog.regprocedure AS \"%s\"\n", gettext_noop("Function")); - appendPQExpBuffer(&buf, - "FROM pg_catalog.pg_amproc ap\n" - " LEFT JOIN pg_catalog.pg_opfamily of ON of.oid = ap.amprocfamily\n" - " LEFT JOIN pg_catalog.pg_am am ON am.oid = of.opfmethod\n" - " LEFT JOIN pg_catalog.pg_namespace ns ON of.opfnamespace = ns.oid\n" - " LEFT JOIN pg_catalog.pg_proc p ON ap.amproc = p.oid\n"); + appendPQExpBufferStr(&buf, + "FROM pg_catalog.pg_amproc ap\n" + " LEFT JOIN pg_catalog.pg_opfamily of ON of.oid = ap.amprocfamily\n" + " LEFT JOIN pg_catalog.pg_am am ON am.oid = of.opfmethod\n" + " LEFT JOIN pg_catalog.pg_namespace ns ON of.opfnamespace = ns.oid\n" + " LEFT JOIN pg_catalog.pg_proc p ON ap.amproc = p.oid\n"); if (access_method_pattern) have_where = processSQLNamePattern(pset.db, &buf, access_method_pattern, diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h index 97941aa10c671..9601f6e90ce89 100644 --- a/src/bin/psql/settings.h +++ b/src/bin/psql/settings.h @@ -117,6 +117,13 @@ typedef struct _psqlSettings VariableSpace vars; /* "shell variable" repository */ + /* + * If we get a connection failure, the now-unusable PGconn is stashed here + * until we can successfully reconnect. 
Never attempt to do anything with + * this PGconn except extract parameters for a \connect attempt. + */ + PGconn *dead_conn; /* previous connection to backend */ + /* * The remaining fields are set by assign hooks associated with entries in * "vars". They should not be set directly except by those hook diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c index 8232a0143bc95..e8d35a108f365 100644 --- a/src/bin/psql/startup.c +++ b/src/bin/psql/startup.c @@ -145,6 +145,7 @@ main(int argc, char *argv[]) pset.progname = get_progname(argv[0]); pset.db = NULL; + pset.dead_conn = NULL; setDecimalLocale(); pset.encoding = PQenv2encoding(); pset.queryFout = stdout; @@ -442,7 +443,10 @@ main(int argc, char *argv[]) /* clean up */ if (pset.logfile) fclose(pset.logfile); - PQfinish(pset.db); + if (pset.db) + PQfinish(pset.db); + if (pset.dead_conn) + PQfinish(pset.dead_conn); setQFout(NULL); return successResult; diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index f41785f11c12e..b2b4f1fd4d13d 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -1974,10 +1974,10 @@ psql_completion(const char *text, int start, int end) */ else if (Matches("ALTER", "TABLE", MatchAny)) COMPLETE_WITH("ADD", "ALTER", "CLUSTER ON", "DISABLE", "DROP", - "ENABLE", "INHERIT", "NO INHERIT", "RENAME", "RESET", + "ENABLE", "INHERIT", "NO", "RENAME", "RESET", "OWNER TO", "SET", "VALIDATE CONSTRAINT", "REPLICA IDENTITY", "ATTACH PARTITION", - "DETACH PARTITION"); + "DETACH PARTITION", "FORCE ROW LEVEL SECURITY"); /* ALTER TABLE xxx ENABLE */ else if (Matches("ALTER", "TABLE", MatchAny, "ENABLE")) COMPLETE_WITH("ALWAYS", "REPLICA", "ROW LEVEL SECURITY", "RULE", @@ -2007,6 +2007,9 @@ psql_completion(const char *text, int start, int end) /* ALTER TABLE xxx INHERIT */ else if (Matches("ALTER", "TABLE", MatchAny, "INHERIT")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, ""); + /* ALTER TABLE xxx NO */ + else if (Matches("ALTER", "TABLE", MatchAny, 
"NO")) + COMPLETE_WITH("FORCE ROW LEVEL SECURITY", "INHERIT"); /* ALTER TABLE xxx NO INHERIT */ else if (Matches("ALTER", "TABLE", MatchAny, "NO", "INHERIT")) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, ""); @@ -2911,7 +2914,8 @@ psql_completion(const char *text, int start, int end) /* DEALLOCATE */ else if (Matches("DEALLOCATE")) - COMPLETE_WITH_QUERY(Query_for_list_of_prepared_statements); + COMPLETE_WITH_QUERY(Query_for_list_of_prepared_statements + " UNION SELECT 'ALL'"); /* DECLARE */ else if (Matches("DECLARE", MatchAny)) @@ -3076,19 +3080,27 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE"); /* FETCH && MOVE */ - /* Complete FETCH with one of FORWARD, BACKWARD, RELATIVE */ + + /* + * Complete FETCH with one of ABSOLUTE, BACKWARD, FORWARD, RELATIVE, ALL, + * NEXT, PRIOR, FIRST, LAST + */ else if (Matches("FETCH|MOVE")) - COMPLETE_WITH("ABSOLUTE", "BACKWARD", "FORWARD", "RELATIVE"); - /* Complete FETCH with one of ALL, NEXT, PRIOR */ - else if (Matches("FETCH|MOVE", MatchAny)) - COMPLETE_WITH("ALL", "NEXT", "PRIOR"); + COMPLETE_WITH("ABSOLUTE", "BACKWARD", "FORWARD", "RELATIVE", + "ALL", "NEXT", "PRIOR", "FIRST", "LAST"); + + /* Complete FETCH BACKWARD or FORWARD with one of ALL, FROM, IN */ + else if (Matches("FETCH|MOVE", "BACKWARD|FORWARD")) + COMPLETE_WITH("ALL", "FROM", "IN"); /* - * Complete FETCH with "FROM" or "IN". These are equivalent, + * Complete FETCH with "FROM" or "IN". These are equivalent, * but we may as well tab-complete both: perhaps some users prefer one * variant or the other. 
*/ - else if (Matches("FETCH|MOVE", MatchAny, MatchAny)) + else if (Matches("FETCH|MOVE", "ABSOLUTE|BACKWARD|FORWARD|RELATIVE", + MatchAnyExcept("FROM|IN")) || + Matches("FETCH|MOVE", "ALL|NEXT|PRIOR|FIRST|LAST")) COMPLETE_WITH("FROM", "IN"); /* FOREIGN DATA WRAPPER */ @@ -3293,6 +3305,17 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH("FOREIGN SCHEMA"); else if (Matches("IMPORT", "FOREIGN")) COMPLETE_WITH("SCHEMA"); + else if (Matches("IMPORT", "FOREIGN", "SCHEMA", MatchAny)) + COMPLETE_WITH("EXCEPT (", "FROM SERVER", "LIMIT TO ("); + else if (TailMatches("LIMIT", "TO", "(*)") || + TailMatches("EXCEPT", "(*)")) + COMPLETE_WITH("FROM SERVER"); + else if (TailMatches("FROM", "SERVER", MatchAny)) + COMPLETE_WITH("INTO"); + else if (TailMatches("FROM", "SERVER", MatchAny, "INTO")) + COMPLETE_WITH_QUERY(Query_for_list_of_schemas); + else if (TailMatches("FROM", "SERVER", MatchAny, "INTO", MatchAny)) + COMPLETE_WITH("OPTIONS ("); /* INSERT --- can be inside EXPLAIN, RULE, etc */ /* Complete INSERT with "INTO" */ diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c index 12972de0e91e7..2f786e61037be 100644 --- a/src/bin/scripts/clusterdb.c +++ b/src/bin/scripts/clusterdb.c @@ -17,15 +17,10 @@ #include "fe_utils/string_utils.h" -static void cluster_one_database(const char *dbname, bool verbose, const char *table, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, - const char *progname, bool echo); -static void cluster_all_databases(bool verbose, const char *maintenance_db, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, - const char *progname, bool echo, bool quiet); - +static void cluster_one_database(const ConnParams *cparams, const char *table, + const char *progname, bool verbose, bool echo); +static void cluster_all_databases(ConnParams *cparams, const char *progname, + bool verbose, bool echo, bool quiet); static void help(const char 
*progname); @@ -58,6 +53,7 @@ main(int argc, char *argv[]) char *port = NULL; char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool echo = false; bool quiet = false; bool alldb = false; @@ -134,6 +130,13 @@ main(int argc, char *argv[]) exit(1); } + /* fill cparams except for dbname, which is set below */ + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + setup_cancel_handler(NULL); if (alldb) @@ -150,8 +153,9 @@ main(int argc, char *argv[]) exit(1); } - cluster_all_databases(verbose, maintenance_db, host, port, username, prompt_password, - progname, echo, quiet); + cparams.dbname = maintenance_db; + + cluster_all_databases(&cparams, progname, verbose, echo, quiet); } else { @@ -165,21 +169,21 @@ main(int argc, char *argv[]) dbname = get_user_name_or_exit(progname); } + cparams.dbname = dbname; + if (tables.head != NULL) { SimpleStringListCell *cell; for (cell = tables.head; cell; cell = cell->next) { - cluster_one_database(dbname, verbose, cell->val, - host, port, username, prompt_password, - progname, echo); + cluster_one_database(&cparams, cell->val, + progname, verbose, echo); } } else - cluster_one_database(dbname, verbose, NULL, - host, port, username, prompt_password, - progname, echo); + cluster_one_database(&cparams, NULL, + progname, verbose, echo); } exit(0); @@ -187,17 +191,14 @@ main(int argc, char *argv[]) static void -cluster_one_database(const char *dbname, bool verbose, const char *table, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, - const char *progname, bool echo) +cluster_one_database(const ConnParams *cparams, const char *table, + const char *progname, bool verbose, bool echo) { PQExpBufferData sql; PGconn *conn; - conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, echo, false, false); + conn = connectDatabase(cparams, 
progname, echo, false, false); initPQExpBuffer(&sql); @@ -228,22 +229,17 @@ cluster_one_database(const char *dbname, bool verbose, const char *table, static void -cluster_all_databases(bool verbose, const char *maintenance_db, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, - const char *progname, bool echo, bool quiet) +cluster_all_databases(ConnParams *cparams, const char *progname, + bool verbose, bool echo, bool quiet) { PGconn *conn; PGresult *result; - PQExpBufferData connstr; int i; - conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname, echo); + conn = connectMaintenanceDatabase(cparams, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo); PQfinish(conn); - initPQExpBuffer(&connstr); for (i = 0; i < PQntuples(result); i++) { char *dbname = PQgetvalue(result, i, 0); @@ -254,15 +250,10 @@ cluster_all_databases(bool verbose, const char *maintenance_db, fflush(stdout); } - resetPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, dbname); + cparams->override_dbname = dbname; - cluster_one_database(connstr.data, verbose, NULL, - host, port, username, prompt_password, - progname, echo); + cluster_one_database(cparams, NULL, progname, verbose, echo); } - termPQExpBuffer(&connstr); PQclear(result); } diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c index e987eef23434d..3362221a31149 100644 --- a/src/bin/scripts/common.c +++ b/src/bin/scripts/common.c @@ -54,7 +54,7 @@ handle_help_version_opts(int argc, char *argv[], * Make a database connection with the given parameters. * * An interactive password prompt is automatically issued if needed and - * allowed by prompt_password. + * allowed by cparams->prompt_password. * * If allow_password_reuse is true, we will try to re-use any password * given during previous calls to this routine. 
(Callers should not pass @@ -62,22 +62,23 @@ handle_help_version_opts(int argc, char *argv[], * as before, else we might create password exposure hazards.) */ PGconn * -connectDatabase(const char *dbname, const char *pghost, - const char *pgport, const char *pguser, - enum trivalue prompt_password, const char *progname, +connectDatabase(const ConnParams *cparams, const char *progname, bool echo, bool fail_ok, bool allow_password_reuse) { PGconn *conn; bool new_pass; static char *password = NULL; + /* Callers must supply at least dbname; other params can be NULL */ + Assert(cparams->dbname); + if (!allow_password_reuse && password) { free(password); password = NULL; } - if (!password && prompt_password == TRI_YES) + if (cparams->prompt_password == TRI_YES && password == NULL) password = simple_prompt("Password: ", false); /* @@ -86,23 +87,35 @@ connectDatabase(const char *dbname, const char *pghost, */ do { - const char *keywords[7]; - const char *values[7]; - - keywords[0] = "host"; - values[0] = pghost; - keywords[1] = "port"; - values[1] = pgport; - keywords[2] = "user"; - values[2] = pguser; - keywords[3] = "password"; - values[3] = password; - keywords[4] = "dbname"; - values[4] = dbname; - keywords[5] = "fallback_application_name"; - values[5] = progname; - keywords[6] = NULL; - values[6] = NULL; + const char *keywords[8]; + const char *values[8]; + int i = 0; + + /* + * If dbname is a connstring, its entries can override the other + * values obtained from cparams; but in turn, override_dbname can + * override the dbname component of it. 
+ */ + keywords[i] = "host"; + values[i++] = cparams->pghost; + keywords[i] = "port"; + values[i++] = cparams->pgport; + keywords[i] = "user"; + values[i++] = cparams->pguser; + keywords[i] = "password"; + values[i++] = password; + keywords[i] = "dbname"; + values[i++] = cparams->dbname; + if (cparams->override_dbname) + { + keywords[i] = "dbname"; + values[i++] = cparams->override_dbname; + } + keywords[i] = "fallback_application_name"; + values[i++] = progname; + keywords[i] = NULL; + values[i++] = NULL; + Assert(i <= lengthof(keywords)); new_pass = false; conn = PQconnectdbParams(keywords, values, true); @@ -110,7 +123,7 @@ connectDatabase(const char *dbname, const char *pghost, if (!conn) { pg_log_error("could not connect to database %s: out of memory", - dbname); + cparams->dbname); exit(1); } @@ -119,7 +132,7 @@ connectDatabase(const char *dbname, const char *pghost, */ if (PQstatus(conn) == CONNECTION_BAD && PQconnectionNeedsPassword(conn) && - prompt_password != TRI_NO) + cparams->prompt_password != TRI_NO) { PQfinish(conn); if (password) @@ -138,10 +151,11 @@ connectDatabase(const char *dbname, const char *pghost, return NULL; } pg_log_error("could not connect to database %s: %s", - dbname, PQerrorMessage(conn)); + cparams->dbname, PQerrorMessage(conn)); exit(1); } + /* Start strict; callers may override this. */ PQclear(executeQuery(conn, ALWAYS_SECURE_SEARCH_PATH_SQL, echo)); return conn; @@ -149,27 +163,30 @@ connectDatabase(const char *dbname, const char *pghost, /* * Try to connect to the appropriate maintenance database. + * + * This differs from connectDatabase only in that it has a rule for + * inserting a default "dbname" if none was given (which is why cparams + * is not const). Note that cparams->dbname should typically come from + * a --maintenance-db command line parameter. 
*/ PGconn * -connectMaintenanceDatabase(const char *maintenance_db, - const char *pghost, const char *pgport, - const char *pguser, enum trivalue prompt_password, +connectMaintenanceDatabase(ConnParams *cparams, const char *progname, bool echo) { PGconn *conn; /* If a maintenance database name was specified, just connect to it. */ - if (maintenance_db) - return connectDatabase(maintenance_db, pghost, pgport, pguser, - prompt_password, progname, echo, false, false); + if (cparams->dbname) + return connectDatabase(cparams, progname, echo, false, false); /* Otherwise, try postgres first and then template1. */ - conn = connectDatabase("postgres", pghost, pgport, pguser, prompt_password, - progname, echo, true, false); + cparams->dbname = "postgres"; + conn = connectDatabase(cparams, progname, echo, true, false); if (!conn) - conn = connectDatabase("template1", pghost, pgport, pguser, - prompt_password, progname, echo, false, false); - + { + cparams->dbname = "template1"; + conn = connectDatabase(cparams, progname, echo, false, false); + } return conn; } diff --git a/src/bin/scripts/common.h b/src/bin/scripts/common.h index ddf6320b47c8f..9ec57cdd87c0e 100644 --- a/src/bin/scripts/common.h +++ b/src/bin/scripts/common.h @@ -21,20 +21,32 @@ enum trivalue TRI_YES }; +/* Parameters needed by connectDatabase/connectMaintenanceDatabase */ +typedef struct _connParams +{ + /* These fields record the actual command line parameters */ + const char *dbname; /* this may be a connstring! 
*/ + const char *pghost; + const char *pgport; + const char *pguser; + enum trivalue prompt_password; + /* If not NULL, this overrides the dbname obtained from command line */ + /* (but *only* the DB name, not anything else in the connstring) */ + const char *override_dbname; +} ConnParams; + typedef void (*help_handler) (const char *progname); extern void handle_help_version_opts(int argc, char *argv[], const char *fixed_progname, help_handler hlp); -extern PGconn *connectDatabase(const char *dbname, const char *pghost, - const char *pgport, const char *pguser, - enum trivalue prompt_password, const char *progname, - bool echo, bool fail_ok, bool allow_password_reuse); +extern PGconn *connectDatabase(const ConnParams *cparams, + const char *progname, + bool echo, bool fail_ok, + bool allow_password_reuse); -extern PGconn *connectMaintenanceDatabase(const char *maintenance_db, - const char *pghost, const char *pgport, - const char *pguser, enum trivalue prompt_password, +extern PGconn *connectMaintenanceDatabase(ConnParams *cparams, const char *progname, bool echo); extern void disconnectDatabase(PGconn *conn); diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c index 1353af97c49eb..91e6e2194bd77 100644 --- a/src/bin/scripts/createdb.c +++ b/src/bin/scripts/createdb.c @@ -51,6 +51,7 @@ main(int argc, char *argv[]) char *port = NULL; char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool echo = false; char *owner = NULL; char *tablespace = NULL; @@ -180,8 +181,14 @@ main(int argc, char *argv[]) if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0) maintenance_db = "template1"; - conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname, echo); + cparams.dbname = maintenance_db; + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + + conn = 
connectMaintenanceDatabase(&cparams, progname, echo); initPQExpBuffer(&sql); diff --git a/src/bin/scripts/createuser.c b/src/bin/scripts/createuser.c index 6179199563c41..d6b56f15c3b66 100644 --- a/src/bin/scripts/createuser.c +++ b/src/bin/scripts/createuser.c @@ -59,6 +59,7 @@ main(int argc, char *argv[]) char *username = NULL; SimpleStringList roles = {NULL, NULL}; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool echo = false; bool interactive = false; int conn_limit = -2; /* less than minimum valid value */ @@ -252,8 +253,14 @@ main(int argc, char *argv[]) if (login == 0) login = TRI_YES; - conn = connectDatabase("postgres", host, port, username, prompt_password, - progname, echo, false, false); + cparams.dbname = NULL; /* this program lacks any dbname option... */ + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + + conn = connectMaintenanceDatabase(&cparams, progname, echo); initPQExpBuffer(&sql); diff --git a/src/bin/scripts/dropdb.c b/src/bin/scripts/dropdb.c index 581c7749c86a1..ccbf78e91a864 100644 --- a/src/bin/scripts/dropdb.c +++ b/src/bin/scripts/dropdb.c @@ -48,6 +48,7 @@ main(int argc, char *argv[]) char *port = NULL; char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool echo = false; bool interactive = false; bool force = false; @@ -137,9 +138,14 @@ main(int argc, char *argv[]) if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0) maintenance_db = "template1"; - conn = connectMaintenanceDatabase(maintenance_db, - host, port, username, prompt_password, - progname, echo); + cparams.dbname = maintenance_db; + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + + conn = connectMaintenanceDatabase(&cparams, progname, echo); if (echo) printf("%s\n", sql.data); diff --git 
a/src/bin/scripts/dropuser.c b/src/bin/scripts/dropuser.c index f7ddd1402db26..73d7328a88d58 100644 --- a/src/bin/scripts/dropuser.c +++ b/src/bin/scripts/dropuser.c @@ -46,6 +46,7 @@ main(int argc, char *argv[]) char *port = NULL; char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool echo = false; bool interactive = false; @@ -129,13 +130,19 @@ main(int argc, char *argv[]) exit(0); } + cparams.dbname = NULL; /* this program lacks any dbname option... */ + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + + conn = connectMaintenanceDatabase(&cparams, progname, echo); + initPQExpBuffer(&sql); appendPQExpBuffer(&sql, "DROP ROLE %s%s;", (if_exists ? "IF EXISTS " : ""), fmtId(dropuser)); - conn = connectDatabase("postgres", host, port, username, prompt_password, - progname, echo, false, false); - if (echo) printf("%s\n", sql.data); result = PQexec(conn, sql.data); diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c index 40dcbc9283324..b32a7746baf42 100644 --- a/src/bin/scripts/reindexdb.c +++ b/src/bin/scripts/reindexdb.c @@ -34,15 +34,12 @@ static SimpleStringList *get_parallel_object_list(PGconn *conn, ReindexType type, SimpleStringList *user_list, bool echo); -static void reindex_one_database(const char *dbname, ReindexType type, - SimpleStringList *user_list, const char *host, - const char *port, const char *username, - enum trivalue prompt_password, const char *progname, +static void reindex_one_database(const ConnParams *cparams, ReindexType type, + SimpleStringList *user_list, + const char *progname, bool echo, bool verbose, bool concurrently, int concurrentCons); -static void reindex_all_databases(const char *maintenance_db, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, +static void reindex_all_databases(ConnParams *cparams, const char *progname, 
bool echo, bool quiet, bool verbose, bool concurrently, int concurrentCons); @@ -86,6 +83,7 @@ main(int argc, char *argv[]) const char *port = NULL; const char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool syscatalog = false; bool alldb = false; bool echo = false; @@ -188,6 +186,13 @@ main(int argc, char *argv[]) exit(1); } + /* fill cparams except for dbname, which is set below */ + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + setup_cancel_handler(NULL); if (alldb) @@ -218,8 +223,9 @@ main(int argc, char *argv[]) exit(1); } - reindex_all_databases(maintenance_db, host, port, username, - prompt_password, progname, echo, quiet, verbose, + cparams.dbname = maintenance_db; + + reindex_all_databases(&cparams, progname, echo, quiet, verbose, concurrently, concurrentCons); } else if (syscatalog) @@ -256,9 +262,11 @@ main(int argc, char *argv[]) dbname = get_user_name_or_exit(progname); } - reindex_one_database(dbname, REINDEX_SYSTEM, NULL, host, - port, username, prompt_password, progname, - echo, verbose, concurrently, 1); + cparams.dbname = dbname; + + reindex_one_database(&cparams, REINDEX_SYSTEM, NULL, + progname, echo, verbose, + concurrently, 1); } else { @@ -283,40 +291,40 @@ main(int argc, char *argv[]) dbname = get_user_name_or_exit(progname); } + cparams.dbname = dbname; + if (schemas.head != NULL) - reindex_one_database(dbname, REINDEX_SCHEMA, &schemas, host, - port, username, prompt_password, progname, - echo, verbose, concurrently, concurrentCons); + reindex_one_database(&cparams, REINDEX_SCHEMA, &schemas, + progname, echo, verbose, + concurrently, concurrentCons); if (indexes.head != NULL) - reindex_one_database(dbname, REINDEX_INDEX, &indexes, host, - port, username, prompt_password, progname, - echo, verbose, concurrently, 1); + reindex_one_database(&cparams, REINDEX_INDEX, &indexes, + progname, echo, 
verbose, + concurrently, 1); if (tables.head != NULL) - reindex_one_database(dbname, REINDEX_TABLE, &tables, host, - port, username, prompt_password, progname, - echo, verbose, concurrently, - concurrentCons); + reindex_one_database(&cparams, REINDEX_TABLE, &tables, + progname, echo, verbose, + concurrently, concurrentCons); /* * reindex database only if neither index nor table nor schema is * specified */ if (indexes.head == NULL && tables.head == NULL && schemas.head == NULL) - reindex_one_database(dbname, REINDEX_DATABASE, NULL, host, - port, username, prompt_password, progname, - echo, verbose, concurrently, concurrentCons); + reindex_one_database(&cparams, REINDEX_DATABASE, NULL, + progname, echo, verbose, + concurrently, concurrentCons); } exit(0); } static void -reindex_one_database(const char *dbname, ReindexType type, - SimpleStringList *user_list, const char *host, - const char *port, const char *username, - enum trivalue prompt_password, const char *progname, bool echo, +reindex_one_database(const ConnParams *cparams, ReindexType type, + SimpleStringList *user_list, + const char *progname, bool echo, bool verbose, bool concurrently, int concurrentCons) { PGconn *conn; @@ -328,8 +336,7 @@ reindex_one_database(const char *dbname, ReindexType type, bool failed = false; int items_count = 0; - conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, echo, false, false); + conn = connectDatabase(cparams, progname, echo, false, false); if (concurrently && PQserverVersion(conn) < 120000) { @@ -436,8 +443,7 @@ reindex_one_database(const char *dbname, ReindexType type, Assert(process_list != NULL); - slots = ParallelSlotsSetup(dbname, host, port, username, prompt_password, - progname, echo, conn, concurrentCons); + slots = ParallelSlotsSetup(cparams, progname, echo, conn, concurrentCons); cell = process_list->head; do @@ -614,16 +620,16 @@ get_parallel_object_list(PGconn *conn, ReindexType type, { case REINDEX_DATABASE: Assert(user_list == 
NULL); - appendPQExpBuffer(&catalog_query, - "SELECT c.relname, ns.nspname\n" - " FROM pg_catalog.pg_class c\n" - " JOIN pg_catalog.pg_namespace ns" - " ON c.relnamespace = ns.oid\n" - " WHERE ns.nspname != 'pg_catalog'\n" - " AND c.relkind IN (" - CppAsString2(RELKIND_RELATION) ", " - CppAsString2(RELKIND_MATVIEW) ")\n" - " ORDER BY c.relpages DESC;"); + appendPQExpBufferStr(&catalog_query, + "SELECT c.relname, ns.nspname\n" + " FROM pg_catalog.pg_class c\n" + " JOIN pg_catalog.pg_namespace ns" + " ON c.relnamespace = ns.oid\n" + " WHERE ns.nspname != 'pg_catalog'\n" + " AND c.relkind IN (" + CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_MATVIEW) ")\n" + " ORDER BY c.relpages DESC;"); break; case REINDEX_SCHEMA: @@ -637,30 +643,30 @@ get_parallel_object_list(PGconn *conn, ReindexType type, * All the tables from all the listed schemas are grabbed at * once. */ - appendPQExpBuffer(&catalog_query, - "SELECT c.relname, ns.nspname\n" - " FROM pg_catalog.pg_class c\n" - " JOIN pg_catalog.pg_namespace ns" - " ON c.relnamespace = ns.oid\n" - " WHERE c.relkind IN (" - CppAsString2(RELKIND_RELATION) ", " - CppAsString2(RELKIND_MATVIEW) ")\n" - " AND ns.nspname IN ("); + appendPQExpBufferStr(&catalog_query, + "SELECT c.relname, ns.nspname\n" + " FROM pg_catalog.pg_class c\n" + " JOIN pg_catalog.pg_namespace ns" + " ON c.relnamespace = ns.oid\n" + " WHERE c.relkind IN (" + CppAsString2(RELKIND_RELATION) ", " + CppAsString2(RELKIND_MATVIEW) ")\n" + " AND ns.nspname IN ("); for (cell = user_list->head; cell; cell = cell->next) { const char *nspname = cell->val; if (nsp_listed) - appendPQExpBuffer(&catalog_query, ", "); + appendPQExpBufferStr(&catalog_query, ", "); else nsp_listed = true; appendStringLiteralConn(&catalog_query, nspname, conn); } - appendPQExpBuffer(&catalog_query, ")\n" - " ORDER BY c.relpages DESC;"); + appendPQExpBufferStr(&catalog_query, ")\n" + " ORDER BY c.relpages DESC;"); } break; @@ -705,23 +711,18 @@ get_parallel_object_list(PGconn *conn, 
ReindexType type, } static void -reindex_all_databases(const char *maintenance_db, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, +reindex_all_databases(ConnParams *cparams, const char *progname, bool echo, bool quiet, bool verbose, bool concurrently, int concurrentCons) { PGconn *conn; PGresult *result; - PQExpBufferData connstr; int i; - conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname, echo); + conn = connectMaintenanceDatabase(cparams, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo); PQfinish(conn); - initPQExpBuffer(&connstr); for (i = 0; i < PQntuples(result); i++) { char *dbname = PQgetvalue(result, i, 0); @@ -732,16 +733,12 @@ reindex_all_databases(const char *maintenance_db, fflush(stdout); } - resetPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, dbname); + cparams->override_dbname = dbname; - reindex_one_database(connstr.data, REINDEX_DATABASE, NULL, host, - port, username, prompt_password, + reindex_one_database(cparams, REINDEX_DATABASE, NULL, progname, echo, verbose, concurrently, concurrentCons); } - termPQExpBuffer(&connstr); PQclear(result); } diff --git a/src/bin/scripts/scripts_parallel.c b/src/bin/scripts/scripts_parallel.c index 01bc6dfeffc90..ec264a269a7d7 100644 --- a/src/bin/scripts/scripts_parallel.c +++ b/src/bin/scripts/scripts_parallel.c @@ -205,8 +205,7 @@ ParallelSlotsGetIdle(ParallelSlot *slots, int numslots) * set. 
*/ ParallelSlot * -ParallelSlotsSetup(const char *dbname, const char *host, const char *port, - const char *username, bool prompt_password, +ParallelSlotsSetup(const ConnParams *cparams, const char *progname, bool echo, PGconn *conn, int numslots) { @@ -221,8 +220,7 @@ ParallelSlotsSetup(const char *dbname, const char *host, const char *port, { for (i = 1; i < numslots; i++) { - conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, echo, false, true); + conn = connectDatabase(cparams, progname, echo, false, true); /* * Fail and exit immediately if trying to use a socket in an diff --git a/src/bin/scripts/scripts_parallel.h b/src/bin/scripts/scripts_parallel.h index cf20449ce3e29..c9d9f0623e949 100644 --- a/src/bin/scripts/scripts_parallel.h +++ b/src/bin/scripts/scripts_parallel.h @@ -12,6 +12,7 @@ #ifndef SCRIPTS_PARALLEL_H #define SCRIPTS_PARALLEL_H +#include "common.h" #include "libpq-fe.h" @@ -23,10 +24,7 @@ typedef struct ParallelSlot extern ParallelSlot *ParallelSlotsGetIdle(ParallelSlot *slots, int numslots); -extern ParallelSlot *ParallelSlotsSetup(const char *dbname, const char *host, - const char *port, - const char *username, - bool prompt_password, +extern ParallelSlot *ParallelSlotsSetup(const ConnParams *cparams, const char *progname, bool echo, PGconn *conn, int numslots); diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c index a8bc65421966d..8c2eade1d5d2e 100644 --- a/src/bin/scripts/vacuumdb.c +++ b/src/bin/scripts/vacuumdb.c @@ -42,19 +42,16 @@ typedef struct vacuumingOptions } vacuumingOptions; -static void vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, +static void vacuum_one_database(const ConnParams *cparams, + vacuumingOptions *vacopts, int stage, SimpleStringList *tables, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, int concurrentCons, const char *progname, bool echo, bool quiet); -static void 
vacuum_all_databases(vacuumingOptions *vacopts, +static void vacuum_all_databases(ConnParams *cparams, + vacuumingOptions *vacopts, bool analyze_in_stages, - const char *maintenance_db, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, int concurrentCons, const char *progname, bool echo, bool quiet); @@ -112,6 +109,7 @@ main(int argc, char *argv[]) char *port = NULL; char *username = NULL; enum trivalue prompt_password = TRI_DEFAULT; + ConnParams cparams; bool echo = false; bool quiet = false; vacuumingOptions vacopts; @@ -305,12 +303,19 @@ main(int argc, char *argv[]) } if (vacopts.full) { - pg_log_error("cannot use the \"%s\" option when performing full", + pg_log_error("cannot use the \"%s\" option when performing full vacuum", "parallel"); exit(1); } } + /* fill cparams except for dbname, which is set below */ + cparams.pghost = host; + cparams.pgport = port; + cparams.pguser = username; + cparams.prompt_password = prompt_password; + cparams.override_dbname = NULL; + setup_cancel_handler(NULL); /* Avoid opening extra connections. 
*/ @@ -330,10 +335,10 @@ main(int argc, char *argv[]) exit(1); } - vacuum_all_databases(&vacopts, + cparams.dbname = maintenance_db; + + vacuum_all_databases(&cparams, &vacopts, analyze_in_stages, - maintenance_db, - host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } @@ -349,25 +354,25 @@ main(int argc, char *argv[]) dbname = get_user_name_or_exit(progname); } + cparams.dbname = dbname; + if (analyze_in_stages) { int stage; for (stage = 0; stage < ANALYZE_NUM_STAGES; stage++) { - vacuum_one_database(dbname, &vacopts, + vacuum_one_database(&cparams, &vacopts, stage, &tables, - host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } } else - vacuum_one_database(dbname, &vacopts, + vacuum_one_database(&cparams, &vacopts, ANALYZE_NO_STAGE, &tables, - host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } @@ -389,11 +394,10 @@ main(int argc, char *argv[]) * a list of tables from the database. */ static void -vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, +vacuum_one_database(const ConnParams *cparams, + vacuumingOptions *vacopts, int stage, SimpleStringList *tables, - const char *host, const char *port, - const char *username, enum trivalue prompt_password, int concurrentCons, const char *progname, bool echo, bool quiet) { @@ -424,8 +428,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, Assert(stage == ANALYZE_NO_STAGE || (stage >= 0 && stage < ANALYZE_NUM_STAGES)); - conn = connectDatabase(dbname, host, port, username, prompt_password, - progname, echo, false, true); + conn = connectDatabase(cparams, progname, echo, false, true); if (vacopts->disable_page_skipping && PQserverVersion(conn) < 90600) { @@ -663,8 +666,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, * for the first slot. If not in parallel mode, the first slot in the * array contains the connection. 
*/ - slots = ParallelSlotsSetup(dbname, host, port, username, prompt_password, - progname, echo, conn, concurrentCons); + slots = ParallelSlotsSetup(cparams, progname, echo, conn, concurrentCons); /* * Prepare all the connections to run the appropriate analyze stage, if @@ -736,28 +738,23 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, * quickly everywhere before generating more detailed ones. */ static void -vacuum_all_databases(vacuumingOptions *vacopts, +vacuum_all_databases(ConnParams *cparams, + vacuumingOptions *vacopts, bool analyze_in_stages, - const char *maintenance_db, const char *host, - const char *port, const char *username, - enum trivalue prompt_password, int concurrentCons, const char *progname, bool echo, bool quiet) { PGconn *conn; PGresult *result; - PQExpBufferData connstr; int stage; int i; - conn = connectMaintenanceDatabase(maintenance_db, host, port, username, - prompt_password, progname, echo); + conn = connectMaintenanceDatabase(cparams, progname, echo); result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo); PQfinish(conn); - initPQExpBuffer(&connstr); if (analyze_in_stages) { /* @@ -772,14 +769,11 @@ vacuum_all_databases(vacuumingOptions *vacopts, { for (i = 0; i < PQntuples(result); i++) { - resetPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, PQgetvalue(result, i, 0)); + cparams->override_dbname = PQgetvalue(result, i, 0); - vacuum_one_database(connstr.data, vacopts, + vacuum_one_database(cparams, vacopts, stage, NULL, - host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } @@ -789,19 +783,15 @@ vacuum_all_databases(vacuumingOptions *vacopts, { for (i = 0; i < PQntuples(result); i++) { - resetPQExpBuffer(&connstr); - appendPQExpBufferStr(&connstr, "dbname="); - appendConnStrVal(&connstr, PQgetvalue(result, i, 0)); + cparams->override_dbname = PQgetvalue(result, i, 0); - 
vacuum_one_database(connstr.data, vacopts, + vacuum_one_database(cparams, vacopts, ANALYZE_NO_STAGE, NULL, - host, port, username, prompt_password, concurrentCons, progname, echo, quiet); } } - termPQExpBuffer(&connstr); PQclear(result); } diff --git a/src/common/Makefile b/src/common/Makefile index f2817628851ea..25c55bd6423c3 100644 --- a/src/common/Makefile +++ b/src/common/Makefile @@ -88,16 +88,21 @@ OBJS_COMMON += sha2.o endif # A few files are currently only built for frontend, not server -# (Mkvcbuild.pm has a copy of this list, too) -OBJS_FRONTEND = \ +# (Mkvcbuild.pm has a copy of this list, too). logging.c is excluded +# from OBJS_FRONTEND_SHLIB (shared library) as a matter of policy, +# because it is not appropriate for general purpose libraries such +# as libpq to report errors directly. +OBJS_FRONTEND_SHLIB = \ $(OBJS_COMMON) \ fe_memutils.o \ - logging.o \ restricted_token.o \ sprompt.o +OBJS_FRONTEND = \ + $(OBJS_FRONTEND_SHLIB) \ + logging.o # foo.o, foo_shlib.o, and foo_srv.o are all built from foo.c -OBJS_SHLIB = $(OBJS_FRONTEND:%.o=%_shlib.o) +OBJS_SHLIB = $(OBJS_FRONTEND_SHLIB:%.o=%_shlib.o) OBJS_SRV = $(OBJS_COMMON:%.o=%_srv.o) # where to find gen_keywordlist.pl and subsidiary files diff --git a/src/common/keywords.c b/src/common/keywords.c index 54ed97709613e..2de0c717a8912 100644 --- a/src/common/keywords.c +++ b/src/common/keywords.c @@ -24,10 +24,25 @@ /* Keyword categories for SQL keywords */ -#define PG_KEYWORD(kwname, value, category) category, +#define PG_KEYWORD(kwname, value, category, collabel) category, const uint8 ScanKeywordCategories[SCANKEYWORDS_NUM_KEYWORDS] = { #include "parser/kwlist.h" }; #undef PG_KEYWORD + +/* Keyword can-be-bare-label flags for SQL keywords */ + +#define PG_KEYWORD(kwname, value, category, collabel) collabel, + +#define BARE_LABEL true +#define AS_LABEL false + +const bool ScanKeywordBareLabel[SCANKEYWORDS_NUM_KEYWORDS] = { +#include "parser/kwlist.h" +}; + +#undef PG_KEYWORD +#undef BARE_LABEL +#undef 
AS_LABEL diff --git a/src/common/logging.c b/src/common/logging.c index 6a3a437a34bd3..d9632fffc8ad1 100644 --- a/src/common/logging.c +++ b/src/common/logging.c @@ -157,12 +157,30 @@ pg_logging_config(int new_flags) log_flags = new_flags; } +/* + * pg_logging_init sets the default log level to INFO. Programs that prefer + * a different default should use this to set it, immediately afterward. + */ void pg_logging_set_level(enum pg_log_level new_level) { __pg_log_level = new_level; } +/* + * Command line switches such as --verbose should invoke this. + */ +void +pg_logging_increase_verbosity(void) +{ + /* + * The enum values are chosen such that we have to decrease __pg_log_level + * in order to become more verbose. + */ + if (__pg_log_level > PG_LOG_NOTSET + 1) + __pg_log_level--; +} + void pg_logging_set_pre_callback(void (*cb) (void)) { diff --git a/src/common/pg_get_line.c b/src/common/pg_get_line.c index 2fb8e198933df..9eb1a33bbb366 100644 --- a/src/common/pg_get_line.c +++ b/src/common/pg_get_line.c @@ -45,7 +45,8 @@ * Also note that the palloc'd buffer is usually a lot longer than * strictly necessary, so it may be inadvisable to use this function * to collect lots of long-lived data. A less memory-hungry option - * is to use pg_get_line_append() in a loop, then pstrdup() each line. + * is to use pg_get_line_buf() or pg_get_line_append() in a loop, + * then pstrdup() each line. */ char * pg_get_line(FILE *stream) @@ -67,11 +68,37 @@ pg_get_line(FILE *stream) return buf.data; } +/* + * pg_get_line_buf() + * + * This has similar behavior to pg_get_line(), and thence to fgets(), + * except that the collected data is returned in a caller-supplied + * StringInfo buffer. This is a convenient API for code that just + * wants to read and process one line at a time, without any artificial + * limit on line length. + * + * Returns true if a line was successfully collected (including the + * case of a non-newline-terminated line at EOF). 
Returns false if + * there was an I/O error or no data was available before EOF. + * (Check ferror(stream) to distinguish these cases.) + * + * In the false-result case, buf is reset to empty. + */ +bool +pg_get_line_buf(FILE *stream, StringInfo buf) +{ + /* We just need to drop any data from the previous call */ + resetStringInfo(buf); + return pg_get_line_append(stream, buf); +} + /* * pg_get_line_append() * * This has similar behavior to pg_get_line(), and thence to fgets(), * except that the collected data is appended to whatever is in *buf. + * This is useful in preference to pg_get_line_buf() if the caller wants + * to merge some lines together, e.g. to implement backslash continuation. * * Returns true if a line was successfully collected (including the * case of a non-newline-terminated line at EOF). Returns false if diff --git a/src/common/restricted_token.c b/src/common/restricted_token.c index d8d3aeffcdc2e..dcc88a75c59d8 100644 --- a/src/common/restricted_token.c +++ b/src/common/restricted_token.c @@ -66,7 +66,7 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo) return 0; } - _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken"); + _CreateRestrictedToken = (__CreateRestrictedToken) (pg_funcptr_t) GetProcAddress(Advapi32Handle, "CreateRestrictedToken"); if (_CreateRestrictedToken == NULL) { diff --git a/src/common/unicode/Makefile b/src/common/unicode/Makefile index 93a9d1615f1c1..eb14add28ad6d 100644 --- a/src/common/unicode/Makefile +++ b/src/common/unicode/Makefile @@ -18,7 +18,7 @@ LIBS += $(PTHREAD_LIBS) # By default, do nothing. 
all: -update-unicode: unicode_norm_table.h unicode_combining_table.h unicode_normprops_table.h +update-unicode: unicode_norm_table.h unicode_combining_table.h unicode_normprops_table.h unicode_norm_hashfunc.h mv $^ ../../../src/include/common/ $(MAKE) normalization-check @@ -30,6 +30,8 @@ UnicodeData.txt DerivedNormalizationProps.txt CompositionExclusions.txt Normaliz # Generation of conversion tables used for string normalization with # UTF-8 strings. +unicode_norm_hashfunc.h: unicode_norm_table.h + unicode_norm_table.h: generate-unicode_norm_table.pl UnicodeData.txt CompositionExclusions.txt $(PERL) generate-unicode_norm_table.pl diff --git a/src/common/unicode/generate-unicode_norm_table.pl b/src/common/unicode/generate-unicode_norm_table.pl index 7ce15e1a0395a..e4d3ccc2346a8 100644 --- a/src/common/unicode/generate-unicode_norm_table.pl +++ b/src/common/unicode/generate-unicode_norm_table.pl @@ -1,16 +1,22 @@ #!/usr/bin/perl # -# Generate a composition table, using Unicode data files as input +# Generate a composition table and its lookup utilities, using Unicode data +# files as input. 
# # Input: UnicodeData.txt and CompositionExclusions.txt -# Output: unicode_norm_table.h +# Output: unicode_norm_table.h and unicode_norm_hashfunc.h # # Copyright (c) 2000-2020, PostgreSQL Global Development Group use strict; use warnings; -my $output_file = "unicode_norm_table.h"; +use FindBin; +use lib "$FindBin::RealBin/../../tools/"; +use PerfectHash; + +my $output_table_file = "unicode_norm_table.h"; +my $output_func_file = "unicode_norm_hashfunc.h"; my $FH; @@ -64,11 +70,13 @@ my $num_characters = scalar @characters; -# Start writing out the output file -open my $OUTPUT, '>', $output_file - or die "Could not open output file $output_file: $!\n"; +# Start writing out the output files +open my $OT, '>', $output_table_file + or die "Could not open output file $output_table_file: $!\n"; +open my $OF, '>', $output_func_file + or die "Could not open output file $output_func_file: $!\n"; -print $OUTPUT <{code}; foreach my $char (@characters) @@ -121,6 +174,9 @@ my $class = $char->{class}; my $decomp = $char->{decomp}; + # Save the code point bytes as a string in network order. + push @dec_cp_packed, pack('N', hex($char->{code})); + # The character decomposition mapping field in UnicodeData.txt is a list # of unicode codepoints, separated by space. But it can be prefixed with # so-called compatibility formatting tag, like "", or "". @@ -163,7 +219,7 @@ { foreach my $lcode (@composition_exclusion_codes) { - if ($lcode eq $char->{code}) + if ($lcode eq $code) { $flags .= " | DECOMP_NO_COMPOSE"; $comment = "in exclusion list"; @@ -171,11 +227,26 @@ } } } + + # Save info for recomposeable codepoints. + # Note that this MUST match the macro DECOMPOSITION_NO_COMPOSE in C + # above! See also the inverse lookup in recompose_code() found in + # src/common/unicode_norm.c. 
+ if (!($flags =~ /DECOMP_COMPAT/ || $flags =~ /DECOMP_NO_COMPOSE/)) + { + push @rec_info, + { + code => $code, + main_index => $main_index, + first => $first_decomp, + second => $decomp_elts[0] + }; + } } if ($decomp_size == 0) { - print $OUTPUT "\t{0x$code, $class, 0$flags, 0}"; + print $OT "\t{0x$code, $class, 0$flags, 0}"; } elsif ($decomp_size == 1 && length($first_decomp) <= 4) { @@ -183,12 +254,11 @@ # The decomposition consists of a single codepoint, and it fits # in a uint16, so we can store it "inline" in the main table. $flags .= " | DECOMP_INLINE"; - print $OUTPUT "\t{0x$code, $class, 1$flags, 0x$first_decomp}"; + print $OT "\t{0x$code, $class, 1$flags, 0x$first_decomp}"; } else { - print $OUTPUT - "\t{0x$code, $class, $decomp_size$flags, $decomp_index}"; + print $OT "\t{0x$code, $class, $decomp_size$flags, $decomp_index}"; # Now save the decompositions into a dedicated area that will # be written afterwards. First build the entry dedicated to @@ -205,25 +275,17 @@ } # Print a comma after all items except the last one. - print $OUTPUT "," unless ($code eq $last_code); - if ($comment ne "") - { + print $OT "," unless ($code eq $last_code); - # If the line is wide already, indent the comment with one tab, - # otherwise with two. This is to make the output match the way - # pgindent would mangle it. (This is quite hacky. To do this - # properly, we should actually track how long the line is so far, - # but this works for now.) - print $OUTPUT "\t" if ($decomp_index < 10); + print $OT "\t/* $comment */" if ($comment ne ""); + print $OT "\n"; - print $OUTPUT "\t/* $comment */" if ($comment ne ""); - } - print $OUTPUT "\n"; + $main_index++; } -print $OUTPUT "\n};\n\n"; +print $OT "\n};\n\n"; # Print the array of decomposed codes. -print $OUTPUT < 4); +print $OF "/* Perfect hash function for decomposition */\n"; +print $OF "static $dec_func\n"; + +# Emit the structure that wraps the hash lookup information into +# one variable. 
+print $OF <{first}) << 32) | hex($rec->{second}); + + # We are only interested in the lowest code point that decomposes + # to the given code pair. + next if $seenit{$hashkey}; + + # Save the hash key bytes in network order + push @rec_cp_packed, pack('Q>', $hashkey); + + # Append inverse lookup element + $recomp_string .= ",\n" if !$firstentry; + $recomp_string .= sprintf "\t/* U+%s+%s -> U+%s */ %s", + $rec->{first}, + $rec->{second}, + $rec->{code}, + $rec->{main_index}; + + $seenit{$hashkey} = 1; + $firstentry = 0; +} + +# Emit the inverse lookup array containing indexes into UnicodeDecompMain. +my $num_recomps = scalar @rec_cp_packed; +print $OF < 8); +print $OF "/* Perfect hash function for recomposition */\n"; +print $OF "static $rec_func\n"; + +# Emit the structure that wraps the hash lookup information into +# one variable. +print $OF <{first}); + my $b1 = hex($b->{first}); + + my $a2 = hex($a->{second}); + my $b2 = hex($b->{second}); + + # First sort by the first code point + return -1 if $a1 < $b1; + return 1 if $a1 > $b1; + + # Then sort by the second code point + return -1 if $a2 < $b2; + return 1 if $a2 > $b2; + + # Finally sort by the code point that decomposes into first and + # second ones. + my $acode = hex($a->{code}); + my $bcode = hex($b->{code}); + + return -1 if $acode < $bcode; + return -1 if $acode > $bcode; + + die "found duplicate entries of recomposeable code pairs"; +} diff --git a/src/common/unicode/generate-unicode_normprops_table.pl b/src/common/unicode/generate-unicode_normprops_table.pl index e8e5097c094b0..d652b95965dc7 100644 --- a/src/common/unicode/generate-unicode_normprops_table.pl +++ b/src/common/unicode/generate-unicode_normprops_table.pl @@ -9,6 +9,10 @@ use strict; use warnings; +use FindBin; +use lib "$FindBin::RealBin/../../tools/"; +use PerfectHash; + my %data; print @@ -18,13 +22,25 @@ #include "common/unicode_norm.h" /* - * We use a bit field here to save space. + * Normalization quick check entry for codepoint. 
We use a bit field + * here to save space. */ typedef struct { unsigned int codepoint:21; signed int quickcheck:4; /* really UnicodeNormalizationQC */ -} pg_unicode_normprops; +} pg_unicode_normprops; + +/* Typedef for hash function on quick check table */ +typedef int (*qc_hash_func) (const void *key); + +/* Information for quick check lookup with perfect hash function */ +typedef struct +{ + const pg_unicode_normprops *normprops; + qc_hash_func hash; + int num_normprops; +} pg_unicode_norminfo; EOS foreach my $line () @@ -66,6 +82,7 @@ "static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n"; my %subdata = %{ $data{$prop} }; + my @cp_packed; foreach my $cp (sort { $a <=> $b } keys %subdata) { my $qc; @@ -82,7 +99,27 @@ die; } printf "\t{0x%04X, %s},\n", $cp, $qc; + + # Save the bytes as a string in network order. + push @cp_packed, pack('N', $cp); } print "};\n"; + + # Emit the definition of the perfect hash function. + my $funcname = $prop . '_hash_func'; + my $f = PerfectHash::generate_hash_function(\@cp_packed, $funcname, + fixed_key_length => 4); + printf "\n/* Perfect hash function for %s */", $prop; + print "\nstatic $f\n"; + + # Emit the structure that wraps the hash lookup information into + # one variable. 
+ printf "/* Hash lookup information for %s */", $prop; + printf "\nstatic const pg_unicode_norminfo "; + printf "UnicodeNormInfo_%s = {\n", $prop; + printf "\tUnicodeNormProps_%s,\n", $prop; + printf "\t%s,\n", $funcname; + printf "\t%d\n", scalar @cp_packed; + printf "};\n"; } diff --git a/src/common/unicode_norm.c b/src/common/unicode_norm.c index ab5ce5934569b..abb83cbf985e0 100644 --- a/src/common/unicode_norm.c +++ b/src/common/unicode_norm.c @@ -19,10 +19,13 @@ #endif #include "common/unicode_norm.h" -#include "common/unicode_norm_table.h" #ifndef FRONTEND +#include "common/unicode_norm_hashfunc.h" #include "common/unicode_normprops_table.h" +#else +#include "common/unicode_norm_table.h" #endif +#include "port/pg_bswap.h" #ifndef FRONTEND #define ALLOC(size) palloc(size) @@ -43,6 +46,7 @@ #define NCOUNT VCOUNT * TCOUNT #define SCOUNT LCOUNT * NCOUNT +#ifdef FRONTEND /* comparison routine for bsearch() of decomposition lookup table. */ static int conv_compare(const void *p1, const void *p2) @@ -55,19 +59,53 @@ conv_compare(const void *p1, const void *p2) return (v1 > v2) ? 1 : ((v1 == v2) ? 0 : -1); } +#endif + /* + * get_code_entry + * * Get the entry corresponding to code in the decomposition lookup table. + * The backend version of this code uses a perfect hash function for the + * lookup, while the frontend version uses a binary search. */ -static pg_unicode_decomposition * +static const pg_unicode_decomposition * get_code_entry(pg_wchar code) { +#ifndef FRONTEND + int h; + uint32 hashkey; + pg_unicode_decompinfo decompinfo = UnicodeDecompInfo; + + /* + * Compute the hash function. The hash key is the codepoint with the bytes + * in network order. + */ + hashkey = pg_hton32(code); + h = decompinfo.hash(&hashkey); + + /* An out-of-range result implies no match */ + if (h < 0 || h >= decompinfo.num_decomps) + return NULL; + + /* + * Since it's a perfect hash, we need only match to the specific codepoint + * it identifies. 
+ */ + if (code != decompinfo.decomps[h].codepoint) + return NULL; + + /* Success! */ + return &decompinfo.decomps[h]; +#else return bsearch(&(code), UnicodeDecompMain, lengthof(UnicodeDecompMain), sizeof(pg_unicode_decomposition), conv_compare); +#endif } + /* * Given a decomposition entry looked up earlier, get the decomposed * characters. @@ -76,7 +114,7 @@ get_code_entry(pg_wchar code) * is only valid until next call to this function! */ static const pg_wchar * -get_code_decomposition(pg_unicode_decomposition *entry, int *dec_size) +get_code_decomposition(const pg_unicode_decomposition *entry, int *dec_size) { static pg_wchar x; @@ -103,7 +141,7 @@ get_code_decomposition(pg_unicode_decomposition *entry, int *dec_size) static int get_decomposed_size(pg_wchar code, bool compat) { - pg_unicode_decomposition *entry; + const pg_unicode_decomposition *entry; int size = 0; int i; const uint32 *decomp; @@ -190,17 +228,51 @@ recompose_code(uint32 start, uint32 code, uint32 *result) } else { - int i; + const pg_unicode_decomposition *entry; /* * Do an inverse lookup of the decomposition tables to see if anything * matches. The comparison just needs to be a perfect match on the * sub-table of size two, because the start character has already been - * recomposed partially. + * recomposed partially. This lookup uses a perfect hash function for + * the backend code. + */ +#ifndef FRONTEND + + int h, + inv_lookup_index; + uint64 hashkey; + pg_unicode_recompinfo recompinfo = UnicodeRecompInfo; + + /* + * Compute the hash function. The hash key is formed by concatenating + * bytes of the two codepoints in network order. See also + * src/common/unicode/generate-unicode_norm_table.pl. 
*/ + hashkey = pg_hton64(((uint64) start << 32) | (uint64) code); + h = recompinfo.hash(&hashkey); + + /* An out-of-range result implies no match */ + if (h < 0 || h >= recompinfo.num_recomps) + return false; + + inv_lookup_index = recompinfo.inverse_lookup[h]; + entry = &UnicodeDecompMain[inv_lookup_index]; + + if (start == UnicodeDecomp_codepoints[entry->dec_index] && + code == UnicodeDecomp_codepoints[entry->dec_index + 1]) + { + *result = entry->codepoint; + return true; + } + +#else + + int i; + for (i = 0; i < lengthof(UnicodeDecompMain); i++) { - const pg_unicode_decomposition *entry = &UnicodeDecompMain[i]; + entry = &UnicodeDecompMain[i]; if (DECOMPOSITION_SIZE(entry) != 2) continue; @@ -215,6 +287,7 @@ recompose_code(uint32 start, uint32 code, uint32 *result) return true; } } +#endif /* !FRONTEND */ } return false; @@ -230,7 +303,7 @@ recompose_code(uint32 start, uint32 code, uint32 *result) static void decompose_code(pg_wchar code, bool compat, pg_wchar **result, int *current) { - pg_unicode_decomposition *entry; + const pg_unicode_decomposition *entry; int i; const uint32 *decomp; int dec_size; @@ -357,8 +430,8 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input) pg_wchar prev = decomp_chars[count - 1]; pg_wchar next = decomp_chars[count]; pg_wchar tmp; - pg_unicode_decomposition *prevEntry = get_code_entry(prev); - pg_unicode_decomposition *nextEntry = get_code_entry(next); + const pg_unicode_decomposition *prevEntry = get_code_entry(prev); + const pg_unicode_decomposition *nextEntry = get_code_entry(next); /* * If no entries are found, the character used is either an Hangul @@ -416,7 +489,7 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input) for (count = 1; count < decomp_size; count++) { pg_wchar ch = decomp_chars[count]; - pg_unicode_decomposition *ch_entry = get_code_entry(ch); + const pg_unicode_decomposition *ch_entry = get_code_entry(ch); int ch_class = (ch_entry == NULL) ? 
0 : ch_entry->comb_class; pg_wchar composite; @@ -457,7 +530,7 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input) static uint8 get_canonical_class(pg_wchar ch) { - pg_unicode_decomposition *entry = get_code_entry(ch); + const pg_unicode_decomposition *entry = get_code_entry(ch); if (!entry) return 0; @@ -465,15 +538,32 @@ get_canonical_class(pg_wchar ch) return entry->comb_class; } -static int -qc_compare(const void *p1, const void *p2) +static const pg_unicode_normprops * +qc_hash_lookup(pg_wchar ch, const pg_unicode_norminfo *norminfo) { - uint32 v1, - v2; + int h; + uint32 hashkey; + + /* + * Compute the hash function. The hash key is the codepoint with the bytes + * in network order. + */ + hashkey = pg_hton32(ch); + h = norminfo->hash(&hashkey); + + /* An out-of-range result implies no match */ + if (h < 0 || h >= norminfo->num_normprops) + return NULL; + + /* + * Since it's a perfect hash, we need only match to the specific codepoint + * it identifies. + */ + if (ch != norminfo->normprops[h].codepoint) + return NULL; - v1 = ((const pg_unicode_normprops *) p1)->codepoint; - v2 = ((const pg_unicode_normprops *) p2)->codepoint; - return (v1 - v2); + /* Success! 
*/ + return &norminfo->normprops[h]; } /* @@ -482,26 +572,15 @@ qc_compare(const void *p1, const void *p2) static UnicodeNormalizationQC qc_is_allowed(UnicodeNormalizationForm form, pg_wchar ch) { - pg_unicode_normprops key; - pg_unicode_normprops *found = NULL; - - key.codepoint = ch; + const pg_unicode_normprops *found = NULL; switch (form) { case UNICODE_NFC: - found = bsearch(&key, - UnicodeNormProps_NFC_QC, - lengthof(UnicodeNormProps_NFC_QC), - sizeof(pg_unicode_normprops), - qc_compare); + found = qc_hash_lookup(ch, &UnicodeNormInfo_NFC_QC); break; case UNICODE_NFKC: - found = bsearch(&key, - UnicodeNormProps_NFKC_QC, - lengthof(UnicodeNormProps_NFKC_QC), - sizeof(pg_unicode_normprops), - qc_compare); + found = qc_hash_lookup(ch, &UnicodeNormInfo_NFKC_QC); break; default: Assert(false); diff --git a/src/fe_utils/archive.c b/src/fe_utils/archive.c index c4cb213198511..252dc0fb6a5d8 100644 --- a/src/fe_utils/archive.c +++ b/src/fe_utils/archive.c @@ -50,7 +50,7 @@ RestoreArchivedFile(const char *path, const char *xlogfname, xlogfname, NULL); if (xlogRestoreCmd == NULL) { - pg_log_fatal("could not use restore_command with %%r alias"); + pg_log_fatal("cannot use restore_command with %%r placeholder"); exit(1); } @@ -71,9 +71,9 @@ RestoreArchivedFile(const char *path, const char *xlogfname, { if (expectedSize > 0 && stat_buf.st_size != expectedSize) { - pg_log_fatal("unexpected file size for \"%s\": %lu instead of %lu", - xlogfname, (unsigned long) stat_buf.st_size, - (unsigned long) expectedSize); + pg_log_fatal("unexpected file size for \"%s\": %lld instead of %lld", + xlogfname, (long long int) stat_buf.st_size, + (long long int) expectedSize); exit(1); } else @@ -109,7 +109,7 @@ RestoreArchivedFile(const char *path, const char *xlogfname, */ if (wait_result_is_any_signal(rc, true)) { - pg_log_fatal("restore_command failed due to the signal: %s", + pg_log_fatal("restore_command failed: %s", wait_result_to_str(rc)); exit(1); } diff --git 
a/src/include/access/clog.h b/src/include/access/clog.h index 2db8acb189f5c..6c840cbf299b3 100644 --- a/src/include/access/clog.h +++ b/src/include/access/clog.h @@ -12,6 +12,7 @@ #define CLOG_H #include "access/xlogreader.h" +#include "storage/sync.h" #include "lib/stringinfo.h" /* @@ -50,6 +51,8 @@ extern void CheckPointCLOG(void); extern void ExtendCLOG(TransactionId newestXact); extern void TruncateCLOG(TransactionId oldestXact, Oid oldestxid_datoid); +extern int clogsyncfiletag(const FileTag *ftag, char *path); + /* XLOG stuff */ #define CLOG_ZEROPAGE 0x00 #define CLOG_TRUNCATE 0x10 diff --git a/src/include/access/commit_ts.h b/src/include/access/commit_ts.h index 2740c02a84fa8..2d1724952257a 100644 --- a/src/include/access/commit_ts.h +++ b/src/include/access/commit_ts.h @@ -14,6 +14,7 @@ #include "access/xlog.h" #include "datatype/timestamp.h" #include "replication/origin.h" +#include "storage/sync.h" #include "utils/guc.h" @@ -45,6 +46,8 @@ extern void SetCommitTsLimit(TransactionId oldestXact, TransactionId newestXact); extern void AdvanceOldestCommitTsXid(TransactionId oldestXact); +extern int committssyncfiletag(const FileTag *ftag, char *path); + /* XLOG stuff */ #define COMMIT_TS_ZEROPAGE 0x00 #define COMMIT_TS_TRUNCATE 0x10 diff --git a/src/include/access/gist.h b/src/include/access/gist.h index 4994351697c34..4f6dae9a76b0c 100644 --- a/src/include/access/gist.h +++ b/src/include/access/gist.h @@ -37,7 +37,8 @@ #define GIST_DISTANCE_PROC 8 #define GIST_FETCH_PROC 9 #define GIST_OPTIONS_PROC 10 -#define GISTNProcs 10 +#define GIST_SORTSUPPORT_PROC 11 +#define GISTNProcs 11 /* * Page opaque data in a GiST index page. 
diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 02e985549f635..b68c01a5f246a 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -501,12 +501,15 @@ extern IndexTuple gistgetadjusted(Relation r, GISTSTATE *giststate); extern IndexTuple gistFormTuple(GISTSTATE *giststate, Relation r, Datum *attdata, bool *isnull, bool isleaf); +extern void gistCompressValues(GISTSTATE *giststate, Relation r, + Datum *attdata, bool *isnull, bool isleaf, Datum *compatt); extern OffsetNumber gistchoose(Relation r, Page p, IndexTuple it, GISTSTATE *giststate); extern void GISTInitBuffer(Buffer b, uint32 f); +extern void gistinitpage(Page page, uint32 f); extern void gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e, Datum k, Relation r, Page pg, OffsetNumber o, bool l, bool isNull); diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index 6d729008c6006..9a303809019a5 100644 --- a/src/include/access/multixact.h +++ b/src/include/access/multixact.h @@ -13,6 +13,7 @@ #include "access/xlogreader.h" #include "lib/stringinfo.h" +#include "storage/sync.h" /* @@ -108,6 +109,7 @@ extern MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members); extern MultiXactId ReadNextMultiXactId(void); +extern void ReadMultiXactIdRange(MultiXactId *oldest, MultiXactId *next); extern bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly); extern void MultiXactIdSetOldestMember(void); extern int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **xids, @@ -116,6 +118,9 @@ extern bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2); extern bool MultiXactIdPrecedesOrEquals(MultiXactId multi1, MultiXactId multi2); +extern int multixactoffsetssyncfiletag(const FileTag *ftag, char *path); +extern int multixactmemberssyncfiletag(const FileTag *ftag, char *path); + extern void AtEOXact_MultiXact(void); extern void AtPrepare_MultiXact(void); extern void 
PostPrepare_MultiXact(TransactionId xid); diff --git a/src/include/access/slru.h b/src/include/access/slru.h index 61fbc80ef0d6a..b39b43504d80c 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -15,6 +15,7 @@ #include "access/xlogdefs.h" #include "storage/lwlock.h" +#include "storage/sync.h" /* @@ -111,10 +112,10 @@ typedef struct SlruCtlData SlruShared shared; /* - * This flag tells whether to fsync writes (true for pg_xact and multixact - * stuff, false for pg_subtrans and pg_notify). + * Which sync handler function to use when handing sync requests over to + * the checkpointer. SYNC_HANDLER_NONE to disable fsync (eg pg_notify). */ - bool do_fsync; + SyncRequestHandler sync_handler; /* * Decide which of two page numbers is "older" for truncation purposes. We @@ -135,14 +136,15 @@ typedef SlruCtlData *SlruCtl; extern Size SimpleLruShmemSize(int nslots, int nlsns); extern void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, - LWLock *ctllock, const char *subdir, int tranche_id); + LWLock *ctllock, const char *subdir, int tranche_id, + SyncRequestHandler sync_handler); extern int SimpleLruZeroPage(SlruCtl ctl, int pageno); extern int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok, TransactionId xid); extern int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid); extern void SimpleLruWritePage(SlruCtl ctl, int slotno); -extern void SimpleLruFlush(SlruCtl ctl, bool allow_redirtied); +extern void SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied); extern void SimpleLruTruncate(SlruCtl ctl, int cutoffPage); extern bool SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno); @@ -151,6 +153,8 @@ typedef bool (*SlruScanCallback) (SlruCtl ctl, char *filename, int segpage, extern bool SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data); extern void SlruDeleteSegment(SlruCtl ctl, int segno); +extern int SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path); + /* 
SlruScanDirectory public callbacks */ extern bool SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data); diff --git a/src/include/access/xact.h b/src/include/access/xact.h index df1b43a932e3d..7320de345c995 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -72,7 +72,8 @@ typedef enum SYNCHRONOUS_COMMIT_REMOTE_WRITE, /* wait for local flush and remote * write */ SYNCHRONOUS_COMMIT_REMOTE_FLUSH, /* wait for local and remote flush */ - SYNCHRONOUS_COMMIT_REMOTE_APPLY /* wait for local flush and remote apply */ + SYNCHRONOUS_COMMIT_REMOTE_APPLY /* wait for local and remote flush + and remote apply */ } SyncCommitLevel; /* Define the default setting for synchronous_commit */ diff --git a/src/include/access/xloginsert.h b/src/include/access/xloginsert.h index 63df25ae90fef..4ba2c56be60be 100644 --- a/src/include/access/xloginsert.h +++ b/src/include/access/xloginsert.h @@ -54,6 +54,8 @@ extern bool XLogCheckBufferNeedsBackup(Buffer buffer); extern XLogRecPtr log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blk, char *page, bool page_std); +extern void log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages, + BlockNumber *blknos, char **pages, bool page_std); extern XLogRecPtr log_newpage_buffer(Buffer buffer, bool page_std); extern void log_newpage_range(Relation rel, ForkNumber forkNum, BlockNumber startblk, BlockNumber endblk, bool page_std); diff --git a/src/include/c.h b/src/include/c.h index 2c61ca8aa894d..9cd67f8f76551 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -98,6 +98,7 @@ * * GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html * GCC: https://gcc.gnu.org/onlinedocs/gcc/Type-Attributes.html + * Clang: https://clang.llvm.org/docs/AttributeReference.html * Sunpro: https://docs.oracle.com/cd/E18659_01/html/821-1384/gjzke.html * XLC: https://www.ibm.com/support/knowledgecenter/SSGH2K_13.1.2/com.ibm.xlc131.aix.doc/language_ref/function_attributes.html * XLC: 
https://www.ibm.com/support/knowledgecenter/SSGH2K_13.1.2/com.ibm.xlc131.aix.doc/language_ref/type_attrib.html @@ -1132,7 +1133,8 @@ typedef union PGAlignedXLogBlock * access to the original string and translated string, and for cases where * immediate translation is not possible, like when initializing global * variables. - * http://www.gnu.org/software/autoconf/manual/gettext/Special-cases.html + * + * https://www.gnu.org/software/gettext/manual/html_node/Special-cases.html */ #define gettext_noop(x) (x) diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 0bbe0a122afd3..f44a09b0c2515 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 202009031 +#define CATALOG_VERSION_NO 202010201 #endif diff --git a/src/include/catalog/pg_amproc.dat b/src/include/catalog/pg_amproc.dat index 37b580883fcb9..a8e0c4ff8a527 100644 --- a/src/include/catalog/pg_amproc.dat +++ b/src/include/catalog/pg_amproc.dat @@ -480,6 +480,8 @@ amproc => 'gist_point_distance' }, { amprocfamily => 'gist/point_ops', amproclefttype => 'point', amprocrighttype => 'point', amprocnum => '9', amproc => 'gist_point_fetch' }, +{ amprocfamily => 'gist/point_ops', amproclefttype => 'point', + amprocrighttype => 'point', amprocnum => '11', amproc => 'gist_point_sortsupport' }, { amprocfamily => 'gist/box_ops', amproclefttype => 'box', amprocrighttype => 'box', amprocnum => '1', amproc => 'gist_box_consistent' }, { amprocfamily => 'gist/box_ops', amproclefttype => 'box', diff --git a/src/include/catalog/pg_operator.dat b/src/include/catalog/pg_operator.dat index 4f8b9865effc4..7cc812adda638 100644 --- a/src/include/catalog/pg_operator.dat +++ b/src/include/catalog/pg_operator.dat @@ -218,12 +218,6 @@ oprname => '>=', oprleft => 'xid8', oprright => 'xid8', oprresult => 'bool', oprcom => '<=(xid8,xid8)', oprnegate => '<(xid8,xid8)', oprcode => 'xid8ge', oprrest => 'scalargesel', 
oprjoin => 'scalargejoinsel' }, -{ oid => '388', descr => 'deprecated, use factorial() instead', - oprname => '!', oprkind => 'r', oprleft => 'int8', oprright => '0', - oprresult => 'numeric', oprcode => 'numeric_fac' }, -{ oid => '389', descr => 'deprecated, use factorial() instead', - oprname => '!!', oprkind => 'l', oprleft => '0', oprright => 'int8', - oprresult => 'numeric', oprcode => 'numeric_fac' }, { oid => '385', descr => 'equal', oprname => '=', oprcanhash => 't', oprleft => 'cid', oprright => 'cid', oprresult => 'bool', oprcom => '=(cid,cid)', oprcode => 'cideq', diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h index 1daa2638520f6..62a7dbf23f69b 100644 --- a/src/include/catalog/pg_operator.h +++ b/src/include/catalog/pg_operator.h @@ -41,7 +41,7 @@ CATALOG(pg_operator,2617,OperatorRelationId) /* operator owner */ Oid oprowner BKI_DEFAULT(PGUID); - /* 'l', 'r', or 'b' */ + /* 'l' for prefix or 'b' for infix */ char oprkind BKI_DEFAULT(b); /* can be used in merge join? */ @@ -50,10 +50,10 @@ CATALOG(pg_operator,2617,OperatorRelationId) /* can be used in hash join? */ bool oprcanhash BKI_DEFAULT(f); - /* left arg type, or 0 if 'l' oprkind */ + /* left arg type, or 0 if prefix operator */ Oid oprleft BKI_LOOKUP(pg_type); - /* right arg type, or 0 if 'r' oprkind */ + /* right arg type */ Oid oprright BKI_LOOKUP(pg_type); /* result datatype */ diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat index 687509ba9265b..bbcac69d48f77 100644 --- a/src/include/catalog/pg_proc.dat +++ b/src/include/catalog/pg_proc.dat @@ -327,10 +327,6 @@ { oid => '110', descr => 'I/O', proname => 'unknownout', prorettype => 'cstring', proargtypes => 'unknown', prosrc => 'unknownout' }, -{ oid => '111', - descr => 'implementation of deprecated ! and !! 
factorial operators', - proname => 'numeric_fac', prorettype => 'numeric', proargtypes => 'int8', - prosrc => 'numeric_fac' }, { oid => '115', proname => 'box_above_eq', prorettype => 'bool', proargtypes => 'box box', @@ -3687,10 +3683,11 @@ prosrc => 'pg_get_function_arg_default' }, { oid => '1686', descr => 'list of SQL keywords', - proname => 'pg_get_keywords', procost => '10', prorows => '400', + proname => 'pg_get_keywords', procost => '10', prorows => '500', proretset => 't', provolatile => 's', prorettype => 'record', - proargtypes => '', proallargtypes => '{text,char,text}', - proargmodes => '{o,o,o}', proargnames => '{word,catcode,catdesc}', + proargtypes => '', proallargtypes => '{text,char,bool,text,text}', + proargmodes => '{o,o,o,o,o}', + proargnames => '{word,catcode,barelabel,catdesc,baredesc}', prosrc => 'pg_get_keywords' }, { oid => '2289', descr => 'convert generic options array to name/value table', @@ -5260,6 +5257,14 @@ proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}', proargnames => '{pid,status,receive_start_lsn,receive_start_tli,written_lsn,flushed_lsn,received_tli,last_msg_send_time,last_msg_receipt_time,latest_end_lsn,latest_end_time,slot_name,sender_host,sender_port,conninfo}', prosrc => 'pg_stat_get_wal_receiver' }, +{ oid => '8595', descr => 'statistics: information about replication slots', + proname => 'pg_stat_get_replication_slots', prorows => '10', proisstrict => 'f', + proretset => 't', provolatile => 's', proparallel => 'r', + prorettype => 'record', proargtypes => '', + proallargtypes => '{text,int8,int8,int8,timestamptz}', + proargmodes => '{o,o,o,o,o}', + proargnames => '{slot_name,spill_txns,spill_count,spill_bytes,stats_reset}', + prosrc => 'pg_stat_get_replication_slots' }, { oid => '6118', descr => 'statistics: information about subscription', proname => 'pg_stat_get_subscription', proisstrict => 'f', provolatile => 's', proparallel => 'r', prorettype => 'record', proargtypes => 'oid', @@ -5484,6 +5489,14 @@ proname => 
'pg_stat_get_buf_alloc', provolatile => 's', proparallel => 'r', prorettype => 'int8', proargtypes => '', prosrc => 'pg_stat_get_buf_alloc' }, +{ oid => '1136', descr => 'statistics: information about WAL activity', + proname => 'pg_stat_get_wal', proisstrict => 'f', provolatile => 's', + proparallel => 'r', prorettype => 'record', proargtypes => '', + proallargtypes => '{int8,timestamptz}', + proargmodes => '{o,o}', + proargnames => '{wal_buffers_full,stats_reset}', + prosrc => 'pg_stat_get_wal' }, + { oid => '2306', descr => 'statistics: information about SLRU caches', proname => 'pg_stat_get_slru', prorows => '100', proisstrict => 'f', proretset => 't', provolatile => 's', proparallel => 'r', @@ -5601,6 +5614,10 @@ descr => 'statistics: reset collected statistics for a single SLRU', proname => 'pg_stat_reset_slru', proisstrict => 'f', provolatile => 'v', prorettype => 'void', proargtypes => 'text', prosrc => 'pg_stat_reset_slru' }, +{ oid => '8596', + descr => 'statistics: reset collected statistics for a single replication slot', + proname => 'pg_stat_reset_replication_slot', proisstrict => 'f', provolatile => 'v', + prorettype => 'void', proargtypes => 'text', prosrc => 'pg_stat_reset_replication_slot' }, { oid => '3163', descr => 'current trigger depth', proname => 'pg_trigger_depth', provolatile => 's', proparallel => 'r', @@ -8062,6 +8079,9 @@ proname => 'gist_poly_distance', prorettype => 'float8', proargtypes => 'internal polygon int2 oid internal', prosrc => 'gist_poly_distance' }, +{ oid => '3435', descr => 'sort support', + proname => 'gist_point_sortsupport', prorettype => 'void', + proargtypes => 'internal', prosrc => 'gist_point_sortsupport' }, # GIN array support { oid => '2743', descr => 'GIN array support', diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index b50fa25dbd860..268c81089653f 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -91,7 +91,7 @@ 
CATALOG(pg_proc,1255,ProcedureRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81,Proce * proargtypes */ - /* parameter types (excludes OUT params) */ + /* parameter types (excludes OUT params of functions) */ oidvector proargtypes BKI_LOOKUP(pg_type) BKI_FORCE_NOT_NULL; #ifdef CATALOG_VARLEN diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h index f384f4e7fa657..ff5c8d7ff91c9 100644 --- a/src/include/catalog/pg_subscription_rel.h +++ b/src/include/catalog/pg_subscription_rel.h @@ -80,8 +80,7 @@ extern void AddSubscriptionRelState(Oid subid, Oid relid, char state, XLogRecPtr sublsn); extern void UpdateSubscriptionRelState(Oid subid, Oid relid, char state, XLogRecPtr sublsn); -extern char GetSubscriptionRelState(Oid subid, Oid relid, - XLogRecPtr *sublsn, bool missing_ok); +extern char GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn); extern void RemoveSubscriptionRel(Oid subid, Oid relid); extern List *GetSubscriptionRelations(Oid subid); diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h index a40ddf5db52ca..e38d732ed477d 100644 --- a/src/include/commands/trigger.h +++ b/src/include/commands/trigger.h @@ -46,7 +46,7 @@ typedef struct TriggerData * The state for capturing old and new tuples into transition tables for a * single ModifyTable node (or other operation source, e.g. copy.c). * - * This is per-caller to avoid conflicts in setting tcs_map or + * This is per-caller to avoid conflicts in setting * tcs_original_insert_tuple. Note, however, that the pointed-to * private data may be shared across multiple callers. */ @@ -65,14 +65,6 @@ typedef struct TransitionCaptureState bool tcs_update_new_table; bool tcs_insert_new_table; - /* - * For UPDATE and DELETE, AfterTriggerSaveEvent may need to convert the - * new and old tuples from a child table's format to the format of the - * relation named in a query so that it is compatible with the transition - * tuplestores. 
The caller must store the conversion map here if so. - */ - TupleConversionMap *tcs_map; - /* * For INSERT and COPY, it would be wasteful to convert tuples from child * format to parent format after they have already been converted in the diff --git a/src/include/common/keywords.h b/src/include/common/keywords.h index 257c050903e34..c9f9a9f991a7b 100644 --- a/src/include/common/keywords.h +++ b/src/include/common/keywords.h @@ -25,9 +25,11 @@ #ifndef FRONTEND extern PGDLLIMPORT const ScanKeywordList ScanKeywords; extern PGDLLIMPORT const uint8 ScanKeywordCategories[]; +extern PGDLLIMPORT const bool ScanKeywordBareLabel[]; #else extern const ScanKeywordList ScanKeywords; extern const uint8 ScanKeywordCategories[]; +extern const bool ScanKeywordBareLabel[]; #endif #endif /* KEYWORDS_H */ diff --git a/src/include/common/logging.h b/src/include/common/logging.h index 028149c7a1528..3205b8fef9b70 100644 --- a/src/include/common/logging.h +++ b/src/include/common/logging.h @@ -66,6 +66,7 @@ extern enum pg_log_level __pg_log_level; void pg_logging_init(const char *argv0); void pg_logging_config(int new_flags); void pg_logging_set_level(enum pg_log_level new_level); +void pg_logging_increase_verbosity(void); void pg_logging_set_pre_callback(void (*cb) (void)); void pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno)); diff --git a/src/include/common/string.h b/src/include/common/string.h index 50c241a811b6a..6a4baa6f35900 100644 --- a/src/include/common/string.h +++ b/src/include/common/string.h @@ -21,6 +21,7 @@ extern int pg_strip_crlf(char *str); /* functions in src/common/pg_get_line.c */ extern char *pg_get_line(FILE *stream); +extern bool pg_get_line_buf(FILE *stream, struct StringInfoData *buf); extern bool pg_get_line_append(FILE *stream, struct StringInfoData *buf); /* functions in src/common/sprompt.c */ diff --git a/src/include/common/unicode_norm_hashfunc.h b/src/include/common/unicode_norm_hashfunc.h new file mode 100644 index 
0000000000000..e6acb2a8d0f74 --- /dev/null +++ b/src/include/common/unicode_norm_hashfunc.h @@ -0,0 +1,2932 @@ +/*------------------------------------------------------------------------- + * + * unicode_norm_hashfunc.h + * Perfect hash functions used for Unicode normalization + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/common/unicode_norm_hashfunc.h + * + *------------------------------------------------------------------------- + */ + +/* + * File auto-generated by src/common/unicode/generate-unicode_norm_table.pl, + * do not edit. There is deliberately not an #ifndef PG_UNICODE_NORM_HASHFUNC_H + * here. + */ + +#include "common/unicode_norm_table.h" + +/* Typedef for perfect hash functions */ +typedef int (*cp_hash_func) (const void *key); + +/* Information for lookups with perfect hash functions */ +typedef struct +{ + const pg_unicode_decomposition *decomps; + cp_hash_func hash; + int num_decomps; +} pg_unicode_decompinfo; + +typedef struct +{ + const uint16 *inverse_lookup; + cp_hash_func hash; + int num_recomps; +} pg_unicode_recompinfo; + +/* Perfect hash function for decomposition */ +static int +Decomp_hash_func(const void *key) +{ + static const int16 h[13209] = { + 0, 1515, 4744, 4745, 0, 0, 0, 0, + 0, 0, 0, 0, 3890, 3890, 0, 0, + 3891, 3891, -2046, 2800, 3890, 3890, 3890, -4396, + 4361, 4362, -4441, -4441, -4396, 1773, 1773, 1773, + 4372, 4373, -4438, -4438, -4393, -4393, 2619, 17, + -4347, -4393, -4393, -4393, -4393, -4393, 2619, 2619, + 1560, 4346, 4347, 4348, 1917, 1873, 1874, 1875, + -7856, 4358, 17619, 2622, 2622, 2622, 6357, 6358, + 6359, 6360, 6361, 6362, 6363, 2622, -4390, -4390, + 4414, -5356, -5356, 4374, 4375, -5356, -5356, -6335, + -3020, 2511, -5356, -5356, -3583, -3583, -3583, -3583, + -995, 0, 0, -9799, -9754, 2874, 2875, 2876, + 2877, 2878, -9830, -3591, -9756, -9756, -2744, -5346, + -9710, -9756, 342, -5346, 
-9756, -5346, -2743, -449, + 348, 2894, 2895, -2853, 2897, 2898, 2899, 2900, + 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, + 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, + 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, + 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, + 2933, 2934, 32767, 32767, 32767, 32767, 32767, 32767, + -8721, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1, 32767, 48, 32767, 32767, 32767, 32767, 49, + 32767, 32767, -8687, -8687, -6255, -6210, 32767, 32767, + -8689, -8689, -21949,32767, -18635,-15320,-15320,32767, + -12006,-8691, -8691, -8691, -8691, -8691, 32767, 66, + -8737, -8737, -8692, -8692, -8692, -8692, 73, 74, + 32767, -8738, -8693, -8693, -8693, -8693, -8693, 32767, + 32767, -8695, -8695, -8695, -8695, -8695, 32767, 32767, + 40, 41, -2390, -2434, 44, 45, 32767, 46, + 13307, 9993, 9994, 6680, 6681, 3367, 3368, 54, + 0, 55, 56, 57, -8699, -8699, 105, 32767, + 32767, 61, 62, 63, -8701, -8701, 32767, 111, + 32767, 67, 68, 69, 70, 1890, 3687, -1272, + 3690, 75, 76, 77, 78, 79, 80, 81, + 82, 32767, 32767, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 32767, + 32767, 103, 104, 105, 106, 107, 108, 109, + -8660, -8660, 32767, -8661, -8661, -8661, -8661, -8661, + -8661, 32767, 73, 74, 75, 76, -2355, -2399, + 79, 80, 32767, 32767, 13341, 10027, 10028, 6714, + 6715, 3401, 3402, 32767, 32767, 88, 89, 90, + -8666, -8666, 138, 32767, 32767, 94, 95, 96, + -8668, -8668, 144, 145, 101, -2553, -2553, -2553, + -2553, -4983, -2553, -2553, 154, -2553, 156, 32767, + 32767, 6114, 158, -3153, -3152, -3151, -12891,-6888, + -931, -3149, 166, -3148, -4728, 169, -3147, -3146, + -3145, -3144, -3143, -3142, -3141, -2543, -3139, -3138, + 180, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 3314, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 3660, 3661, 2131, 2132, 2133, 2134, 2135, + 
2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, + 2144, 2145, -5472, -5472, -3612, -3612, -3612, -3612, + -3612, 2652, -3612, -3612, -3612, -3612, -3612, -3612, + -3612, -3612, 3693, -3613, -7015, -7015, 1742, 1743, + -7060, -7060, -7015, -846, -846, -846, 1753, 1754, + -7057, -7057, -7012, -7012, 0, -2602, -6966, -7012, + -7012, -7012, -7012, -7012, 0, 0, 1725, 1726, + 1727, 1728, -703, -747, -746, 0, 1735, 1736, + 14997, 0, 0, 0, 3735, 3736, 3737, 3738, + 3739, 3740, 3741, 0, -7012, -7012, 1792, 1793, + 1749, 1750, 1751, -7980, -7980, -8959, -5644, -113, + -7980, -113, -2382, -6116, -6116, -6116, -6116, -6116, + -6116, -6116, -2374, 4639, 4640, -4163, 5608, 5609, + -4120, -4120, 5612, 5613, 6593, 3279, -2251, 5617, + 5618, 3846, 3847, 3848, 3849, 1262, 1262, 10066, + 10067, 10023, 3855, 3856, 3857, 1259, 1259, 10071, + 3861, 10027, 10028, 3017, 5620, 9985, 10032, -65, + 5624, 10035, 5626, 3024, 731, -65, 1298, 12530, + 3727, 3727, 3772, 3772, 3772, 13504, 13505, 14485, + 11171, 5641, 13509, 5643, 7913, 11648, 11649, 11650, + 11651, 11652, 11653, 11654, 7913, 901, 901, 9705, + -65, -65, 9665, 9666, -65, -65, -1044, 2271, + 7802, -65, -65, 1708, 1708, 1708, 1708, 4296, + 4297, -4506, -4506, -4461, 1708, 1708, 1708, 4307, + 4308, -4503, 1708, -4457, -4457, 2555, -47, -4411, + -4457, 5641, -47, -4457, -47, 2556, 4850, 5647, + 4285, -6946, 1858, 1859, 1815, 1816, 1817, -7914, + -7914, -8893, -5578, -47, -7914, -47, -2316, -6050, + -6050, -6050, -6050, -6050, -6050, -6050, -2308, 4705, + 4706, -4097, 5674, 5675, -4054, -4054, 5678, 5679, + 6659, 3345, -2185, 5683, 5684, 3912, 3913, 3914, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -3083, -3083, 232, 287, 233, 233, + 233, 8990, 8991, 32767, 32767, 3668, 32767, 3667, + 3667, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 
32767, + 32767, 32767, 208, 208, 208, 208, 208, 208, + 32767, 32767, 206, 206, 206, 206, 206, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 304, 305, -1274, 307, 308, + 309, 6753, -1374, 10488, 4486, -1470, 4488, 316, + 4489, -5607, 4490, 4491, 4492, 322, 760, 324, + 325, 326, 166, 763, 329, -2553, 765, 332, + 333, 334, 335, 772, 337, 6310, 339, 340, + 341, 342, 343, 344, 345, 346, -2542, -2542, + -2542, 350, 351, 352, 353, 354, 355, 356, + 357, 358, 359, 360, 361, 362, -6008, 364, + 365, 366, 367, 368, 369, 370, 254, 372, + 373, 374, 375, 376, 377, 378, 379, 380, + 381, 382, 32767, 383, 384, -3606, -3605, -3604, + -3603, 389, -3600, -3599, -3598, 2340, -1238, -3595, + -3594, -3593, 4694, -4062, -4062, 4742, 4743, 4699, + -1469, -1468, -1467, -4065, -4065, 4747, -1463, 4703, + 4704, -2307, 296, 32767, 0, 32767, 32767, 4708, + -1376, -1376, -1376, 32767, 32767, -1246, 506, 506, + 0, -1559, 32767, 32767, 32767, 32767, 32767, 305, + 419, 308, 2578, 6313, 6314, 424, 32767, -6030, + 32767, 426, 427, 428, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 32767, 0, + 32767, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 32767, 429, -5407, 431, + -5406, 433, -3601, 435, 32767, -3751, 32767, 32767, + 32767, 32767, -3755, 32767, 32767, 32767, 32767, 0, + 32767, 32767, 32767, 32767, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 436, -11425,-5422, + 535, -5422, 535, -5422, 4675, -5421, -5421, -5421, + -5421, -5421, 4681, 0, 0, 0, 4682, 4683, + 4684, 4685, 4686, 4687, 0, 0, 32767, 32767, + 0, 0, -5684, 0, 4688, 4689, 4690, 4691, + 4692, 4693, 4694, 4695, -1257, -1257, 4696, -5441, + -5441, 4699, 4700, 4701, -5443, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 454, 0, 32767, 456, + 32767, 32767, 0, 457, 32767, 32767, 32767, 0, + 458, 459, 460, 32767, 0, 32767, 
32767, 32767, + 32767, 32767, 32767, 4703, 4704, 4705, 4706, 32767, + 32767, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 4655, 4656, 4657, 4658, + 4659, 4712, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 462, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 463, 464, 32767, 465, + 32767, 32767, 32767, 466, 32767, 32767, 32767, 32767, + 467, 468, 469, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 3011, 3011, 3011, + 3011, 3011, 3011, 3011, 32767, 32767, 32767, 32767, + 32767, 32767, 470, 471, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 472, + 473, 474, 475, 476, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 4713, 4714, 4715, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 477, 478, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 479, 480, 481, 482, + 32767, 32767, 483, 484, 32767, 32767, 485, 486, + 487, 488, 489, 490, 32767, 32767, 491, 492, + 493, 494, 495, 496, 32767, 32767, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 665, -255, 667, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 693, 694, 695, 696, + 697, 698, 699, 700, 701, 702, 703, 704, + 705, 706, 707, 708, 709, 710, 711, 712, + 7183, 714, -1580, 716, 2547, 718, 7194, 720, + 2553, 722, 723, 7204, 725, 726, 727, 728, + 729, 730, 731, 732, 733, 734, 735, 736, + 0, 0, 8114, 8159, 745, -1535, 747, 748, + 8161, -5019, -5019, -5019, -5019, 1938, 0, 0, + 0, 0, 0, 0, 767, 768, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 32767, 32767, 32767, 32767, 32767, 0, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, -2875, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, -2884, -2884, + -2884, -2884, -2884, -2884, -2884, -2884, -2884, -2884, + -2884, -2884, -4271, -2884, -2884, -2884, -2884, -2884, + -2884, -2884, -2884, -2884, -2884, -2884, -2884, -2884, + -2884, -2884, -2884, -2884, -2884, -2884, -2884, -2884, + -2884, -2884, -2884, -2884, -2884, -2884, -2884, -2884, + -2884, -2884, -2884, 32767, -2885, 32767, -2886, -2886, + 32767, -2887, -2887, 32767, -2888, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 563, 564, + 565, 566, 567, 568, 569, 570, 571, 572, + 573, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 574, 575, 576, 577, 578, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, -294, -294, -294, -3047, 583, 584, 585, + -4462, -4418, -4418, -4418, -4418, -4418, -4462, -4462, + -4462, 595, 596, 597, 598, 599, 32767, 32767, + 32767, 32767, -4471, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 4716, 4717, 4718, 4719, + 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, + 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, + 3826, 4737, 4738, 4739, 4740, 4741, 
4742, 3832, + 4744, 3833, 3120, 3121, 3835, 3835, 3124, 3836, + 3836, 4753, 4754, 4755, 4756, 4757, 4758, 4759, + 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, + 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, + 4776, 4777, 4778, 4779, 4780, 4781, 6619, 6620, + 6621, 11272, 6623, 6624, 4788, 4789, 4790, 3874, + 4761, 3874, 4794, 3874, 4796, 4797, 4798, 3874, + 4800, 32767, 0, 4802, 4803, 4804, 4805, 4806, + 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, + 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, + 4823, 4824, 4825, 4826, 4827, 4828, 11299, 4830, + 2536, 4832, 6663, 4834, 11310, 4836, 6669, 4838, + 4839, 11320, 4841, 4842, 4843, 4844, 4845, 4846, + 4847, 4848, 4849, 4850, 4851, 4852, 1188, 4854, + 4855, 4856, 4857, 2577, 4859, 4860, 12273, -907, + -907, -907, -907, -907, -907, 4868, 4869, 4870, + 4871, 32767, 4872, 4873, 32767, 32767, 4874, 32767, + 627, 4875, 4876, 32767, 32767, 4877, 4878, 4879, + 6722, 32767, 4881, 4882, 4883, 6730, 6731, 7446, + 6733, 4888, 7449, 7449, 4891, 4892, 32767, 4893, + 32767, 4894, 4895, 4896, 4897, 4898, 4899, 3512, + 3513, 3514, 3515, 3516, 4904, 3518, 3519, 3520, + 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, + 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, + 3537, 3538, 4926, 6797, 4928, 6800, 4930, 4931, + 4932, 4933, 4934, 4935, 6813, 4937, 4938, 6816, + 6817, 4941, 4942, 4943, 0, 4945, 6821, 0, + 0, 4949, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, -127, -127, -127, + 7285, -127, -127, 0, -128, -128, -128, -128, + 0, 32767, -130, 4971, -129, 5613, 5614, 5615, + 4976, 5618, 32767, 5619, 5620, 5621, 4981, 5624, + 4983, 4984, 32767, 5630, 5631, -1986, -1986, -126, + -126, 5078, 4992, 5037, 5038, 5039, 5040, 5041, + 5086, 5087, 5088, 5089, -2322, 5091, 5092, 5093, + 5094, 5095, 5096, 5097, 5098, 5099, 5100, 0, + 5101, -640, -640, -640, 0, -641, -641, -641, + -641, -641, 0, -642, 0, 0, 32767, -645, + -645, 6973, 6974, 5115, 5116, -87, 0, -44, + -44, -44, -44, -44, -88, -88, -88, -88, + 7324, -88, -88, -88, -88, -88, -88, 
-88, + -88, -88, -88, -88, -88, 5654, 5655, 5656, + 5657, 5658, 5659, 5660, 5661, 5662, 5663, 5664, + 5665, 5666, 5667, 5668, 5669, -1948, -1948, -88, + -88, 5116, 5117, 5074, 5075, 5076, 5077, 5078, + 5123, 5124, 5125, 5126, -2285, 5128, 5129, 5130, + 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, + 5139, -602, -602, -602, -602, -602, -602, -602, + -602, -602, -602, -602, -602, -602, -602, -602, + -602, 7016, 7017, 5158, 5159, -44, -44, 0, + 0, 0, 0, 0, -44, -44, -44, -44, + 7368, -44, -44, -44, -44, -44, -44, -44, + -44, -44, -44, -44, -44, 5698, 5699, 5700, + 5701, 5702, 5703, 5704, 5705, 5706, 5707, 5708, + 5709, 5710, 5711, 5712, 5713, -1904, -1904, -44, + -44, 5160, 5161, 5118, 5119, 5120, 5121, 5122, + 5167, 5168, 5169, 5170, -2241, 5172, 5173, 5174, + 5175, 5176, 5177, 5178, 5179, 5180, 5181, 5182, + 5183, -558, -558, -558, -558, -558, -558, -558, + -558, -558, -558, -558, -558, -558, -558, -558, + -558, 7060, 7061, 5202, 5203, 0, 0, 44, + 44, 44, 44, 44, 0, 0, 0, 0, + 7412, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 5742, 5743, 5744, + 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, + 5753, 5754, 5755, 5756, 5757, -1860, -1860, 0, + 0, 0, 0, 0, 6264, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, -3402, + -3402, 5355, 5356, -3447, -3447, -3402, -3402, -3402, + -3402, 5363, 5364, -3447, -3447, -3402, -3402, -3402, + -3358, -3358, -3404, -3404, -3404, -3404, -3404, -3404, + -3404, 5331, 5332, 5333, 5334, 2903, 2859, 5337, + 5338, 5339, 5340, 18601, 15287, 15288, 11974, 11975, + 8661, 8662, 5348, 5349, 5350, 5351, 5352, -3404, + -3404, 5400, 5401, 5357, 5358, 5359, 5360, -3404, + -3404, 5408, 5409, 5365, 5366, 5367, 5324, 5325, + 5372, 5373, 5374, 5375, 5376, 5377, 5378, -3356, + -3356, -3356, -3356, -924, -879, -3356, -3356, -3356, + -3356, -16616,-13301,-13301,-9986, -9986, -6671, -6671, + -3356, -3356, -3356, -3356, -3356, 5401, 5402, -3401, + -3401, -3356, -3356, -3356, -3356, 5409, 5410, -3401, + -3401, -3356, -3356, -3356, -3312, -3312, -3358, -3358, + -3358, -3358, 
-3358, -3358, -3358, 5377, 5378, 5379, + 5380, 2949, 2905, 5383, 5384, 5385, 5386, 18647, + 15333, 15334, 12020, 12021, 8707, 8708, 5394, 5395, + 5396, 5397, 5398, -3358, -3358, 5446, 5447, 5403, + 5404, 5405, 5406, -3358, -3358, 5454, 5455, 5411, + 5412, 5413, 5414, 5415, 5416, 5417, 5418, 5419, + 5420, 5421, 5422, -3312, -3312, -3312, -3312, -880, + -835, -3312, -3312, -3312, -3312, -16572,-13257,-13257, + -9942, -9942, -6627, -6627, -3312, -3312, -3312, -3312, + -3312, 5445, 5446, -3357, -3357, -3312, -3312, -3312, + -3312, 5453, 5454, -3357, -3357, -3312, -3312, -3312, + -3312, -3312, -3312, -3312, -3312, -3312, -3312, -3312, + -3312, 5423, 5424, 5425, 5426, 2995, 2951, 5429, + 5430, 5431, 5432, 18693, 15379, 15380, 12066, 12067, + 8753, 8754, 5440, 5441, 5442, 5443, 5444, -3312, + -3312, 5492, 5493, 5449, 5450, 5451, 5452, -3312, + -3312, 5500, 5501, 5457, 2803, 2803, 2803, 2803, + 373, 2803, 2803, 5510, 2803, 5512, 11470, 5514, + 11472, 5516, 2205, 2206, 2207, -7533, -1530, 4427, + 2209, 5524, 2210, 630, 5527, 2211, 2212, 2213, + 2214, 2215, 2216, 2217, 2815, 2219, 2220, 5538, + 2221, 5540, 2222, 5542, 5543, 2223, -3312, -3312, + -3312, 5548, 5549, -3312, -3312, 2803, 2803, 2803, + 5555, 5556, 5557, 2803, 2803, 2803, 2803, 2803, + 2803, 2803, 2803, 2803, 2803, 2803, 2803, 2803, + 9050, 9051, 2803, 2803, 2803, 2803, 2803, 2803, + 2803, 2803, 2803, 2803, 2803, 2803, 4318, 7547, + 7548, 2803, 2803, 2803, 2803, 2803, 2803, 2803, + 2803, 6693, 6693, 2803, 2803, 6694, 6694, 757, + 5603, 6693, 6693, 6693, -1593, 7164, 7165, -1638, + -1638, -1593, 4576, 4576, 4576, 7175, 7176, -1635, + -1635, -1590, -1590, 5422, 2820, -1544, -1590, -1590, + -1590, -1590, -1590, 5422, 5422, 4363, 7149, 7150, + 7151, 4720, 4676, 4677, 4678, -5053, 7161, 20422, + 5425, 5425, 5425, 9160, 9161, 9162, 9163, 9164, + 9165, 9166, 5425, -1587, -1587, 7217, -2553, -2553, + 7177, 7178, -2553, 32767, 32767, -219, 5312, -2555, + -2555, -782, -782, -782, -782, 1806, 2801, 2801, + -6998, -6953, 
5675, 5676, 5677, 5678, 5679, -7029, + -790, -6955, -6955, 57, -2545, -6909, -6955, 3143, + -2545, -6955, -2545, 58, 2352, 3149, 5695, 5696, + -52, 5698, 5699, 5700, 5701, 5702, 5703, 5704, + 5705, 5706, 5707, 5708, 5709, 5710, 5711, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, -1838, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 6927, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -973, 32767, 32767, + 32767, 32767, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 4567, 4568, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -437, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -448, 32767, 32767, -450, -450, + -450, 0, 32767, 32767, 32767, -2166, 32767, 32767, + 32767, 32767, 32767, 32767, 0, 0, 32767, -464, + -464, 32767, 0, 32767, 32767, 32767, 32767, 
32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -514, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 5757, 5758, 5759, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -4186, -4186, -12097,-4186, 32767, + -4187, -4187, -8787, 32767, 0, 0, 5952, 0, + 0, -4183, -4183, -4183, 0, -2386, -4182, 778, + -4183, -5935, 32767, 32767, -4690, -6249, -4184, -4184, + -4184, 32767, 32767, -4186, -4186, -77, 32767, -77, + 32767, -4188, 0, -4189, 32767, 0, 0, 0, + 0, 32767, 0, 0, 0, 32767, 0, 0, + 0, 0, 0, 0, 0, 32767, 0, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 0, 0, 0, 0, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, -5937, -2358, 0, 0, 0, + -8286, 471, 472, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 1747, 32767, -2126, 32767, 32767, 1748, + 1749, 1750, 1751, 1752, 1753, 8224, 1755, -539, + 1757, 781, 32767, 32767, 32767, -1991, -2035, 32767, + 32767, 782, -3784, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 837, 32767, 32767, 32767, 32767, 32767, -4008, + -4008, -4008, 2949, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, -797, 1806, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 4605, 4606, + 32767, 32767, 0, 455, 32767, 0, 32767, 32767, + 32767, 0, 32767, 32767, 32767, 32767, 0, 0, + 0, 32767, 32767, 32767, 32767, 
32767, 32767, 32767, + 32767, 32767, 32767, -4244, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 784, 32767, 32767, 2950, 2951, 32767, 32767, 32767, + 32767, 32767, 32767, 786, 787, 32767, 1252, 1253, + 32767, 790, 32767, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 32767, 0, 32767, 32767, + 32767, 0, 32767, 32767, 32767, 32767, 0, 0, + 0, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 0, + 0, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -200, -200, -200, + -200, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + -5932, -5932, 32767, 32767, 2952, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -5387, + -5387, -5387, -5387, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 0, 0, 32767, 32767, + 0, 0, 32767, 32767, 0, 0, 0, 0, + 0, 0, 32767, 32767, 0, 0, 0, 0, + 0, 0, 32767, 32767, 497, 498, 499, 500, + 501, 502, 503, 504, 505, 506, 507, 508, + 32767, 32767, -156, 765, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -861, + 32767, 6106, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 2953, 2954, 32767, 797, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 
2955, 32767, 32767, 32767, -8929, + 32767, -8885, -8885, -8885, 32767, 32767, 32767, 32767, + 32767, 32767, -749, 7119, 7120, 32767, 32767, 32767, + 32767, 2760, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, -1181, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -5587, 0, 7596, + 7597, 0, 0, 0, 0, 0, 0, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, -714, 0, + 0, -713, -712, 0, -711, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1859, + 0, 3247, 32767, 32767, 0, 3247, 0, 3248, + 0, 3249, 0, 3250, 0, 3251, 0, 3252, + 808, 3252, 0, 3253, 0, 3254, 0, 0, + 3256, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 32767, 32767, 32767, 32767, 0, 0, 6824, + 32767, 0, 32767, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 4207, 4208, 0, 0, 0, 0, 0, 1896, + 0, 0, 1898, 1898, 1898, 1898, 0, 0, + 0, 1901, 1901, 0, 0, 0, 0, 0, + 0, -1319, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 7618, 7619, 7620, + 3, 3, 1863, 1863, 7067, 7068, 7025, 7026, + 7027, 7028, 7029, 7074, 7075, 7076, 7077, -334, + 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, + 7087, 7088, 7089, 7090, 1349, 1349, 1349, 1349, + 1349, 1349, 1349, 1349, 1349, 1349, 1349, 1349, + 1349, 1349, 1349, 1349, 8967, 8968, 7109, 7110, + 1907, 1907, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 2976, 2977, 2978, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 0, 0, 820, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 821, + 2381, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 2005, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 823, 32767, 824, 32767, + 825, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 826, 32767, 32767, 
32767, 32767, 32767, + 32767, 4575, 4576, 4577, 4578, 4579, 4580, 4581, + 4582, 4583, 4584, 4585, 32767, 32767, 829, 32767, + 32767, 32767, 32767, 830, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 6253, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 6253, -3848, 834, 835, 836, -3845, -3845, -3845, + -3845, -3845, -3845, 843, 844, -4280, 32767, 845, + 846, 6531, 848, -3839, 32767, -3840, -3840, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 1946, 32767, + 32767, 32767, -3849, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 853, 32767, 32767, 32767, + 32767, 854, 32767, 32767, 32767, 32767, 855, 32767, + 32767, 32767, 32767, 856, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 857, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -3799, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 8266, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 859, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 860, + 32767, 861, -5065, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 10746, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, 
+ 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 4526, + 32767, 4573, 4574, 4575, 32767, 32767, -2436, -1376, + 32767, 32767, 32767, 32767, 32767, -1689, -1689, 4349, + -4171, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 4588, 32767, + 4589, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 4590, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 4591, 4592, 32767, + 32767, 32767, 32767, 32767, 32767, 2933, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 864, 32767, 32767, 32767, + 0, 32767, 0, 32767, 32767, -2977, 335, 335, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 2992, 2993, 2994, 2995, + 32767, 32767, 32767, 4596, 2550, 32767, 32767, 32767, + -1188, 4769, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 
4600, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 2997, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 4601, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 2013, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -11287,32767, 32767, 32767, 32767, + 32767, 32767, 32767, -4664, 32767, 32767, -4711, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, -4718, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 4049, + 32767, 32767, 32767, 4050, 4051, 4052, 17313, 32767, + 32767, 32767, 10684, 7370, 7371, 4057, 4058, 4059, + 4060, 4061, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 4603, 8793, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1283, 4897, 4898, 4899, 12175, 4901, 4902, 32767, + 4903, 4904, 4905, 4906, 4907, 10276, -1469, 1282, + 1282, 1282, 1282, 1282, 1282, 1282, 1282, 1282, + 1282, 32767, 32767, 4920, 4921, 4063, -2051, -2050, + 4925, 4926, 32767, 
7332, 7333, 32767, 7334, 7335, + 7336, 7337, 5045, 32767, 32767, 32767, -2049, -2048, + 32767, -8294, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1132, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 20166, 16852, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 6908, 6909, 6910, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + -4510, -4510, -4510, -4510, -4510, -4510, -4510, 0, + 0, 0, 0, 0, 0, -1831, -1831, -1831, + -15091,-11776,-11776,-8461, 0, 0, 0, -1834, + -1834, -1834, -1834, -1834, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, -1819, -3615, 1345, -3616, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 32767, 0, + 0, 0, 0, 0, 0, 0, 8770, 8771, + 8772, 8773, 8774, 8775, 8776, 8777, 8778, 8779, + 45, 45, 45, 45, 2477, 2522, 45, 45, + 45, 45, -13215,-9900, -9900, -6585, -6585, -3270, + -3270, 45, 45, 45, 45, 45, 8802, 8803, + 0, 0, 45, 45, 45, 45, 8810, 8811, + 0, 0, 45, 2700, 2701, 2702, 2703, 5134, + 2705, 2706, 0, 2708, 0, -5957, 0, -5957, + 0, 3312, 3312, 3312, 13053, 7051, 1095, 3314, + 0, 3315, 4896, 0, 
3317, 3317, 3317, 3317, + 3317, 3317, 3317, 2720, 3317, 3317, 0, 3318, + 0, 3319, 0, 0, 3321, 8857, 8858, 8859, + 0, 0, 8862, 8863, 2749, 2750, 2751, 0, + 0, 0, 2755, 2756, 2757, 2758, 2759, 2760, + 2761, 2762, 2763, 2764, 2765, 2766, 2767, -3479, + -3479, 2770, 2771, 2772, 2773, 2774, 2775, 2776, + 2777, 2778, 2779, 2780, 2781, 1267, -1961, -1961, + 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, + -1097, -1096, 2795, 2796, -1094, -1093, 4845, 0, + -1089, -1088, -1087, 7200, -1556, -1556, 7248, 7249, + 7205, 1037, 1038, 1039, -1559, -1559, 7253, 7254, + 7210, 7211, 200, 2803, 7168, 7215, 7216, 7217, + 7218, 7219, 208, 209, 1269, -1516, -1516, -1516, + 916, 961, 961, 961, 10693, -1520, -14780,218, + 219, 220, -3514, -3514, -3514, -3514, -3514, -3514, + -3514, 228, 7241, 7242, -1561, 8210, 8211, -1518, + -1518, 8214, 8215, 9195, 5881, 351, 8219, 8220, + 6448, 6449, 6450, 6451, 3864, 2870, 2871, 12671, + 12627, 0, 0, 0, 0, 0, 12709, 6471, + 12637, 12638, 5627, 8230, 12595, 12642, 2545, 8234, + 12645, 8236, 5634, 3341, 2545, 0, 0, 5749, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 0, 0, 0, 11602, + 0, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 1466, + 0, 0, 32767, 32767, 32767, 32767, 32767, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 5760, 0, 0, 0, 0, 0, 32767, + 0, 32767, 0, 0, 32767, 0, 0, 32767, + 0, 3507, 3508, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1644, 1645, 1646, 1647, -5764, 1649, 1650, 1651, + 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, + 1660, -4081, -4081, -4081, -4081, -4081, -4081, -4081, + 
-4081, -4081, -4081, -4081, -4081, -4081, -4081, -4081, + -4081, 3537, 3538, 1679, 3582, 3583, 3584, -3482, + -3482, -3482, -3482, -3482, -3526, -3526, -3526, -3526, + 3886, -3526, -3526, -3526, -3526, 3599, 3600, 3601, + 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, + 3610, 3611, 3612, 3613, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 0, + -7275, 0, 0, -7234, 0, 0, 0, 0, + 0, -5368, 6378, 3628, 3629, 3630, 3631, 3632, + 3633, 3634, 3635, 3636, 3637, 3638, 3639, 0, + 0, 859, 6974, 6974, 0, 0, 3647, -2405, + -2405, 3650, -2405, -2405, -2405, -2405, -112, -2405, + -3201, 3658, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 0, 32767, 32767, 32767, + 32767, 5280, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 4637, 4638, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 
32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 4014, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 802, 32767, 32767, + 32767, 32767, 803, -1055, 805, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 4639, 32767, + 32767, 32767, 806, -2445, 0, -2443, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 810, 32767, 32767, + 32767, 32767, 811, 812, 813, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -6211, -6211, -6211, -6211, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, -6271, -6271, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 935, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, -10300,32767, 32767, 32767, + 32767, 
32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 0, 0, 32767, 32767, 4640, 4641, 32767, + 32767, 32767, 32767, 32767, 4624, 32767, 32767, 32767, + -4233, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1859, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 872, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -4568, -1253, 32767, + -3590, 32767, 32767, 32767, -1820, -1820, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 
32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 0, 0, 0, 0, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 873, 874, 875, 3629, 0, 0, + 0, 5048, 5005, 5006, 5007, 5008, 5009, 5054, + 5055, 5056, 0, 0, 0, 0, 0, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -4118, + 32767, 32767, 32767, 32767, -4122, -4122, -4122, -4122, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -4193, + 32767, -4194, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -4209, 32767, 32767, -4211, -4211, -4211, + -4211, -4211, -4211, -4211, 32767, 32767, -4213, -10683, + -4213, -1918, -4213, -6043, 32767, 32767, -4215, -6047, + 32767, -4216, -10696,-4216, -4216, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 4646, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 876, 
877, 0, 32767, 0, 32767, 0, + 32767, 0, 32767, 0, 32767, 32767, 32767, 0, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 1844, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 0, 0, 0, 0, + 0, 0, 0, 0, 0, -2899, 0, 32767, + 0, 32767, 0, 32767, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 836, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 32767, 0, 0, 0, 879, + 880, 881, 882, 883, 884, 885, 886, 0, + 0, 887, 0, 920, 0, 922, 923, 924, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 5431, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 0, 0, + 0, 32767, 3639, 889, 890, 891, 892, 893, + 894, 895, 896, 897, 898, 899, 900, -2739, + 927, -1881, 4234, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -459, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -458, + -457, 904, 32767, 905, 32767, 906, 32767, 907, + 32767, 908, 32767, 32767, 32767, 909, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 910, + 0, 0, 0, 0, 0, 0, 911, 0, + 912, 1626, 1626, 913, 914, 1626, 915, 916, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, -1837, -1837, -1837, + -6487, -1837, -1837, 0, 0, 0, 917, 31, + 919, 0, 921, 0, 0, 0, 925, 0, + 32767, 4801, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
-6470, 0, 2295, + 0, -1830, 0, -6475, 0, -1832, 0, 0, + -6480, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3665, 0, 0, + 0, 0, 2281, 0, 0, -7412, 5769, 5770, + 5771, 5772, 5773, 5774, 0, 0, 0, 0, + 32767, 0, 0, 32767, 32767, 0, 32767, 32767, + 0, 0, 32767, 32767, 0, 0, 0, -1842, + 32767, 0, 0, 0, -1846, -1846, -2560, -1846, + 0, -2560, -2559, 0, 0, 32767, 0, 32767, + 0, 0, 0, 0, 0, 0, 1388, 0, + 1387, 1387, 1387, 0, 1387, 1387, 1387, 1387, + 1387, 1387, 1387, 1387, 1387, 1387, 1387, 1387, + 1387, 1387, 1387, 1387, 1387, 1387, 1387, 1387, + 1387, 0, -1870, 0, -1871, 0, 0, 0, + 0, 0, 0, -1877, 0, 0, -1877, -1877, + 0, 0, 0, 4944, 0, -1875, 4947, 4948, + 0, 4950, 4951, 4952, 4953, 4954, 4955, 4956, + 4957, 4958, 4959, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 0, + 32767, 32767, 0, 0, 0, 0, 32767, 32767, + 32767, 0, 0, 931, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 4650, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 5375, + 5376, 5377, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 13180, 0, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, -4011, 933, -4011, 32767, + 935, 936, -4012, 938, 939, 
940, 941, 942, + 943, 944, 945, 946, 947, 32767, 1075, 1076, + 1077, -6334, 1079, 1080, 954, 32767, 32767, 32767, + 32767, 955, 32767, 32767, 32767, 32767, 32767, 32767, + -4659, 32767, 32767, 32767, -4662, -4662, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 959, 960, 961, 32767, 962, 963, 964, + 965, 966, 967, 968, 969, 970, 971, 972, + 32767, 973, 974, 975, 976, 977, 978, 979, + 980, 981, 982, 983, 984, 985, 986, 987, + 988, 989, 990, 32767, 991, 992, 993, 994, + 995, 996, 997, 998, 999, 1000, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, + 1011, 1012, 1013, 1014, 1015, 1016, 1017, -362, + -362, 32767, 32767, 32767, 32767, -410, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 1019, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 164, 1021, -3551, -3551, 1024, 1025, 1026, 1027, + 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, + 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, + 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, + 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, + 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, + 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, + 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, + 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, + 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, + 1108, 1109, 1110, 1111, 1112, 1113, 1114, 32767, + 1115, 1116, 1117, 1118, 1119, 32767, 1120, 1121, + 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, + 1130, 1131, 0, 1133, 
1134, 1135, 1136, 1137, + 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, + 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, + 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, + 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, + 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, + 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, + 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, + 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, + 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, + -18956,-15641,1212, 1213, 1214, 1215, 1216, 1217, + 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, + -5682, -5682, -5682, 1229, 1230, 1231, 1232, 1233, + 1234, 1235, 1236, 1237, 1238, 1239, 5750, 5751, + 5752, 5753, 5754, 5755, 5756, 1247, 1248, 1249, + 1250, 1251, 1252, 3084, 3085, 3086, 16347, 13033, + 13034, 9720, 1260, 1261, 1262, 3097, 3098, 3099, + 3100, 3101, 1268, 1269, 1270, 1271, 1272, 1273, + 1274, 1275, 32767, 32767, 32767, 32767, 1276, 1277, + 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, + 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, + 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, + 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, + 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, + 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, + 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, + 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, + 1342, 3162, 4959, 0, 4962, 1347, 1348, 1349, + 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, + 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 7481, + 7482, 7483, 7484, 5053, 5009, 7487, 7488, 7489, + 7490, 20751, 17437, 17438, 14124, 14125, 10811, 10812, + 7498, 7499, 7500, 7501, 7502, 32767, 32767, 7548, + 7549, 7505, 7506, 7507, 7508, 32767, 32767, 7554, + 7555, 7511, 4857, 4857, 4857, 4857, 2427, 4857, + 4857, 7564, 4857, 7566, 13524, 7568, 13526, 7570, + 4259, 4260, 4261, -5479, 524, 6481, 4263, 7578, + 4264, 2684, 1421, -7842, -4527, -4527, -1212, -1212, + -1212, -1212, -1212, 7545, 7546, 0, 0, 
-1214, + -1214, -1214, -1214, 7551, 7552, 32767, 1610, -1216, + 1439, 1440, 1441, 1442, 3873, 1444, 1445, 32767, + 1446, 32767, -7220, 32767, -7221, 0, 2047, 2047, + 2047, 11788, 5786, -170, 2049, -1265, 2050, 3631, + -1265, 2052, 2052, 2052, 2052, 2052, 2052, 2052, + 1455, 2052, 2052, -1265, 2053, -1265, 2054, -1265, + -1265, 2056, 7592, 7593, 7594, 32767, 32767, 7595, + 7596, 1482, 1483, 1484, -1267, -1267, -1267, 1488, + 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, + 1497, 1498, 1499, 1500, -4746, -4746, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1513, 1514, 0, -3228, -3228, 1518, 1519, 1520, + 1521, 1522, 1523, 1524, 1525, -2364, -2363, 1528, + 1529, -2361, -2360, 3578, 0, -2357, -2356, -2355, + 5932, -2824, -2824, 5980, 5981, 5937, -231, -230, + -229, -2827, -2827, 5985, -225, 5941, 5942, -1069, + 1534, 5899, 5946, 5947, 5948, 5949, 5950, -1061, + -1060, 0, -2785, 0, -355, -355, -310, -310, + -310, 9422, -2791, 32767, -1054, -1053, -1052, -4786, + -4786, -4786, -4786, -4786, -4786, -4786, -1044, 5969, + 5970, -2833, 6938, 6939, -2790, -2790, 6942, 0, + 32767, 4607, -923, 6945, 32767, 5173, 5174, 5175, + 5176, 2589, 1595, 1596, 11396, 11352, 32767, 32767, + 6126, 2812, 2813, 2814, 2815, 2816, -5940, -5940, + 1607, 1608, 2823, 32767, 32767, 1516, 0, -8581, + 0, 0, 728, 1525, 163, -11068,0, -2262, + -2306, -2305, 32767, 32767, 0, 0, 1580, 0, + 0, 0, -6443, 1685, -10176,-4173, 1784, -4173, + 0, -4172, 5925, -4171, -4171, -4171, 0, -437, + 0, 0, 0, 161, -435, 0, 2883, -434, + 0, 0, 0, 0, -436, 0, -5972, 0, + 0, 0, 0, 0, 0, 0, 0, 2889, + 2890, 2891, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 6371, + 0, 0, 0, 0, 0, 0, 0, 117, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 32767, 0, 0, 3991, 3991, + 3991, 3991, 0, 3990, 3990, 3990, -1947, 1632, + 3990, 3990, 3990, -4296, 4461, 4462, -4341, -4341, + -4296, 1873, 1873, 1873, 4472, 4473, -4338, 1873, + -4292, -4292, 2720, 118, -4246, -4292, -4292, 117, + -4293, -4293, 2719, 2719, 1660, 4446, 1662, 2018, + 
2019, 1975, 1976, 1977, -7754, -7754, -8733, -5418, + 113, 0, 112, -2157, -5891, -5891, 0, -5892, + 6455, -5893, 0, 0, 0, 32767, 32767, 32767, + 5826, 32767, 32767, 32767, 32767, 6806, 32767, -2039, + 32767, 5829, 32767, 5830, 5831, 5832, 32767, 5833, + 5834, 32767, 5835, 32767, 32767, -3520, 0, 5837, + 0, 5838, 0, 4035, 0, 5840, 32767, 10251, + 154, 1671, 10253, 1673, 1674, 947, 151, 1514, + 12746, 1679, 3942, 3987, 3987, 3987, 13719, 13720, + 14700, 103, 5855, 13723, 5857, 8127, 0, 11862, + 5860, -96, 5862, 1690, 5863, -4233, 5864, 5865, + 5866, 5867, 5868, 5869, 5870, 5871, 5872, 5873, + 32767, 5874, 5875, 5876, 5877, 5878, 5879, 5880, + 5881, 5882, 5883, 13795, 5885, 5886, 5887, 5888, + 10489, 5890, 1703, 1704, -4247, 1706, 1707, 5891, + 5892, 5893, 1711, 4098, 5895, 5896, 5897, 7650, + 32767, 5899, 6406, 7966, 5902, 5903, 5904, 5905, + 5906, 5907, 5908, 1800, 5910, 1801, 5912, 5913, + 5914, 5915, 32767, 1727, 1728, 1729, 1730, 32767, + 1731, 1732, 1733, 32767, 1734, 1735, 1736, 1737, + 1738, 1739, 1740, 32767, 1741, 1742, 1743, 1744, + 1745, 1746, 32767, 32767, 32767, 32767, 1747, 1748, + 1749, 1750, 1751, 32767, 32767, 32767, 32767, 32767, + 32767, 1752, 1753, 1754, 1755, 1756, 1757, 1758, + 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, + 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, + 1791, 7729, 4151, 1794, 1795, 1796, 10083, 1327, + 1327, 10131, 10132, 10088, 3920, 3921, 3922, 1324, + 1324, 10136, 3926, 10092, 10093, 3082, 5685, 10050, + 10097, 0, 5689, 10100, 5691, 3089, 796, 0, + 1363, 12595, 3792, 3792, 3837, 3837, 3837, 13569, + 13570, 14550, 11236, 5706, 13574, 5708, 7978, 11713, + 11714, 11715, 11716, 11717, 11718, 11719, 7978, 966, + 966, 9770, 0, 0, 9730, 9731, 0, 0, + -979, 2336, 7867, 0, 0, 32767, 0, 0, + 0, 32767, 0, 0, 32767, 0, 32767, 32767, + 9356, 32767, 0, 32767, 0, 32767, 1804, 2602, + 0, -4364, -4410, 5688, 0, -4410, 0, 2603, + 4897, 
5694, 4332, -6899, 1905, 1906, 1862, 1863, + 1864, -7867, -7867, -8846, -5531, 0, -7867, 0, + -2269, -6003, -6003, 0, 5957, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, -7911, 0, + 0, 0, 0, -4600, 0, 0, 4156, 32767, + 32767, 0, 0, 0, 0, 0, 1796, 0, + 0, 0, -1752, 0, 0, -506, -2065, 0, + 0, 0, 0, 0, 0, 0, 4109, 0, + 4110, 0, 0, 0, 0, 0, 4111, 17372, + 0, 14058, 10744, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, -4650, 0, 0, 4161, 32767, + 32767, 4117, 32767, 4118, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -7946, 32767, -4632, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -4642, + -4642, 4123, 4124, -4687, 0, 0, -4644, -4644, + 0, 0, -4646, -4646, 32767, 32767, 32767, 32767, + 32767, 32767, 4084, 4085, 32767, 32767, 1609, 4087, + 32767, 32767, 4088, 17349, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 10092, 4136, + 10094, 4138, 10096, 0, 10097, 10098, 10099, 10100, + 10101, 0, 32767, 32767, 32767, 0, 0, 0, + 0, 0, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 0, 0, 0, 0, + 0, 0, 0, 32767, 32767, 0, 10138, 10139, + 0, 0, 0, 10145, 32767, 32767, 32767, 32767, + 32767, 32767, -1425, 8316, 2314, -3642, 32767, 0, + 32767, 32767, 32767, 32767, -1426, -1426, -1426, -1426, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 0, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 52, 52, 52, 52, 52, + 0, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 1849, 
1850, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, -5633, -5633, -5633, -5633, -5633, -5633, -5633, + -5633, -5633, -5633, -5633, -5633, -5633, -5633, -5633, + -5633, 1985, 1986, 127, 2030, 2031, 2032, -5034, + 32767, 32767, 32767, 32767, 32767, 0, 32767, 32767, + 32767, 5916, 5917, 5918, 5919, 5920, 5921, 5922, + 5923, 5924, 8824, 5926, 32767, 32767, 0, 32767, + 0, 5927, 5928, 5929, 5930, 5931, 5932, 5933, + 5934, 5935, 5936, 5937, 5938, 5939, 5940, 5105, + 5942, 5943, 5944, 5945, 5946, 5947, 5948, 5949, + 5950, 5951, 5952, 5953, 5954, 5955, 5956, 5957, + 32767, 5958, 5959, 5960, 5082, 5082, 5082, 5082, + 5082, 5082, 5082, 5082, 5969, 5970, 5084, 5972, + 5053, 5974, 5053, 5053, 5053, 5978, 5979, 5980, + 5981, 5982, 5983, 5984, 5985, 5986, 5987, 5988, + 5989, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 2552, 32767, 32767, 32767, + 32767, 32767, 32767, 5990, 5991, 5992, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 5993, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 6936, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 
32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 1851, 1852, 1853, 1854, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1200, 2121, 1200, 1868, 1869, 1870, + 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, + 1879, 1880, 1188, 1188, 1188, 1188, 1188, 1188, + 1188, 1188, 1188, 1188, 1188, 1188, 1188, 1188, + 1188, 1188, 1188, 1188, 1188, 1188, -5282, 1188, + 3483, 1188, -642, 1188, -5287, 1188, -644, 1188, + 1188, -5292, 1188, 1188, 1188, 1188, 1188, 1188, + 1188, 1188, 1188, 1188, 1188, 1188, 1925, 1926, + -6187, -6231, 1184, 3465, 1184, 1184, -6228, 6953, + 6954, 6955, 6956, 0, 1939, 1940, 1941, 1942, + 1943, 1944, 1178, 1178, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, + 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 0, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 709, 666, 667, 668, 32767, 669, + 714, 715, 716, 717, -6694, 719, 720, 721, + 32767, 722, 723, 724, 32767, 725, 726, 727, + 728, -5013, 32767, 32767, 32767, 32767, 32767, 
32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 6052, 0, 0, 6055, + 0, 0, 0, 0, 2293, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 1244, 1245, 1246, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -4660, + -4660, -4660, -4660, 4097, 4098, -4705, -4705, -4660, + -4660, -4660, -4660, 4105, 4106, -4705, 32767, -4661, + -4661, -4661, -4617, -4617, -4663, -4663, -4663, -4663, + -4663, -4663, -4663, 4072, 4073, 4074, 4075, 1644, + 1600, 4078, 4079, 4080, 4081, 17342, 14028, 14029, + 10715, 10716, 7402, 7403, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 
32767, 32767, 32767, 32767, 32767, 0, 0, + 0, 32767, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 32767, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1380, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 856, 0, 4573, + 4574, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 5204, 5161, 5162, 5163, 5164, 5165, 5210, 5211, + 5212, 5213, -2198, 5215, 5216, 5217, 5218, 5219, + 5220, 5221, 5222, 5223, 5224, 5225, 5226, -515, + -515, -515, -515, -515, -515, -515, -515, -515, + -515, -515, -515, -515, -515, -515, -515, 7103, + 7104, 5245, 5246, 5247, 5248, 5249, -1014, 5251, + 5252, 5253, 5254, 5255, 5256, 5257, 5258, 5259, + 5260, 8663, 8664, -92, -92, 8712, 8713, 8669, + 8670, 8671, 8672, -92, -92, 8720, 8721, 8677, + 8678, 8679, 8636, 8637, 8684, 8685, 8686, 8687, + 8688, 8689, 8690, -44, -44, -44, -44, 2388, + 2433, -44, -44, -44, -44, -13304,-9989, -9989, + -6674, -6674, -3359, -3359, -44, -44, -44, -44, + -44, 8713, 8714, -89, -89, -44, -44, -44, + -44, 8721, 8722, -89, -89, -44, -44, -44, + 0, 0, -46, -46, -46, -46, -46, -46, + -46, 8689, 8690, 8691, 8692, 6261, 6217, 8695, + 8696, 8697, 8698, 21959, 18645, 18646, 15332, 15333, + 12019, 12020, 8706, 8707, 8708, 8709, 8710, -46, + -46, 8758, 8759, 8715, 8716, 
8717, 8718, -46, + -46, 8766, 8767, 8723, 8724, 8725, 8726, 8727, + 8728, 8729, 8730, 8731, 8732, 8733, 8734, 0, + 0, 0, 0, 2432, 2477, 0, 0, 0, + 0, -13260,-9945, -9945, -6630, -6630, -3315, -3315, + 0, 0, 0, 0, 0, 8757, 8758, -45, + -45, 0, 0, 0, 0, 8765, 8766, -45, + -45, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 8735, 8736, 8737, + 8738, 6307, 6263, 8741, 8742, 8743, 8744, 22005, + 18691, 18692, 15378, 15379, 12065, 12066, 8752, 8753, + 8754, 8755, 8756, 0, 0, 8804, 8805, 8761, + 8762, 8763, 8764, 0, 0, 8812, 8813, 8769, + 6115, 6115, 6115, 6115, 3685, 6115, 6115, 8822, + 6115, 8824, 14782, 8826, 14784, 8828, 5517, 5518, + 5519, -4221, 1782, 7739, 5521, 8836, 5522, 3942, + 8839, 5523, 5524, 5525, 5526, 5527, 5528, 5529, + 6127, 5531, 5532, 8850, 5533, 8852, 5534, 8854, + 8855, 5535, 0, 0, 0, 8860, 8861, 0, + 0, 0, 13252, 9939, 9939, 6626, 6626, 3313, + 3313, 0, 0, 0, -9269, -3312, 0, 0, + 0, 9741, 32767, 32767, 0, 32767, 0, 32767, + 32767, 0, 0, 0, 0, 0, 0, 0, + -597, 0, 0, 32767, 0, 32767, 0, 32767, + 32767, 0, 0, 32767, 32767, 32767, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 32767, 32767, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, -1387, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -1773, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 
32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, -4161, 1581, 1582, 32767, 32767, 1990, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 0, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 1539, 32767, 32767, 6150, 6151, 6152, 411, + 411, 411, 411, 411, 411, 411, 411, 411, + 411, 411, 411, 411, 411, 411, 411, 8029, + 8030, 6171, 6172, 969, 969, 1013, 1013, 1013, + 1013, 1013, 969, 969, 969, 969, 8381, 969, + 969, 969, 969, 969, 969, 969, 969, 969, + 969, 969, 969, 6711, 6712, 6713, 6714, 6715, + 6716, 6717, 6718, 6719, 6720, 6721, 6722, 6723, + 6724, 6725, 6726, -891, -891, 969, 969, 6173, + 6174, 6131, 6132, 6133, 6134, 6135, 6180, 6181, + 6182, 6183, -1228, 6185, 6186, 6187, 6188, 6189, + 6190, 6191, 6192, 6193, 6194, 6195, 6196, 455, + 455, 455, 455, 455, 455, 455, 455, 455, + 455, 455, 455, 455, 455, 455, 455, 8073, + 8074, 6215, 6216, 1013, 1013, 1057, 1057, 1057, + 
1057, 1057, 1013, 1013, 1013, 1013, 8425, 1013, + 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1013, + 1013, 1013, 1013, 6755, 6756, 6757, 6758, 6759, + 6760, 6761, 6762, 6763, 6764, 6765, 6766, 6767, + 6768, 6769, 6770, -847, -847, 1013, 1013, 6217, + 6218, 6175, 6176, 6177, 6178, 6179, 6224, 6225, + 6226, 6227, -1184, 6229, 6230, 6231, 6232, 6233, + 6234, 6235, 6236, 6237, 6238, 6239, 6240, 499, + 499, 499, 499, 499, 499, 499, 499, 499, + 499, 499, 499, 499, 499, 499, 499, 8117, + 8118, 6259, 6260, 6261, 6262, 6263, 0, 6265, + 6266, 6267, 6268, 6269, 6270, 6271, 6272, 6273, + 6274, 9677, 9678, 922, 922, 9726, 9727, 9683, + 9684, 9685, 9686, 922, 922, 9734, 9735, 9691, + 9692, 9693, 9650, 9651, 9698, 9699, 9700, 9701, + 9702, 9703, 9704, 970, 970, 970, 970, 3402, + 3447, 970, 970, 970, 970, -12290,-8975, -8975, + -5660, -5660, -2345, -2345, -2345, -2345, -2345, 6412, + 6413, -2390, -2390, -2345, -2345, -2345, -2345, 6420, + 6421, -2390, -2390, -2345, -2345, -2345, -2301, -2301, + -2347, -2347, -2347, -2347, -2347, -2347, -2347, 6388, + 6389, 6390, 6391, 3960, 3916, 6394, 6395, 6396, + 6397, 19658, 16344, 16345, 13031, 13032, 9718, 9719, + 6405, 6406, 6407, 6408, 6409, -2347, -2347, 6457, + 6458, 6414, 6415, 6416, 6417, -2347, -2347, 6465, + 6466, 6422, 6423, 6424, 6381, 6382, 6429, 6430, + 6431, 6432, 6433, 6434, 6435, -2299, -2299, -2299, + -2299, 133, 178, -2299, -2299, -2299, -2299, -15559, + -12244,-12244,-8929, -8929, -5614, -5614, -2299, -2299, + -2299, -2299, -2299, 6458, 6459, -2344, -2344, -2299, + -2299, -2299, -2299, 6466, 6467, -2344, -2344, -2299, + -2299, -2299, -2299, -2299, -2299, -2299, -2299, -2299, + -2299, -2299, -2299, 6436, 6437, 6438, 6439, 4008, + 3964, 6442, 6443, 6444, 6445, 19706, 16392, 16393, + 13079, 13080, 9766, 9767, 6453, 6454, 6455, 6456, + 6457, -2299, -2299, 6505, 6506, 6462, 6463, 6464, + 6465, -2299, -2299, 6513, 6514, 6470, 6471, 6472, + 6473, 6474, 6475, 6476, 6477, 6478, 6479, 6480, + 6481, -2253, -2253, -2253, -2253, 179, 
224, -2253, + -2253, -2253, -2253, -15513,-12198,-12198,-8883, -8883, + -5568, -5568, -2253, -2253, -2253, -2253, -2253, 6504, + 6505, -2298, -2298, -2253, -2253, -2253, -2253, 6512, + 6513, -2298, -2298, -2253, 402, 403, 404, 405, + 2836, 407, 408, -2298, 410, -2298, -8255, -2298, + -8255, -2298, 1014, 1014, 1014, 10755, 4753, -1203, + 1016, -2298, 1017, 2598, -2298, 1019, 1019, 1019, + 1019, 1019, 1019, 1019, 422, 1019, 1019, -2298, + 1020, -2298, 1021, -2298, -2298, 1023, 6559, 6560, + 6561, -2298, -2298, 6564, 6565, 6566, -6685, -3371, + -3370, -56, -55, 3259, 3260, 3261, 12531, 6575, + 3264, 3265, 3266, -6474, -471, 5486, 3268, 6583, + 3269, 1689, 6586, 3270, 3271, 3272, 3273, 3274, + 3275, 3276, 3874, 3278, 3279, 6597, 3280, 6599, + 3281, 6601, 6602, 3282, 3283, 32767, 32767, 32767, + 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, + 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, + 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, + 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, + 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, + 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, + 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, + 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, + 3348, 3349, 3350, 3351, 32767, 32767, 3352, 3353, + 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, + 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, + 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, + 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, + 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, + 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, + 3402, 3403, 3404, 3405, 3406, 3407, 4795, 3409, + 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, + 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, + 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, + 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, + 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, + 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 
32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 3458, + 3459, 3460, 3461, 3462, -8139, 3464, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 3465, 3466, 2001, 3468, 3469, 32767, + 32767, 32767, 32767, 32767, 3470, 3471, 3472, 3473, + 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, + 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, + 3490, 3491, 3492, 3493, 3494, 3495, 32767, 3496, + 3497, 3498, 3499, 3500, 32767, 3501, 32767, 3502, + 3503, 32767, 3504, 3505, 32767, 3506, 0, 0, + 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, + 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, + 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, + 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, + 3541, 3542, 3543, 3544, 3545, 1902, 1902, 1902, + 1902, 9314, 1902, 1902, 1902, 1902, 1902, 1902, + 1902, 1902, 1902, 1902, 1902, 1902, 7644, 7645, + 7646, 7647, 7648, 7649, 7650, 7651, 7652, 7653, + 7654, 7655, 7656, 7657, 7658, 7659, 42, 42, + 1902, 0, 0, 0, 7067, 7068, 7069, 7070, + 7071, 7116, 7117, 7118, 7119, -292, 7121, 7122, + 7123, 7124, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3614, 3615, 3616, 10892, 3618, 3619, + 10854, 3621, 3622, 3623, 3624, 3625, 8994, -2751, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3640, 3641, 2783, -3331, + -3330, 3645, 3646, 0, 6053, 6054, 0, 6056, + 6057, 6058, 6059, 3767, 6061, 6858, 0, 0, + 3659, 0, 0, 1531, 1531, 1531, 1531, 1531, + 1531, 1531, 1531, 1531, 1531, 1531, 1531, 1531, + 1531, 1531, 9149, 9150, 7291, 7292, 7293, 7294, + 7295, 1032, 7297, 7298, 7299, 7300, 7301, 7302, + 7303, 7304, 0, 7307, 10710, 10711, 1955, 1955, + 10759, 10760, 10716, 4548, 4549, 4550, 1952, 1952, + 10764, 10765, 10721, 10722, 3711, 6314, 10679, 10726, + 10727, 10728, 10729, 10730, 3719, 
3720, 1996, 1996, + 1996, 1996, 4428, 4473, 4473, 3728, 1994, 1994, + -11266,3732, 3733, 3734, 0, 0, 0, 0, + 0, 0, 0, 3742, 10755, 10756, 1953, 1953, + 1998, 1998, 1998, 11730, 11731, 12711, 9397, 3867, + 11735, 3869, 6139, 9874, 9875, 9876, 9877, 9878, + 9879, 9880, 6139, -873, -873, 7931, -1839, -1839, + 7891, 7892, -1839, -1839, -2818, 497, 6028, -1839, + -1839, -66, -66, -66, -66, 2522, 2523, -6280, + -6280, -6235, -66, -66, -66, 2533, 2534, -6277, + -66, -6231, -6231, 781, -1821, -6185, -6231, 3867, + -1821, -6231, -1821, 782, 3076, 3873, 2511, -8720, + 84, 85, 41, 42, 43, -9688, -9688, -10667, + -7352, -1821, -9688, -1821, -4090, -7824, -7824, -7824, + -7824, -7824, -7824, -7824, -4082, 2931, 2932, -5871, + 3900, 3901, -5828, -5828, 3904, 3905, 4885, 1571, + -3959, 3909, 3910, 2138, 2139, 2140, 2141, -446, + -446, 8358, 8359, 8315, 2147, 2148, 2149, -449, + -449, 8363, 2153, 8319, 8320, 1309, 3912, 8277, + 8324, -1773, 3916, 8327, 3918, 1316, -977, -1773, + -410, 10822, 2019, 2019, 2064, 2064, 2064, 11796, + 11797, 12777, 9463, 3933, 11801, 3935, 6205, 9940, + 9941, 9942, 9943, 9944, 9945, 9946, 6205, -807, + -807, 7997, -1773, -1773, 7957, 7958, -1773, -1773, + -2752, 563, 6094, -1773, -1773, 0, 0, 0, + 0, 2588, 2589, -6214, -6214, -6169, 0, 0, + 0, 2599, 2600, -6211, 0, -6165, -6165, 847, + -1755, -6119, -6165, 3933, -1755, -6165, -1755, 848, + 3142, 3939, 2577, -8654, 150, 151, 107, 108, + 109, -9622, -9622, -10601,-7286, -1755, -9622, -1755, + -4024, -7758, -7758, -7758, -7758, -7758, -7758, -7758, + -4016, 2997, 2998, -5805, 3966, 3967, -5762, -5762, + 3970, 3971, 4951, 1637, -3893, 3975, 3976, 2204, + 2205, 2206, 2207, -380, -380, 8424, 8425, 8381, + 2213, 2214, 2215, -383, -383, 8429, 2219, 8385, + 8386, 1375, 3978, 8343, 8390, -1707, 3982, 8393, + 3984, 1382, -911, -1707, -344, 10888, 2085, 2085, + 2130, 2130, 2130, 11862, 11863, 12843, 9529, 3999, + 11867, 4001, 6271, 10006, 10007, 4005, -1951, 4007, + 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 
+ 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, + 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, + 11943, 4033, 4034, 4035, 4036, 8637, 4038, 4039, + -116, 32767, 32767, 4041, 4042, 4043, 4044, 4045, + 2250, 4047, 4048, 4049, 5802, 4051, 4052, 4559, + 6119, 4055, 4056, 4057, 4058, 4059, 4060, 4061, + -47, 4063, -46, 4065, 4066, 4067, 4068, 4069, + -41, -13301,4072, -9985, -6670, 4075, 4076, 4077, + 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, + 4086, 4087, 4088, 4089, 4090, 8741, 4092, 4093, + -67, 32767, 32767, 32767, 32767, 32767, 2257, 32767, + 2258, 2259, 2260, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 2261, 32767, 2262, 32767, + 2263, 32767, 2264, 32767, 2265, 32767, 2266, 32767, + 2267, 8737, 8738, -26, -26, 8786, 4100, 4101, + 8746, 8747, 4104, 4105, 8752, 8753, 32767, 2274, + 32767, 2275, 32767, 32767, 32767, 32767, 32767, 32767, + 2276, 2277, 32767, 2278, 2279, 32767, 2280, 0, + 32767, 2282, 9695, 4109, -3486, -3486, 4112, 4113, + 4114, 4115, 4116, 4117, 32767, 32767, 32767, 32767, + 32767, 32767, 4118, 4119, 4120, 4121, 4122, 4123, + 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, + 4132, 4133, 4134, 4849, 4136, 4137, 4851, 4851, + 4140, 4852, 4142, 4143, 4144, 4145, 4146, 4147, + 4148, 4149, 4150, 4151, 2293, 4153, 907, 32767, + 2295, 4155, 909, 4157, 910, 4159, 911, 4161, + 912, 4163, 913, 4165, 914, 32767, 915, 4168, + 916, 4170, 917, 4172, 4173, 918, 4175, 4176, + 4177, 4178, 4179, 4180, 4181, 4182, 4183, 4184, + 4185, 2309, 4186, 4187, 4188, 4189, 2312, 2313, + 32767, 2314, 4190, 4191, -2632, 2317, 4193, 32767, + 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, + 4202, 4203, 4204, 4205, 4206, 0, 0, 4209, + 4210, 4211, 4212, 4213, 2318, 4215, 4216, 2319, + 2320, 2321, 2322, 4221, 4222, 4223, 2323, 2324, + 4226, 4227, 4228, 4229, 4230, 4231, 5551, 4233, + 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, + 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, + 4250, 4251, 
4252, 4253, 4254, 4255, 4256, 4257, + 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, + 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, + 4274, 4275, -3342, -3342, -3342, 4276, 4277, 2418, + 2419, -2784, -2784, -2740, -2740, -2740, -2740, -2740, + -2784, -2784, -2784, -2784, 4628, -2784, -2784, -2784, + -2784, -2784, -2784, -2784, -2784, -2784, -2784, -2784, + -2784, 2958, 2959, 2960, 2961, 2962, 2963, 2964, + 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, + 2973, -4644, -4644, -2784, -2784, 2420, 2421, 2378, + 2379, 2380, 2381, 2382, 2427, 2428, 2429, 2430, + -4981, 2432, 2433, 2434, 2435, 2436, 2437, 2438, + 2439, 2440, 2441, 2442, 2443, -3298, -3298, -3298, + -3298, -3298, -3298, -3298, -3298, -3298, -3298, -3298, + -3298, -3298, -3298, -3298, -3298, 4320, 4321, 2462, + 4365, 4366, 4367, -2699, -2699, -2699, -2699, -2699, + -2743, -2743, -2743, -2743, 4669, -2743, -2743, -2743, + -2743, 4382, 4383, 4384, 4385, 4386, 4387, 4388, + 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, + 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, + 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, + 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, + 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, + 4429, 816, 816, 816, -6459, 816, 816, -6418, + 816, 816, 816, 816, 816, -4552, 7194, 4444, + 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, + 4453, 4454, 4455, 816, 816, 1675, 7790, 7790, + 816, 816, 4463, -1589, -1589, 4466, -1589, -1589, + -1589, -1589, 704, -1589, -2385, 4474, 4475, 817, + 4477, 4478, 2948, 2949, 2950, 2951, 2952, 2953, + 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, + 2962, -4655, -4655, -2795, -2795, -2795, -2795, -2795, + 3469, -2795, -2795, -2795, -2795, -2795, -2795, -2795, + -2795, 4510, -2796, -6198, -6198, 2559, 2560, -6243, + -6243, -6198, -6198, -6198, -6198, 2567, 2568, -6243, + -6243, -6198, -6198, -6198, -6154, -6154, -6200, -6200, + -6200, -6200, -6200, -6200, -6200, 2535, 2536, 2537, + 2538, 107, 63, 2541, 2542, 2543, 2544, 15805, + 12491, 12492, 32767, 
4540, 4541, 4542, 4543, 4544, + 4545, 4546, 2548, -6208, -6208, 2596, 2597, 2553, + 2554, 2555, 2556, -6208, -6208, 2604, 2605, 2561, + 2562, 2563, 2520, 2521, 2568, 2569, 2570, 2571, + 2572, 2573, 2574, -6160, -6160, -6160, -6160, -3728, + -3683, -6160, -6160, -6160, -6160, -19420,-16105,-16105, + -12790,-12790,-9475, -9475, -6160, -6160, -6160, -6160, + -6160, 32767, 2597, -6206, -6206, -6161, -6161, -6161, + -6161, 2604, 2605, -6206, -6206, -6161, -6161, -6161, + -6161, -6161, -6161, -6161, -6161, -6161, -6161, -6161, + -6161, 2574, 2575, 2576, 2577, 146, 102, 2580, + 2581, 2582, 2583, 15844, 12530, 12531, 9217, 9218, + 5904, 5905, 2591, 2592, 2593, 2594, 2595, -6161, + -6161, 2643, 2644, 2600, 2601, 2602, 2603, -6161, + -6161, 2651, 2652, 2608, 2609, 2610, 2611, 2612, + 2613, 2614, 2615, 2616, 2617, 2618, 2619, -6115, + -6115, -6115, -6115, -3683, -3638, -6115, -6115, -6115, + -6115, -19375,-16060,-16060,-12745,-12745,-9430, -9430, + -6115, -6115, -6115, -6115, -6115, 2642, 2643, -6160, + -6160, -6115, -6115, -6115, -6115, 2650, 2651, -6160, + -6160, -6115, -3460, -3459, -3458, -3457, -1026, -3455, + -3454, -6160, -3452, -6160, -12117,-6160, -12117,-6160, + -2848, -2848, -2848, 6893, 891, -5065, -2846, -6160, + -2845, -1264, 0, 9264, 5950, 5951, 2637, 2638, + 2639, 2640, 2641, -6115, -6115, 2689, 2690, 2646, + 2647, 2648, 2649, -6115, -6115, 2697, 2698, 2654, + 0, 0, 0, 0, -2430, 0, 0, 2707, + 0, 2709, 8667, 2711, 8669, 2713, -598, -597, + -596, -10336,-4333, 1624, -594, 2721, -593, -2173, + 2724, -592, -591, -590, -589, -588, -587, -586, + 12, -584, -583, 2735, -582, 2737, -581, 2739, + 2740, -580, -6115, -6115, -6115, 2745, 2746, -6115, + -6115, 0, 0, 0, 2752, 2753, 2754, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 6247, 6248, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0 + }; + + const unsigned char *k = (const unsigned char *) key; + size_t keylen = 4; + uint32 a = 0; + uint32 b = 1; + + while (keylen--) + { + unsigned char c = *k++; + + a = a * 257 + c; + b = b * 
8191 + c; + } + return h[a % 13209] + h[b % 13209]; +} + +/* Hash lookup information for decomposition */ +static const pg_unicode_decompinfo UnicodeDecompInfo = +{ + UnicodeDecompMain, + Decomp_hash_func, + 6604 +}; + +/* Inverse lookup array -- contains indexes into UnicodeDecompMain[] */ +static const uint16 RecompInverseLookup[941] = +{ + /* U+003C+0338 -> U+226E */ 1823, + /* U+003D+0338 -> U+2260 */ 1820, + /* U+003E+0338 -> U+226F */ 1824, + /* U+0041+0300 -> U+00C0 */ 14, + /* U+0041+0301 -> U+00C1 */ 15, + /* U+0041+0302 -> U+00C2 */ 16, + /* U+0041+0303 -> U+00C3 */ 17, + /* U+0041+0304 -> U+0100 */ 67, + /* U+0041+0306 -> U+0102 */ 69, + /* U+0041+0307 -> U+0226 */ 270, + /* U+0041+0308 -> U+00C4 */ 18, + /* U+0041+0309 -> U+1EA2 */ 1278, + /* U+0041+030A -> U+00C5 */ 19, + /* U+0041+030C -> U+01CD */ 194, + /* U+0041+030F -> U+0200 */ 240, + /* U+0041+0311 -> U+0202 */ 242, + /* U+0041+0323 -> U+1EA0 */ 1276, + /* U+0041+0325 -> U+1E00 */ 1120, + /* U+0041+0328 -> U+0104 */ 71, + /* U+0042+0307 -> U+1E02 */ 1122, + /* U+0042+0323 -> U+1E04 */ 1124, + /* U+0042+0331 -> U+1E06 */ 1126, + /* U+0043+0301 -> U+0106 */ 73, + /* U+0043+0302 -> U+0108 */ 75, + /* U+0043+0307 -> U+010A */ 77, + /* U+0043+030C -> U+010C */ 79, + /* U+0043+0327 -> U+00C7 */ 20, + /* U+0044+0307 -> U+1E0A */ 1130, + /* U+0044+030C -> U+010E */ 81, + /* U+0044+0323 -> U+1E0C */ 1132, + /* U+0044+0327 -> U+1E10 */ 1136, + /* U+0044+032D -> U+1E12 */ 1138, + /* U+0044+0331 -> U+1E0E */ 1134, + /* U+0045+0300 -> U+00C8 */ 21, + /* U+0045+0301 -> U+00C9 */ 22, + /* U+0045+0302 -> U+00CA */ 23, + /* U+0045+0303 -> U+1EBC */ 1304, + /* U+0045+0304 -> U+0112 */ 83, + /* U+0045+0306 -> U+0114 */ 85, + /* U+0045+0307 -> U+0116 */ 87, + /* U+0045+0308 -> U+00CB */ 24, + /* U+0045+0309 -> U+1EBA */ 1302, + /* U+0045+030C -> U+011A */ 91, + /* U+0045+030F -> U+0204 */ 244, + /* U+0045+0311 -> U+0206 */ 246, + /* U+0045+0323 -> U+1EB8 */ 1300, + /* U+0045+0327 -> U+0228 */ 272, + /* U+0045+0328 
-> U+0118 */ 89, + /* U+0045+032D -> U+1E18 */ 1144, + /* U+0045+0330 -> U+1E1A */ 1146, + /* U+0046+0307 -> U+1E1E */ 1150, + /* U+0047+0301 -> U+01F4 */ 230, + /* U+0047+0302 -> U+011C */ 93, + /* U+0047+0304 -> U+1E20 */ 1152, + /* U+0047+0306 -> U+011E */ 95, + /* U+0047+0307 -> U+0120 */ 97, + /* U+0047+030C -> U+01E6 */ 216, + /* U+0047+0327 -> U+0122 */ 99, + /* U+0048+0302 -> U+0124 */ 101, + /* U+0048+0307 -> U+1E22 */ 1154, + /* U+0048+0308 -> U+1E26 */ 1158, + /* U+0048+030C -> U+021E */ 268, + /* U+0048+0323 -> U+1E24 */ 1156, + /* U+0048+0327 -> U+1E28 */ 1160, + /* U+0048+032E -> U+1E2A */ 1162, + /* U+0049+0300 -> U+00CC */ 25, + /* U+0049+0301 -> U+00CD */ 26, + /* U+0049+0302 -> U+00CE */ 27, + /* U+0049+0303 -> U+0128 */ 103, + /* U+0049+0304 -> U+012A */ 105, + /* U+0049+0306 -> U+012C */ 107, + /* U+0049+0307 -> U+0130 */ 111, + /* U+0049+0308 -> U+00CF */ 28, + /* U+0049+0309 -> U+1EC8 */ 1316, + /* U+0049+030C -> U+01CF */ 196, + /* U+0049+030F -> U+0208 */ 248, + /* U+0049+0311 -> U+020A */ 250, + /* U+0049+0323 -> U+1ECA */ 1318, + /* U+0049+0328 -> U+012E */ 109, + /* U+0049+0330 -> U+1E2C */ 1164, + /* U+004A+0302 -> U+0134 */ 114, + /* U+004B+0301 -> U+1E30 */ 1168, + /* U+004B+030C -> U+01E8 */ 218, + /* U+004B+0323 -> U+1E32 */ 1170, + /* U+004B+0327 -> U+0136 */ 116, + /* U+004B+0331 -> U+1E34 */ 1172, + /* U+004C+0301 -> U+0139 */ 118, + /* U+004C+030C -> U+013D */ 122, + /* U+004C+0323 -> U+1E36 */ 1174, + /* U+004C+0327 -> U+013B */ 120, + /* U+004C+032D -> U+1E3C */ 1180, + /* U+004C+0331 -> U+1E3A */ 1178, + /* U+004D+0301 -> U+1E3E */ 1182, + /* U+004D+0307 -> U+1E40 */ 1184, + /* U+004D+0323 -> U+1E42 */ 1186, + /* U+004E+0300 -> U+01F8 */ 232, + /* U+004E+0301 -> U+0143 */ 126, + /* U+004E+0303 -> U+00D1 */ 29, + /* U+004E+0307 -> U+1E44 */ 1188, + /* U+004E+030C -> U+0147 */ 130, + /* U+004E+0323 -> U+1E46 */ 1190, + /* U+004E+0327 -> U+0145 */ 128, + /* U+004E+032D -> U+1E4A */ 1194, + /* U+004E+0331 -> U+1E48 */ 1192, + /* 
U+004F+0300 -> U+00D2 */ 30, + /* U+004F+0301 -> U+00D3 */ 31, + /* U+004F+0302 -> U+00D4 */ 32, + /* U+004F+0303 -> U+00D5 */ 33, + /* U+004F+0304 -> U+014C */ 133, + /* U+004F+0306 -> U+014E */ 135, + /* U+004F+0307 -> U+022E */ 278, + /* U+004F+0308 -> U+00D6 */ 34, + /* U+004F+0309 -> U+1ECE */ 1322, + /* U+004F+030B -> U+0150 */ 137, + /* U+004F+030C -> U+01D1 */ 198, + /* U+004F+030F -> U+020C */ 252, + /* U+004F+0311 -> U+020E */ 254, + /* U+004F+031B -> U+01A0 */ 181, + /* U+004F+0323 -> U+1ECC */ 1320, + /* U+004F+0328 -> U+01EA */ 220, + /* U+0050+0301 -> U+1E54 */ 1204, + /* U+0050+0307 -> U+1E56 */ 1206, + /* U+0052+0301 -> U+0154 */ 139, + /* U+0052+0307 -> U+1E58 */ 1208, + /* U+0052+030C -> U+0158 */ 143, + /* U+0052+030F -> U+0210 */ 256, + /* U+0052+0311 -> U+0212 */ 258, + /* U+0052+0323 -> U+1E5A */ 1210, + /* U+0052+0327 -> U+0156 */ 141, + /* U+0052+0331 -> U+1E5E */ 1214, + /* U+0053+0301 -> U+015A */ 145, + /* U+0053+0302 -> U+015C */ 147, + /* U+0053+0307 -> U+1E60 */ 1216, + /* U+0053+030C -> U+0160 */ 151, + /* U+0053+0323 -> U+1E62 */ 1218, + /* U+0053+0326 -> U+0218 */ 264, + /* U+0053+0327 -> U+015E */ 149, + /* U+0054+0307 -> U+1E6A */ 1226, + /* U+0054+030C -> U+0164 */ 155, + /* U+0054+0323 -> U+1E6C */ 1228, + /* U+0054+0326 -> U+021A */ 266, + /* U+0054+0327 -> U+0162 */ 153, + /* U+0054+032D -> U+1E70 */ 1232, + /* U+0054+0331 -> U+1E6E */ 1230, + /* U+0055+0300 -> U+00D9 */ 35, + /* U+0055+0301 -> U+00DA */ 36, + /* U+0055+0302 -> U+00DB */ 37, + /* U+0055+0303 -> U+0168 */ 157, + /* U+0055+0304 -> U+016A */ 159, + /* U+0055+0306 -> U+016C */ 161, + /* U+0055+0308 -> U+00DC */ 38, + /* U+0055+0309 -> U+1EE6 */ 1346, + /* U+0055+030A -> U+016E */ 163, + /* U+0055+030B -> U+0170 */ 165, + /* U+0055+030C -> U+01D3 */ 200, + /* U+0055+030F -> U+0214 */ 260, + /* U+0055+0311 -> U+0216 */ 262, + /* U+0055+031B -> U+01AF */ 183, + /* U+0055+0323 -> U+1EE4 */ 1344, + /* U+0055+0324 -> U+1E72 */ 1234, + /* U+0055+0328 -> U+0172 */ 167, + 
/* U+0055+032D -> U+1E76 */ 1238, + /* U+0055+0330 -> U+1E74 */ 1236, + /* U+0056+0303 -> U+1E7C */ 1244, + /* U+0056+0323 -> U+1E7E */ 1246, + /* U+0057+0300 -> U+1E80 */ 1248, + /* U+0057+0301 -> U+1E82 */ 1250, + /* U+0057+0302 -> U+0174 */ 169, + /* U+0057+0307 -> U+1E86 */ 1254, + /* U+0057+0308 -> U+1E84 */ 1252, + /* U+0057+0323 -> U+1E88 */ 1256, + /* U+0058+0307 -> U+1E8A */ 1258, + /* U+0058+0308 -> U+1E8C */ 1260, + /* U+0059+0300 -> U+1EF2 */ 1358, + /* U+0059+0301 -> U+00DD */ 39, + /* U+0059+0302 -> U+0176 */ 171, + /* U+0059+0303 -> U+1EF8 */ 1364, + /* U+0059+0304 -> U+0232 */ 282, + /* U+0059+0307 -> U+1E8E */ 1262, + /* U+0059+0308 -> U+0178 */ 173, + /* U+0059+0309 -> U+1EF6 */ 1362, + /* U+0059+0323 -> U+1EF4 */ 1360, + /* U+005A+0301 -> U+0179 */ 174, + /* U+005A+0302 -> U+1E90 */ 1264, + /* U+005A+0307 -> U+017B */ 176, + /* U+005A+030C -> U+017D */ 178, + /* U+005A+0323 -> U+1E92 */ 1266, + /* U+005A+0331 -> U+1E94 */ 1268, + /* U+0061+0300 -> U+00E0 */ 40, + /* U+0061+0301 -> U+00E1 */ 41, + /* U+0061+0302 -> U+00E2 */ 42, + /* U+0061+0303 -> U+00E3 */ 43, + /* U+0061+0304 -> U+0101 */ 68, + /* U+0061+0306 -> U+0103 */ 70, + /* U+0061+0307 -> U+0227 */ 271, + /* U+0061+0308 -> U+00E4 */ 44, + /* U+0061+0309 -> U+1EA3 */ 1279, + /* U+0061+030A -> U+00E5 */ 45, + /* U+0061+030C -> U+01CE */ 195, + /* U+0061+030F -> U+0201 */ 241, + /* U+0061+0311 -> U+0203 */ 243, + /* U+0061+0323 -> U+1EA1 */ 1277, + /* U+0061+0325 -> U+1E01 */ 1121, + /* U+0061+0328 -> U+0105 */ 72, + /* U+0062+0307 -> U+1E03 */ 1123, + /* U+0062+0323 -> U+1E05 */ 1125, + /* U+0062+0331 -> U+1E07 */ 1127, + /* U+0063+0301 -> U+0107 */ 74, + /* U+0063+0302 -> U+0109 */ 76, + /* U+0063+0307 -> U+010B */ 78, + /* U+0063+030C -> U+010D */ 80, + /* U+0063+0327 -> U+00E7 */ 46, + /* U+0064+0307 -> U+1E0B */ 1131, + /* U+0064+030C -> U+010F */ 82, + /* U+0064+0323 -> U+1E0D */ 1133, + /* U+0064+0327 -> U+1E11 */ 1137, + /* U+0064+032D -> U+1E13 */ 1139, + /* U+0064+0331 -> U+1E0F 
*/ 1135, + /* U+0065+0300 -> U+00E8 */ 47, + /* U+0065+0301 -> U+00E9 */ 48, + /* U+0065+0302 -> U+00EA */ 49, + /* U+0065+0303 -> U+1EBD */ 1305, + /* U+0065+0304 -> U+0113 */ 84, + /* U+0065+0306 -> U+0115 */ 86, + /* U+0065+0307 -> U+0117 */ 88, + /* U+0065+0308 -> U+00EB */ 50, + /* U+0065+0309 -> U+1EBB */ 1303, + /* U+0065+030C -> U+011B */ 92, + /* U+0065+030F -> U+0205 */ 245, + /* U+0065+0311 -> U+0207 */ 247, + /* U+0065+0323 -> U+1EB9 */ 1301, + /* U+0065+0327 -> U+0229 */ 273, + /* U+0065+0328 -> U+0119 */ 90, + /* U+0065+032D -> U+1E19 */ 1145, + /* U+0065+0330 -> U+1E1B */ 1147, + /* U+0066+0307 -> U+1E1F */ 1151, + /* U+0067+0301 -> U+01F5 */ 231, + /* U+0067+0302 -> U+011D */ 94, + /* U+0067+0304 -> U+1E21 */ 1153, + /* U+0067+0306 -> U+011F */ 96, + /* U+0067+0307 -> U+0121 */ 98, + /* U+0067+030C -> U+01E7 */ 217, + /* U+0067+0327 -> U+0123 */ 100, + /* U+0068+0302 -> U+0125 */ 102, + /* U+0068+0307 -> U+1E23 */ 1155, + /* U+0068+0308 -> U+1E27 */ 1159, + /* U+0068+030C -> U+021F */ 269, + /* U+0068+0323 -> U+1E25 */ 1157, + /* U+0068+0327 -> U+1E29 */ 1161, + /* U+0068+032E -> U+1E2B */ 1163, + /* U+0068+0331 -> U+1E96 */ 1270, + /* U+0069+0300 -> U+00EC */ 51, + /* U+0069+0301 -> U+00ED */ 52, + /* U+0069+0302 -> U+00EE */ 53, + /* U+0069+0303 -> U+0129 */ 104, + /* U+0069+0304 -> U+012B */ 106, + /* U+0069+0306 -> U+012D */ 108, + /* U+0069+0308 -> U+00EF */ 54, + /* U+0069+0309 -> U+1EC9 */ 1317, + /* U+0069+030C -> U+01D0 */ 197, + /* U+0069+030F -> U+0209 */ 249, + /* U+0069+0311 -> U+020B */ 251, + /* U+0069+0323 -> U+1ECB */ 1319, + /* U+0069+0328 -> U+012F */ 110, + /* U+0069+0330 -> U+1E2D */ 1165, + /* U+006A+0302 -> U+0135 */ 115, + /* U+006A+030C -> U+01F0 */ 226, + /* U+006B+0301 -> U+1E31 */ 1169, + /* U+006B+030C -> U+01E9 */ 219, + /* U+006B+0323 -> U+1E33 */ 1171, + /* U+006B+0327 -> U+0137 */ 117, + /* U+006B+0331 -> U+1E35 */ 1173, + /* U+006C+0301 -> U+013A */ 119, + /* U+006C+030C -> U+013E */ 123, + /* U+006C+0323 -> U+1E37 
*/ 1175, + /* U+006C+0327 -> U+013C */ 121, + /* U+006C+032D -> U+1E3D */ 1181, + /* U+006C+0331 -> U+1E3B */ 1179, + /* U+006D+0301 -> U+1E3F */ 1183, + /* U+006D+0307 -> U+1E41 */ 1185, + /* U+006D+0323 -> U+1E43 */ 1187, + /* U+006E+0300 -> U+01F9 */ 233, + /* U+006E+0301 -> U+0144 */ 127, + /* U+006E+0303 -> U+00F1 */ 55, + /* U+006E+0307 -> U+1E45 */ 1189, + /* U+006E+030C -> U+0148 */ 131, + /* U+006E+0323 -> U+1E47 */ 1191, + /* U+006E+0327 -> U+0146 */ 129, + /* U+006E+032D -> U+1E4B */ 1195, + /* U+006E+0331 -> U+1E49 */ 1193, + /* U+006F+0300 -> U+00F2 */ 56, + /* U+006F+0301 -> U+00F3 */ 57, + /* U+006F+0302 -> U+00F4 */ 58, + /* U+006F+0303 -> U+00F5 */ 59, + /* U+006F+0304 -> U+014D */ 134, + /* U+006F+0306 -> U+014F */ 136, + /* U+006F+0307 -> U+022F */ 279, + /* U+006F+0308 -> U+00F6 */ 60, + /* U+006F+0309 -> U+1ECF */ 1323, + /* U+006F+030B -> U+0151 */ 138, + /* U+006F+030C -> U+01D2 */ 199, + /* U+006F+030F -> U+020D */ 253, + /* U+006F+0311 -> U+020F */ 255, + /* U+006F+031B -> U+01A1 */ 182, + /* U+006F+0323 -> U+1ECD */ 1321, + /* U+006F+0328 -> U+01EB */ 221, + /* U+0070+0301 -> U+1E55 */ 1205, + /* U+0070+0307 -> U+1E57 */ 1207, + /* U+0072+0301 -> U+0155 */ 140, + /* U+0072+0307 -> U+1E59 */ 1209, + /* U+0072+030C -> U+0159 */ 144, + /* U+0072+030F -> U+0211 */ 257, + /* U+0072+0311 -> U+0213 */ 259, + /* U+0072+0323 -> U+1E5B */ 1211, + /* U+0072+0327 -> U+0157 */ 142, + /* U+0072+0331 -> U+1E5F */ 1215, + /* U+0073+0301 -> U+015B */ 146, + /* U+0073+0302 -> U+015D */ 148, + /* U+0073+0307 -> U+1E61 */ 1217, + /* U+0073+030C -> U+0161 */ 152, + /* U+0073+0323 -> U+1E63 */ 1219, + /* U+0073+0326 -> U+0219 */ 265, + /* U+0073+0327 -> U+015F */ 150, + /* U+0074+0307 -> U+1E6B */ 1227, + /* U+0074+0308 -> U+1E97 */ 1271, + /* U+0074+030C -> U+0165 */ 156, + /* U+0074+0323 -> U+1E6D */ 1229, + /* U+0074+0326 -> U+021B */ 267, + /* U+0074+0327 -> U+0163 */ 154, + /* U+0074+032D -> U+1E71 */ 1233, + /* U+0074+0331 -> U+1E6F */ 1231, + /* 
U+0075+0300 -> U+00F9 */ 61, + /* U+0075+0301 -> U+00FA */ 62, + /* U+0075+0302 -> U+00FB */ 63, + /* U+0075+0303 -> U+0169 */ 158, + /* U+0075+0304 -> U+016B */ 160, + /* U+0075+0306 -> U+016D */ 162, + /* U+0075+0308 -> U+00FC */ 64, + /* U+0075+0309 -> U+1EE7 */ 1347, + /* U+0075+030A -> U+016F */ 164, + /* U+0075+030B -> U+0171 */ 166, + /* U+0075+030C -> U+01D4 */ 201, + /* U+0075+030F -> U+0215 */ 261, + /* U+0075+0311 -> U+0217 */ 263, + /* U+0075+031B -> U+01B0 */ 184, + /* U+0075+0323 -> U+1EE5 */ 1345, + /* U+0075+0324 -> U+1E73 */ 1235, + /* U+0075+0328 -> U+0173 */ 168, + /* U+0075+032D -> U+1E77 */ 1239, + /* U+0075+0330 -> U+1E75 */ 1237, + /* U+0076+0303 -> U+1E7D */ 1245, + /* U+0076+0323 -> U+1E7F */ 1247, + /* U+0077+0300 -> U+1E81 */ 1249, + /* U+0077+0301 -> U+1E83 */ 1251, + /* U+0077+0302 -> U+0175 */ 170, + /* U+0077+0307 -> U+1E87 */ 1255, + /* U+0077+0308 -> U+1E85 */ 1253, + /* U+0077+030A -> U+1E98 */ 1272, + /* U+0077+0323 -> U+1E89 */ 1257, + /* U+0078+0307 -> U+1E8B */ 1259, + /* U+0078+0308 -> U+1E8D */ 1261, + /* U+0079+0300 -> U+1EF3 */ 1359, + /* U+0079+0301 -> U+00FD */ 65, + /* U+0079+0302 -> U+0177 */ 172, + /* U+0079+0303 -> U+1EF9 */ 1365, + /* U+0079+0304 -> U+0233 */ 283, + /* U+0079+0307 -> U+1E8F */ 1263, + /* U+0079+0308 -> U+00FF */ 66, + /* U+0079+0309 -> U+1EF7 */ 1363, + /* U+0079+030A -> U+1E99 */ 1273, + /* U+0079+0323 -> U+1EF5 */ 1361, + /* U+007A+0301 -> U+017A */ 175, + /* U+007A+0302 -> U+1E91 */ 1265, + /* U+007A+0307 -> U+017C */ 177, + /* U+007A+030C -> U+017E */ 179, + /* U+007A+0323 -> U+1E93 */ 1267, + /* U+007A+0331 -> U+1E95 */ 1269, + /* U+00A8+0300 -> U+1FED */ 1584, + /* U+00A8+0301 -> U+0385 */ 419, + /* U+00A8+0342 -> U+1FC1 */ 1544, + /* U+00C2+0300 -> U+1EA6 */ 1282, + /* U+00C2+0301 -> U+1EA4 */ 1280, + /* U+00C2+0303 -> U+1EAA */ 1286, + /* U+00C2+0309 -> U+1EA8 */ 1284, + /* U+00C4+0304 -> U+01DE */ 210, + /* U+00C5+0301 -> U+01FA */ 234, + /* U+00C6+0301 -> U+01FC */ 236, + /* U+00C6+0304 -> 
U+01E2 */ 214, + /* U+00C7+0301 -> U+1E08 */ 1128, + /* U+00CA+0300 -> U+1EC0 */ 1308, + /* U+00CA+0301 -> U+1EBE */ 1306, + /* U+00CA+0303 -> U+1EC4 */ 1312, + /* U+00CA+0309 -> U+1EC2 */ 1310, + /* U+00CF+0301 -> U+1E2E */ 1166, + /* U+00D4+0300 -> U+1ED2 */ 1326, + /* U+00D4+0301 -> U+1ED0 */ 1324, + /* U+00D4+0303 -> U+1ED6 */ 1330, + /* U+00D4+0309 -> U+1ED4 */ 1328, + /* U+00D5+0301 -> U+1E4C */ 1196, + /* U+00D5+0304 -> U+022C */ 276, + /* U+00D5+0308 -> U+1E4E */ 1198, + /* U+00D6+0304 -> U+022A */ 274, + /* U+00D8+0301 -> U+01FE */ 238, + /* U+00DC+0300 -> U+01DB */ 208, + /* U+00DC+0301 -> U+01D7 */ 204, + /* U+00DC+0304 -> U+01D5 */ 202, + /* U+00DC+030C -> U+01D9 */ 206, + /* U+00E2+0300 -> U+1EA7 */ 1283, + /* U+00E2+0301 -> U+1EA5 */ 1281, + /* U+00E2+0303 -> U+1EAB */ 1287, + /* U+00E2+0309 -> U+1EA9 */ 1285, + /* U+00E4+0304 -> U+01DF */ 211, + /* U+00E5+0301 -> U+01FB */ 235, + /* U+00E6+0301 -> U+01FD */ 237, + /* U+00E6+0304 -> U+01E3 */ 215, + /* U+00E7+0301 -> U+1E09 */ 1129, + /* U+00EA+0300 -> U+1EC1 */ 1309, + /* U+00EA+0301 -> U+1EBF */ 1307, + /* U+00EA+0303 -> U+1EC5 */ 1313, + /* U+00EA+0309 -> U+1EC3 */ 1311, + /* U+00EF+0301 -> U+1E2F */ 1167, + /* U+00F4+0300 -> U+1ED3 */ 1327, + /* U+00F4+0301 -> U+1ED1 */ 1325, + /* U+00F4+0303 -> U+1ED7 */ 1331, + /* U+00F4+0309 -> U+1ED5 */ 1329, + /* U+00F5+0301 -> U+1E4D */ 1197, + /* U+00F5+0304 -> U+022D */ 277, + /* U+00F5+0308 -> U+1E4F */ 1199, + /* U+00F6+0304 -> U+022B */ 275, + /* U+00F8+0301 -> U+01FF */ 239, + /* U+00FC+0300 -> U+01DC */ 209, + /* U+00FC+0301 -> U+01D8 */ 205, + /* U+00FC+0304 -> U+01D6 */ 203, + /* U+00FC+030C -> U+01DA */ 207, + /* U+0102+0300 -> U+1EB0 */ 1292, + /* U+0102+0301 -> U+1EAE */ 1290, + /* U+0102+0303 -> U+1EB4 */ 1296, + /* U+0102+0309 -> U+1EB2 */ 1294, + /* U+0103+0300 -> U+1EB1 */ 1293, + /* U+0103+0301 -> U+1EAF */ 1291, + /* U+0103+0303 -> U+1EB5 */ 1297, + /* U+0103+0309 -> U+1EB3 */ 1295, + /* U+0112+0300 -> U+1E14 */ 1140, + /* U+0112+0301 -> 
U+1E16 */ 1142, + /* U+0113+0300 -> U+1E15 */ 1141, + /* U+0113+0301 -> U+1E17 */ 1143, + /* U+014C+0300 -> U+1E50 */ 1200, + /* U+014C+0301 -> U+1E52 */ 1202, + /* U+014D+0300 -> U+1E51 */ 1201, + /* U+014D+0301 -> U+1E53 */ 1203, + /* U+015A+0307 -> U+1E64 */ 1220, + /* U+015B+0307 -> U+1E65 */ 1221, + /* U+0160+0307 -> U+1E66 */ 1222, + /* U+0161+0307 -> U+1E67 */ 1223, + /* U+0168+0301 -> U+1E78 */ 1240, + /* U+0169+0301 -> U+1E79 */ 1241, + /* U+016A+0308 -> U+1E7A */ 1242, + /* U+016B+0308 -> U+1E7B */ 1243, + /* U+017F+0307 -> U+1E9B */ 1275, + /* U+01A0+0300 -> U+1EDC */ 1336, + /* U+01A0+0301 -> U+1EDA */ 1334, + /* U+01A0+0303 -> U+1EE0 */ 1340, + /* U+01A0+0309 -> U+1EDE */ 1338, + /* U+01A0+0323 -> U+1EE2 */ 1342, + /* U+01A1+0300 -> U+1EDD */ 1337, + /* U+01A1+0301 -> U+1EDB */ 1335, + /* U+01A1+0303 -> U+1EE1 */ 1341, + /* U+01A1+0309 -> U+1EDF */ 1339, + /* U+01A1+0323 -> U+1EE3 */ 1343, + /* U+01AF+0300 -> U+1EEA */ 1350, + /* U+01AF+0301 -> U+1EE8 */ 1348, + /* U+01AF+0303 -> U+1EEE */ 1354, + /* U+01AF+0309 -> U+1EEC */ 1352, + /* U+01AF+0323 -> U+1EF0 */ 1356, + /* U+01B0+0300 -> U+1EEB */ 1351, + /* U+01B0+0301 -> U+1EE9 */ 1349, + /* U+01B0+0303 -> U+1EEF */ 1355, + /* U+01B0+0309 -> U+1EED */ 1353, + /* U+01B0+0323 -> U+1EF1 */ 1357, + /* U+01B7+030C -> U+01EE */ 224, + /* U+01EA+0304 -> U+01EC */ 222, + /* U+01EB+0304 -> U+01ED */ 223, + /* U+0226+0304 -> U+01E0 */ 212, + /* U+0227+0304 -> U+01E1 */ 213, + /* U+0228+0306 -> U+1E1C */ 1148, + /* U+0229+0306 -> U+1E1D */ 1149, + /* U+022E+0304 -> U+0230 */ 280, + /* U+022F+0304 -> U+0231 */ 281, + /* U+0292+030C -> U+01EF */ 225, + /* U+0391+0300 -> U+1FBA */ 1537, + /* U+0391+0301 -> U+0386 */ 420, + /* U+0391+0304 -> U+1FB9 */ 1536, + /* U+0391+0306 -> U+1FB8 */ 1535, + /* U+0391+0313 -> U+1F08 */ 1374, + /* U+0391+0314 -> U+1F09 */ 1375, + /* U+0391+0345 -> U+1FBC */ 1539, + /* U+0395+0300 -> U+1FC8 */ 1550, + /* U+0395+0301 -> U+0388 */ 422, + /* U+0395+0313 -> U+1F18 */ 1388, + /* 
U+0395+0314 -> U+1F19 */ 1389, + /* U+0397+0300 -> U+1FCA */ 1552, + /* U+0397+0301 -> U+0389 */ 423, + /* U+0397+0313 -> U+1F28 */ 1402, + /* U+0397+0314 -> U+1F29 */ 1403, + /* U+0397+0345 -> U+1FCC */ 1554, + /* U+0399+0300 -> U+1FDA */ 1566, + /* U+0399+0301 -> U+038A */ 424, + /* U+0399+0304 -> U+1FD9 */ 1565, + /* U+0399+0306 -> U+1FD8 */ 1564, + /* U+0399+0308 -> U+03AA */ 429, + /* U+0399+0313 -> U+1F38 */ 1418, + /* U+0399+0314 -> U+1F39 */ 1419, + /* U+039F+0300 -> U+1FF8 */ 1592, + /* U+039F+0301 -> U+038C */ 425, + /* U+039F+0313 -> U+1F48 */ 1432, + /* U+039F+0314 -> U+1F49 */ 1433, + /* U+03A1+0314 -> U+1FEC */ 1583, + /* U+03A5+0300 -> U+1FEA */ 1581, + /* U+03A5+0301 -> U+038E */ 426, + /* U+03A5+0304 -> U+1FE9 */ 1580, + /* U+03A5+0306 -> U+1FE8 */ 1579, + /* U+03A5+0308 -> U+03AB */ 430, + /* U+03A5+0314 -> U+1F59 */ 1446, + /* U+03A9+0300 -> U+1FFA */ 1594, + /* U+03A9+0301 -> U+038F */ 427, + /* U+03A9+0313 -> U+1F68 */ 1458, + /* U+03A9+0314 -> U+1F69 */ 1459, + /* U+03A9+0345 -> U+1FFC */ 1596, + /* U+03AC+0345 -> U+1FB4 */ 1532, + /* U+03AE+0345 -> U+1FC4 */ 1547, + /* U+03B1+0300 -> U+1F70 */ 1466, + /* U+03B1+0301 -> U+03AC */ 431, + /* U+03B1+0304 -> U+1FB1 */ 1529, + /* U+03B1+0306 -> U+1FB0 */ 1528, + /* U+03B1+0313 -> U+1F00 */ 1366, + /* U+03B1+0314 -> U+1F01 */ 1367, + /* U+03B1+0342 -> U+1FB6 */ 1533, + /* U+03B1+0345 -> U+1FB3 */ 1531, + /* U+03B5+0300 -> U+1F72 */ 1468, + /* U+03B5+0301 -> U+03AD */ 432, + /* U+03B5+0313 -> U+1F10 */ 1382, + /* U+03B5+0314 -> U+1F11 */ 1383, + /* U+03B7+0300 -> U+1F74 */ 1470, + /* U+03B7+0301 -> U+03AE */ 433, + /* U+03B7+0313 -> U+1F20 */ 1394, + /* U+03B7+0314 -> U+1F21 */ 1395, + /* U+03B7+0342 -> U+1FC6 */ 1548, + /* U+03B7+0345 -> U+1FC3 */ 1546, + /* U+03B9+0300 -> U+1F76 */ 1472, + /* U+03B9+0301 -> U+03AF */ 434, + /* U+03B9+0304 -> U+1FD1 */ 1559, + /* U+03B9+0306 -> U+1FD0 */ 1558, + /* U+03B9+0308 -> U+03CA */ 436, + /* U+03B9+0313 -> U+1F30 */ 1410, + /* U+03B9+0314 -> U+1F31 */ 1411, 
+ /* U+03B9+0342 -> U+1FD6 */ 1562, + /* U+03BF+0300 -> U+1F78 */ 1474, + /* U+03BF+0301 -> U+03CC */ 438, + /* U+03BF+0313 -> U+1F40 */ 1426, + /* U+03BF+0314 -> U+1F41 */ 1427, + /* U+03C1+0313 -> U+1FE4 */ 1575, + /* U+03C1+0314 -> U+1FE5 */ 1576, + /* U+03C5+0300 -> U+1F7A */ 1476, + /* U+03C5+0301 -> U+03CD */ 439, + /* U+03C5+0304 -> U+1FE1 */ 1572, + /* U+03C5+0306 -> U+1FE0 */ 1571, + /* U+03C5+0308 -> U+03CB */ 437, + /* U+03C5+0313 -> U+1F50 */ 1438, + /* U+03C5+0314 -> U+1F51 */ 1439, + /* U+03C5+0342 -> U+1FE6 */ 1577, + /* U+03C9+0300 -> U+1F7C */ 1478, + /* U+03C9+0301 -> U+03CE */ 440, + /* U+03C9+0313 -> U+1F60 */ 1450, + /* U+03C9+0314 -> U+1F61 */ 1451, + /* U+03C9+0342 -> U+1FF6 */ 1590, + /* U+03C9+0345 -> U+1FF3 */ 1588, + /* U+03CA+0300 -> U+1FD2 */ 1560, + /* U+03CA+0301 -> U+0390 */ 428, + /* U+03CA+0342 -> U+1FD7 */ 1563, + /* U+03CB+0300 -> U+1FE2 */ 1573, + /* U+03CB+0301 -> U+03B0 */ 435, + /* U+03CB+0342 -> U+1FE7 */ 1578, + /* U+03CE+0345 -> U+1FF4 */ 1589, + /* U+03D2+0301 -> U+03D3 */ 444, + /* U+03D2+0308 -> U+03D4 */ 445, + /* U+0406+0308 -> U+0407 */ 457, + /* U+0410+0306 -> U+04D0 */ 479, + /* U+0410+0308 -> U+04D2 */ 481, + /* U+0413+0301 -> U+0403 */ 456, + /* U+0415+0300 -> U+0400 */ 454, + /* U+0415+0306 -> U+04D6 */ 483, + /* U+0415+0308 -> U+0401 */ 455, + /* U+0416+0306 -> U+04C1 */ 477, + /* U+0416+0308 -> U+04DC */ 487, + /* U+0417+0308 -> U+04DE */ 489, + /* U+0418+0300 -> U+040D */ 459, + /* U+0418+0304 -> U+04E2 */ 491, + /* U+0418+0306 -> U+0419 */ 461, + /* U+0418+0308 -> U+04E4 */ 493, + /* U+041A+0301 -> U+040C */ 458, + /* U+041E+0308 -> U+04E6 */ 495, + /* U+0423+0304 -> U+04EE */ 501, + /* U+0423+0306 -> U+040E */ 460, + /* U+0423+0308 -> U+04F0 */ 503, + /* U+0423+030B -> U+04F2 */ 505, + /* U+0427+0308 -> U+04F4 */ 507, + /* U+042B+0308 -> U+04F8 */ 509, + /* U+042D+0308 -> U+04EC */ 499, + /* U+0430+0306 -> U+04D1 */ 480, + /* U+0430+0308 -> U+04D3 */ 482, + /* U+0433+0301 -> U+0453 */ 465, + /* U+0435+0300 
-> U+0450 */ 463, + /* U+0435+0306 -> U+04D7 */ 484, + /* U+0435+0308 -> U+0451 */ 464, + /* U+0436+0306 -> U+04C2 */ 478, + /* U+0436+0308 -> U+04DD */ 488, + /* U+0437+0308 -> U+04DF */ 490, + /* U+0438+0300 -> U+045D */ 468, + /* U+0438+0304 -> U+04E3 */ 492, + /* U+0438+0306 -> U+0439 */ 462, + /* U+0438+0308 -> U+04E5 */ 494, + /* U+043A+0301 -> U+045C */ 467, + /* U+043E+0308 -> U+04E7 */ 496, + /* U+0443+0304 -> U+04EF */ 502, + /* U+0443+0306 -> U+045E */ 469, + /* U+0443+0308 -> U+04F1 */ 504, + /* U+0443+030B -> U+04F3 */ 506, + /* U+0447+0308 -> U+04F5 */ 508, + /* U+044B+0308 -> U+04F9 */ 510, + /* U+044D+0308 -> U+04ED */ 500, + /* U+0456+0308 -> U+0457 */ 466, + /* U+0474+030F -> U+0476 */ 470, + /* U+0475+030F -> U+0477 */ 471, + /* U+04D8+0308 -> U+04DA */ 485, + /* U+04D9+0308 -> U+04DB */ 486, + /* U+04E8+0308 -> U+04EA */ 497, + /* U+04E9+0308 -> U+04EB */ 498, + /* U+0627+0653 -> U+0622 */ 574, + /* U+0627+0654 -> U+0623 */ 575, + /* U+0627+0655 -> U+0625 */ 577, + /* U+0648+0654 -> U+0624 */ 576, + /* U+064A+0654 -> U+0626 */ 578, + /* U+06C1+0654 -> U+06C2 */ 606, + /* U+06D2+0654 -> U+06D3 */ 607, + /* U+06D5+0654 -> U+06C0 */ 605, + /* U+0928+093C -> U+0929 */ 733, + /* U+0930+093C -> U+0931 */ 734, + /* U+0933+093C -> U+0934 */ 735, + /* U+09C7+09BE -> U+09CB */ 751, + /* U+09C7+09D7 -> U+09CC */ 752, + /* U+0B47+0B3E -> U+0B4B */ 770, + /* U+0B47+0B56 -> U+0B48 */ 769, + /* U+0B47+0B57 -> U+0B4C */ 771, + /* U+0B92+0BD7 -> U+0B94 */ 775, + /* U+0BC6+0BBE -> U+0BCA */ 776, + /* U+0BC6+0BD7 -> U+0BCC */ 778, + /* U+0BC7+0BBE -> U+0BCB */ 777, + /* U+0C46+0C56 -> U+0C48 */ 780, + /* U+0CBF+0CD5 -> U+0CC0 */ 785, + /* U+0CC6+0CC2 -> U+0CCA */ 788, + /* U+0CC6+0CD5 -> U+0CC7 */ 786, + /* U+0CC6+0CD6 -> U+0CC8 */ 787, + /* U+0CCA+0CD5 -> U+0CCB */ 789, + /* U+0D46+0D3E -> U+0D4A */ 793, + /* U+0D46+0D57 -> U+0D4C */ 795, + /* U+0D47+0D3E -> U+0D4B */ 794, + /* U+0DD9+0DCA -> U+0DDA */ 798, + /* U+0DD9+0DCF -> U+0DDC */ 799, + /* U+0DD9+0DDF -> 
U+0DDE */ 801, + /* U+0DDC+0DCA -> U+0DDD */ 800, + /* U+1025+102E -> U+1026 */ 859, + /* U+1B05+1B35 -> U+1B06 */ 904, + /* U+1B07+1B35 -> U+1B08 */ 905, + /* U+1B09+1B35 -> U+1B0A */ 906, + /* U+1B0B+1B35 -> U+1B0C */ 907, + /* U+1B0D+1B35 -> U+1B0E */ 908, + /* U+1B11+1B35 -> U+1B12 */ 909, + /* U+1B3A+1B35 -> U+1B3B */ 911, + /* U+1B3C+1B35 -> U+1B3D */ 912, + /* U+1B3E+1B35 -> U+1B40 */ 913, + /* U+1B3F+1B35 -> U+1B41 */ 914, + /* U+1B42+1B35 -> U+1B43 */ 915, + /* U+1E36+0304 -> U+1E38 */ 1176, + /* U+1E37+0304 -> U+1E39 */ 1177, + /* U+1E5A+0304 -> U+1E5C */ 1212, + /* U+1E5B+0304 -> U+1E5D */ 1213, + /* U+1E62+0307 -> U+1E68 */ 1224, + /* U+1E63+0307 -> U+1E69 */ 1225, + /* U+1EA0+0302 -> U+1EAC */ 1288, + /* U+1EA0+0306 -> U+1EB6 */ 1298, + /* U+1EA1+0302 -> U+1EAD */ 1289, + /* U+1EA1+0306 -> U+1EB7 */ 1299, + /* U+1EB8+0302 -> U+1EC6 */ 1314, + /* U+1EB9+0302 -> U+1EC7 */ 1315, + /* U+1ECC+0302 -> U+1ED8 */ 1332, + /* U+1ECD+0302 -> U+1ED9 */ 1333, + /* U+1F00+0300 -> U+1F02 */ 1368, + /* U+1F00+0301 -> U+1F04 */ 1370, + /* U+1F00+0342 -> U+1F06 */ 1372, + /* U+1F00+0345 -> U+1F80 */ 1480, + /* U+1F01+0300 -> U+1F03 */ 1369, + /* U+1F01+0301 -> U+1F05 */ 1371, + /* U+1F01+0342 -> U+1F07 */ 1373, + /* U+1F01+0345 -> U+1F81 */ 1481, + /* U+1F02+0345 -> U+1F82 */ 1482, + /* U+1F03+0345 -> U+1F83 */ 1483, + /* U+1F04+0345 -> U+1F84 */ 1484, + /* U+1F05+0345 -> U+1F85 */ 1485, + /* U+1F06+0345 -> U+1F86 */ 1486, + /* U+1F07+0345 -> U+1F87 */ 1487, + /* U+1F08+0300 -> U+1F0A */ 1376, + /* U+1F08+0301 -> U+1F0C */ 1378, + /* U+1F08+0342 -> U+1F0E */ 1380, + /* U+1F08+0345 -> U+1F88 */ 1488, + /* U+1F09+0300 -> U+1F0B */ 1377, + /* U+1F09+0301 -> U+1F0D */ 1379, + /* U+1F09+0342 -> U+1F0F */ 1381, + /* U+1F09+0345 -> U+1F89 */ 1489, + /* U+1F0A+0345 -> U+1F8A */ 1490, + /* U+1F0B+0345 -> U+1F8B */ 1491, + /* U+1F0C+0345 -> U+1F8C */ 1492, + /* U+1F0D+0345 -> U+1F8D */ 1493, + /* U+1F0E+0345 -> U+1F8E */ 1494, + /* U+1F0F+0345 -> U+1F8F */ 1495, + /* U+1F10+0300 
-> U+1F12 */ 1384, + /* U+1F10+0301 -> U+1F14 */ 1386, + /* U+1F11+0300 -> U+1F13 */ 1385, + /* U+1F11+0301 -> U+1F15 */ 1387, + /* U+1F18+0300 -> U+1F1A */ 1390, + /* U+1F18+0301 -> U+1F1C */ 1392, + /* U+1F19+0300 -> U+1F1B */ 1391, + /* U+1F19+0301 -> U+1F1D */ 1393, + /* U+1F20+0300 -> U+1F22 */ 1396, + /* U+1F20+0301 -> U+1F24 */ 1398, + /* U+1F20+0342 -> U+1F26 */ 1400, + /* U+1F20+0345 -> U+1F90 */ 1496, + /* U+1F21+0300 -> U+1F23 */ 1397, + /* U+1F21+0301 -> U+1F25 */ 1399, + /* U+1F21+0342 -> U+1F27 */ 1401, + /* U+1F21+0345 -> U+1F91 */ 1497, + /* U+1F22+0345 -> U+1F92 */ 1498, + /* U+1F23+0345 -> U+1F93 */ 1499, + /* U+1F24+0345 -> U+1F94 */ 1500, + /* U+1F25+0345 -> U+1F95 */ 1501, + /* U+1F26+0345 -> U+1F96 */ 1502, + /* U+1F27+0345 -> U+1F97 */ 1503, + /* U+1F28+0300 -> U+1F2A */ 1404, + /* U+1F28+0301 -> U+1F2C */ 1406, + /* U+1F28+0342 -> U+1F2E */ 1408, + /* U+1F28+0345 -> U+1F98 */ 1504, + /* U+1F29+0300 -> U+1F2B */ 1405, + /* U+1F29+0301 -> U+1F2D */ 1407, + /* U+1F29+0342 -> U+1F2F */ 1409, + /* U+1F29+0345 -> U+1F99 */ 1505, + /* U+1F2A+0345 -> U+1F9A */ 1506, + /* U+1F2B+0345 -> U+1F9B */ 1507, + /* U+1F2C+0345 -> U+1F9C */ 1508, + /* U+1F2D+0345 -> U+1F9D */ 1509, + /* U+1F2E+0345 -> U+1F9E */ 1510, + /* U+1F2F+0345 -> U+1F9F */ 1511, + /* U+1F30+0300 -> U+1F32 */ 1412, + /* U+1F30+0301 -> U+1F34 */ 1414, + /* U+1F30+0342 -> U+1F36 */ 1416, + /* U+1F31+0300 -> U+1F33 */ 1413, + /* U+1F31+0301 -> U+1F35 */ 1415, + /* U+1F31+0342 -> U+1F37 */ 1417, + /* U+1F38+0300 -> U+1F3A */ 1420, + /* U+1F38+0301 -> U+1F3C */ 1422, + /* U+1F38+0342 -> U+1F3E */ 1424, + /* U+1F39+0300 -> U+1F3B */ 1421, + /* U+1F39+0301 -> U+1F3D */ 1423, + /* U+1F39+0342 -> U+1F3F */ 1425, + /* U+1F40+0300 -> U+1F42 */ 1428, + /* U+1F40+0301 -> U+1F44 */ 1430, + /* U+1F41+0300 -> U+1F43 */ 1429, + /* U+1F41+0301 -> U+1F45 */ 1431, + /* U+1F48+0300 -> U+1F4A */ 1434, + /* U+1F48+0301 -> U+1F4C */ 1436, + /* U+1F49+0300 -> U+1F4B */ 1435, + /* U+1F49+0301 -> U+1F4D */ 1437, 
+ /* U+1F50+0300 -> U+1F52 */ 1440, + /* U+1F50+0301 -> U+1F54 */ 1442, + /* U+1F50+0342 -> U+1F56 */ 1444, + /* U+1F51+0300 -> U+1F53 */ 1441, + /* U+1F51+0301 -> U+1F55 */ 1443, + /* U+1F51+0342 -> U+1F57 */ 1445, + /* U+1F59+0300 -> U+1F5B */ 1447, + /* U+1F59+0301 -> U+1F5D */ 1448, + /* U+1F59+0342 -> U+1F5F */ 1449, + /* U+1F60+0300 -> U+1F62 */ 1452, + /* U+1F60+0301 -> U+1F64 */ 1454, + /* U+1F60+0342 -> U+1F66 */ 1456, + /* U+1F60+0345 -> U+1FA0 */ 1512, + /* U+1F61+0300 -> U+1F63 */ 1453, + /* U+1F61+0301 -> U+1F65 */ 1455, + /* U+1F61+0342 -> U+1F67 */ 1457, + /* U+1F61+0345 -> U+1FA1 */ 1513, + /* U+1F62+0345 -> U+1FA2 */ 1514, + /* U+1F63+0345 -> U+1FA3 */ 1515, + /* U+1F64+0345 -> U+1FA4 */ 1516, + /* U+1F65+0345 -> U+1FA5 */ 1517, + /* U+1F66+0345 -> U+1FA6 */ 1518, + /* U+1F67+0345 -> U+1FA7 */ 1519, + /* U+1F68+0300 -> U+1F6A */ 1460, + /* U+1F68+0301 -> U+1F6C */ 1462, + /* U+1F68+0342 -> U+1F6E */ 1464, + /* U+1F68+0345 -> U+1FA8 */ 1520, + /* U+1F69+0300 -> U+1F6B */ 1461, + /* U+1F69+0301 -> U+1F6D */ 1463, + /* U+1F69+0342 -> U+1F6F */ 1465, + /* U+1F69+0345 -> U+1FA9 */ 1521, + /* U+1F6A+0345 -> U+1FAA */ 1522, + /* U+1F6B+0345 -> U+1FAB */ 1523, + /* U+1F6C+0345 -> U+1FAC */ 1524, + /* U+1F6D+0345 -> U+1FAD */ 1525, + /* U+1F6E+0345 -> U+1FAE */ 1526, + /* U+1F6F+0345 -> U+1FAF */ 1527, + /* U+1F70+0345 -> U+1FB2 */ 1530, + /* U+1F74+0345 -> U+1FC2 */ 1545, + /* U+1F7C+0345 -> U+1FF2 */ 1587, + /* U+1FB6+0345 -> U+1FB7 */ 1534, + /* U+1FBF+0300 -> U+1FCD */ 1555, + /* U+1FBF+0301 -> U+1FCE */ 1556, + /* U+1FBF+0342 -> U+1FCF */ 1557, + /* U+1FC6+0345 -> U+1FC7 */ 1549, + /* U+1FF6+0345 -> U+1FF7 */ 1591, + /* U+1FFE+0300 -> U+1FDD */ 1568, + /* U+1FFE+0301 -> U+1FDE */ 1569, + /* U+1FFE+0342 -> U+1FDF */ 1570, + /* U+2190+0338 -> U+219A */ 1801, + /* U+2192+0338 -> U+219B */ 1802, + /* U+2194+0338 -> U+21AE */ 1803, + /* U+21D0+0338 -> U+21CD */ 1804, + /* U+21D2+0338 -> U+21CF */ 1806, + /* U+21D4+0338 -> U+21CE */ 1805, + /* U+2203+0338 -> 
U+2204 */ 1807, + /* U+2208+0338 -> U+2209 */ 1808, + /* U+220B+0338 -> U+220C */ 1809, + /* U+2223+0338 -> U+2224 */ 1810, + /* U+2225+0338 -> U+2226 */ 1811, + /* U+223C+0338 -> U+2241 */ 1816, + /* U+2243+0338 -> U+2244 */ 1817, + /* U+2245+0338 -> U+2247 */ 1818, + /* U+2248+0338 -> U+2249 */ 1819, + /* U+224D+0338 -> U+226D */ 1822, + /* U+2261+0338 -> U+2262 */ 1821, + /* U+2264+0338 -> U+2270 */ 1825, + /* U+2265+0338 -> U+2271 */ 1826, + /* U+2272+0338 -> U+2274 */ 1827, + /* U+2273+0338 -> U+2275 */ 1828, + /* U+2276+0338 -> U+2278 */ 1829, + /* U+2277+0338 -> U+2279 */ 1830, + /* U+227A+0338 -> U+2280 */ 1831, + /* U+227B+0338 -> U+2281 */ 1832, + /* U+227C+0338 -> U+22E0 */ 1841, + /* U+227D+0338 -> U+22E1 */ 1842, + /* U+2282+0338 -> U+2284 */ 1833, + /* U+2283+0338 -> U+2285 */ 1834, + /* U+2286+0338 -> U+2288 */ 1835, + /* U+2287+0338 -> U+2289 */ 1836, + /* U+2291+0338 -> U+22E2 */ 1843, + /* U+2292+0338 -> U+22E3 */ 1844, + /* U+22A2+0338 -> U+22AC */ 1837, + /* U+22A8+0338 -> U+22AD */ 1838, + /* U+22A9+0338 -> U+22AE */ 1839, + /* U+22AB+0338 -> U+22AF */ 1840, + /* U+22B2+0338 -> U+22EA */ 1845, + /* U+22B3+0338 -> U+22EB */ 1846, + /* U+22B4+0338 -> U+22EC */ 1847, + /* U+22B5+0338 -> U+22ED */ 1848, + /* U+3046+3099 -> U+3094 */ 2286, + /* U+304B+3099 -> U+304C */ 2261, + /* U+304D+3099 -> U+304E */ 2262, + /* U+304F+3099 -> U+3050 */ 2263, + /* U+3051+3099 -> U+3052 */ 2264, + /* U+3053+3099 -> U+3054 */ 2265, + /* U+3055+3099 -> U+3056 */ 2266, + /* U+3057+3099 -> U+3058 */ 2267, + /* U+3059+3099 -> U+305A */ 2268, + /* U+305B+3099 -> U+305C */ 2269, + /* U+305D+3099 -> U+305E */ 2270, + /* U+305F+3099 -> U+3060 */ 2271, + /* U+3061+3099 -> U+3062 */ 2272, + /* U+3064+3099 -> U+3065 */ 2273, + /* U+3066+3099 -> U+3067 */ 2274, + /* U+3068+3099 -> U+3069 */ 2275, + /* U+306F+3099 -> U+3070 */ 2276, + /* U+306F+309A -> U+3071 */ 2277, + /* U+3072+3099 -> U+3073 */ 2278, + /* U+3072+309A -> U+3074 */ 2279, + /* U+3075+3099 -> U+3076 */ 2280, + 
/* U+3075+309A -> U+3077 */ 2281, + /* U+3078+3099 -> U+3079 */ 2282, + /* U+3078+309A -> U+307A */ 2283, + /* U+307B+3099 -> U+307C */ 2284, + /* U+307B+309A -> U+307D */ 2285, + /* U+309D+3099 -> U+309E */ 2291, + /* U+30A6+3099 -> U+30F4 */ 2318, + /* U+30AB+3099 -> U+30AC */ 2293, + /* U+30AD+3099 -> U+30AE */ 2294, + /* U+30AF+3099 -> U+30B0 */ 2295, + /* U+30B1+3099 -> U+30B2 */ 2296, + /* U+30B3+3099 -> U+30B4 */ 2297, + /* U+30B5+3099 -> U+30B6 */ 2298, + /* U+30B7+3099 -> U+30B8 */ 2299, + /* U+30B9+3099 -> U+30BA */ 2300, + /* U+30BB+3099 -> U+30BC */ 2301, + /* U+30BD+3099 -> U+30BE */ 2302, + /* U+30BF+3099 -> U+30C0 */ 2303, + /* U+30C1+3099 -> U+30C2 */ 2304, + /* U+30C4+3099 -> U+30C5 */ 2305, + /* U+30C6+3099 -> U+30C7 */ 2306, + /* U+30C8+3099 -> U+30C9 */ 2307, + /* U+30CF+3099 -> U+30D0 */ 2308, + /* U+30CF+309A -> U+30D1 */ 2309, + /* U+30D2+3099 -> U+30D3 */ 2310, + /* U+30D2+309A -> U+30D4 */ 2311, + /* U+30D5+3099 -> U+30D6 */ 2312, + /* U+30D5+309A -> U+30D7 */ 2313, + /* U+30D8+3099 -> U+30D9 */ 2314, + /* U+30D8+309A -> U+30DA */ 2315, + /* U+30DB+3099 -> U+30DC */ 2316, + /* U+30DB+309A -> U+30DD */ 2317, + /* U+30EF+3099 -> U+30F7 */ 2319, + /* U+30F0+3099 -> U+30F8 */ 2320, + /* U+30F1+3099 -> U+30F9 */ 2321, + /* U+30F2+3099 -> U+30FA */ 2322, + /* U+30FD+3099 -> U+30FE */ 2323, + /* U+11099+110BA -> U+1109A */ 4588, + /* U+1109B+110BA -> U+1109C */ 4589, + /* U+110A5+110BA -> U+110AB */ 4590, + /* U+11131+11127 -> U+1112E */ 4596, + /* U+11132+11127 -> U+1112F */ 4597, + /* U+11347+1133E -> U+1134B */ 4609, + /* U+11347+11357 -> U+1134C */ 4610, + /* U+114B9+114B0 -> U+114BC */ 4628, + /* U+114B9+114BA -> U+114BB */ 4627, + /* U+114B9+114BD -> U+114BE */ 4629, + /* U+115B8+115AF -> U+115BA */ 4632, + /* U+115B9+115AF -> U+115BB */ 4633, + /* U+11935+11930 -> U+11938 */ 4642 +}; + +/* Perfect hash function for recomposition */ +static int +Recomp_hash_func(const void *key) +{ + static const int16 h[1883] = { + 772, 773, 621, 32767, 
32767, 387, 653, 196, + 32767, 32767, 855, 463, -19, 651, 32767, 32767, + 32767, 364, 32767, 32767, -108, 32767, 32767, 32767, + 32767, 0, -568, 32767, 32767, 32767, 0, 0, + 0, -103, 364, 0, 210, 732, 0, 0, + -506, 0, 0, 0, 32767, 32767, 0, 32767, + 407, -140, 32767, 409, 32767, 772, 0, 86, + 842, 934, 32767, 32767, -499, -355, 32767, 32767, + 532, 138, 174, -243, 860, 1870, 742, 32767, + 32767, 339, 32767, 1290, 0, 32767, 32767, 0, + -449, -1386, 1633, 560, 561, 32767, 1219, 1004, + 139, -804, 32767, -179, 141, 579, 1586, 32767, + 32767, 32767, 142, 199, 32767, 32767, 143, 0, + 32767, 32767, 314, 896, 32767, 32767, 428, 129, + 286, -58, 0, 68, 32767, 0, 244, -566, + 32767, 32767, 32767, 246, 32767, 32767, 0, 32767, + 32767, 271, -108, 928, 32767, 715, 32767, 32767, + -211, -497, 32767, 0, 1055, 1339, 32767, 0, + 32767, 32767, -968, -144, 32767, 32767, 248, 32767, + -161, 32767, 32767, 282, 32767, -372, 0, 2, + -137, 1116, 32767, 687, 32767, 459, 913, 0, + 461, 879, -816, 443, 32767, 32767, 462, 1089, + 32767, 1054, 0, 314, 447, -26, 480, 32767, + 64, 0, 0, 112, 32767, 66, 0, 646, + 603, 22, -292, 0, 710, 475, 32767, 24, + -781, 32767, 32767, 32767, 281, 307, 32767, 1289, + 32767, 0, 1064, -149, 454, 118, 32767, 32767, + 0, 32767, -126, 0, 32767, 32767, 858, 32767, + 32767, 32767, 1029, 886, 665, 209, 0, 26, + 359, 0, 0, -108, -508, -603, 894, 906, + 32767, 32767, 14, 0, 0, 534, 984, 876, + 32767, -93, 110, -367, 167, 843, 32767, 32767, + -947, -290, 169, 0, 0, 32767, -42, 564, + 0, -927, 32767, 817, 32767, 32767, 32767, 110, + 0, 32767, 32767, -38, 32767, 32767, -101, 694, + -142, 190, 191, 1288, 32767, -687, 194, -579, + 534, -452, 0, -72, 536, 765, 823, 266, + -259, 684, 767, 32767, 654, 32767, 32767, 64, + 920, 32767, 32767, 32767, 0, 1653, 0, 0, + 32767, 32767, -452, -222, 855, 0, 32767, -1153, + 127, 490, 449, 863, 32767, -144, 32767, -379, + 545, 32767, 32767, 32767, 530, 32767, 32767, 1331, + 611, -612, 332, 545, -73, 0, 604, 201, + 32767, -279, 338, 836, 
340, 408, 32767, -60, + -358, 32767, 343, 69, 707, 0, -129, 582, + 32767, 0, 32767, 96, 392, 490, 639, 157, + -4, 406, 32767, 32767, -571, 1077, 546, 32767, + 551, 0, 0, 0, 32767, 32767, 348, 32767, + 498, -181, 0, -433, 1057, 260, 0, 32767, + 32767, 397, 32767, 816, -130, 32767, 624, 0, + 0, 32767, 32767, 32767, 485, 0, 32767, 32767, + 32767, 32767, 32767, 0, 32767, 32767, 32767, 1222, + -230, 32767, 797, -538, 32767, 974, 32767, 32767, + 831, 70, -658, 145, 0, 147, 0, 32767, + 1295, 32767, 0, 0, 895, 0, 0, -385, + 491, -287, 32767, -587, 32767, 32767, 32767, 813, + -471, -13, 32767, 32767, 32767, 0, 203, 411, + 470, 0, -546, -179, 146, 0, 0, 32767, + -468, 32767, 0, 0, 32767, 32767, 32767, 211, + 32767, 32767, 0, 32767, 0, 52, 32767, 0, + 32767, 0, 692, 990, 32767, 32767, 32767, 56, + -507, 784, 951, 0, 32767, 0, 697, 32767, + 187, 0, 32767, 32767, 430, 1209, 682, 32767, + 130, 0, -25, 0, -1006, 0, 32767, 214, + 433, 22, 0, -1119, 32767, 285, 32767, 32767, + 32767, 216, 32767, 32767, 32767, 217, 527, 32767, + 32767, 32767, 829, 485, 419, 717, 620, 731, + 32767, 470, 0, -145, -620, 1162, -644, 848, + 287, -632, 32767, 32767, 32767, 32767, 381, 32767, + 510, 511, -554, -2, 32767, 0, 0, 698, + 32767, 32767, 436, 1154, 32767, 463, 32767, 32767, + 627, 517, 32767, 32767, 854, 579, 723, 396, + 110, -42, 354, 32767, 664, 32767, 32767, 0, + 0, 32767, 65, -163, 67, 140, 69, 341, + 70, 71, 402, 73, 623, 544, 624, 417, + -1375, 648, 32767, -26, 904, 0, 548, 0, + 0, 32767, 32767, 855, 32767, 488, -524, 599, + 130, 131, 32767, 32767, 542, -1110, -324, -462, + 32767, -405, -440, 0, 0, 629, 850, 0, + 741, 257, 258, 32767, 32767, 0, 32767, 923, + 0, 32767, 0, 32767, 1559, 32767, 32767, 32767, + 671, 32767, 134, 32767, 32767, -336, -104, 576, + 577, 829, 32767, 32767, 762, 902, 32767, 0, + 32767, 0, 1506, 887, 32767, 636, 601, 2465, + 426, 0, 236, 317, 427, 968, 32767, -975, + -559, -343, 341, 32767, 937, 241, 0, 32767, + 32767, 547, 32767, 32767, 32767, 32767, 32767, 789, + 0, 
32767, 32767, 32767, 0, 0, 0, 32767, + -192, 859, 1185, 1153, 69, 32767, 32767, 32767, + -539, 32767, 32767, 0, 32767, 32767, 32767, 32767, + 640, 578, 32767, 32767, -766, 32767, 32767, 32767, + 32767, 1050, -572, 32767, 32767, 32767, 32767, 1268, + 32767, 32767, 32767, 754, 32767, 32767, 1640, 179, + 804, 32767, 32767, 32767, 32767, 0, 684, 943, + 1006, 32767, 32767, 652, 0, 32767, 1041, 32767, + 718, 791, 32767, 274, 697, 32767, 32767, 0, + 32767, 32767, 32767, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 735, + 0, 32767, 32767, 32767, 275, 358, 688, 32767, + 32767, 32767, 548, -87, 770, 32767, -42, 0, + 551, 32767, 691, 222, 32767, 32767, 32767, 32767, + 0, 1273, 403, -121, 806, 553, 554, 163, + 32767, 32767, 892, 825, 32767, 32767, -490, 32767, + 32767, 32767, 32767, 32767, -109, 744, 910, 32767, + 91, 32767, 32767, 0, 0, 32767, 32767, 32767, + 1521, 50, 701, 32767, 32767, 32767, 32767, 164, + 658, 32767, 288, 0, 32767, 0, 51, 0, + 32767, 32767, 32767, 32767, 555, 1547, 32767, 32767, + 595, 585, 429, 32767, -80, 32767, 1258, 0, + 540, 486, -434, 865, 0, 192, 0, 884, + 0, 0, 0, 175, 555, 0, 32767, 32767, + 0, 32767, -566, 866, 591, 32767, 32767, 32767, + 32767, 32767, 496, 495, -215, 32767, 849, -772, + 32767, 32767, 502, 178, 483, 32767, 912, 793, + 794, 0, 32767, 32767, 32767, -556, 499, 838, + 32767, 32767, -506, 331, 0, 0, -1096, 512, + 880, 0, 774, -338, 649, 32767, 270, 32767, + 32767, -624, 328, 459, 32767, 32767, 32767, 32767, + 329, -201, -835, 813, -879, 560, 0, -212, + -114, 35, -494, 37, 523, 653, 751, -653, + -743, 32767, 1356, 818, 32767, 32767, 856, 0, + 44, 902, 0, 0, 0, 0, 32767, -26, + 526, 795, 456, 32767, 104, -209, -341, 133, + -372, 0, 45, 110, 111, 0, 511, 47, + 114, 32767, 32767, 93, 48, 116, -1031, -279, + 32767, 192, 0, 32767, 453, 415, 0, -190, + 32767, 471, 240, 175, 29, 665, 684, 0, + -11, -95, -344, 32767, 245, 148, 0, 530, + 0, 1185, -615, -712, 693, 784, 32767, 0, + -776, 32767, 32767, -813, 0, 
0, 0, 207, + 208, 32767, 674, 32767, 742, -289, 249, 32767, + 520, 929, -50, 781, 0, -778, 32767, 0, + 302, 32767, 720, -465, 0, 32767, 32767, 32767, + 0, 0, 32767, 833, 328, 806, 32767, -403, + 0, 32767, -77, 32767, 0, 441, 930, 32767, + 643, 0, 32767, 1938, 0, 1334, 381, 32767, + 216, 32767, 32767, 0, 32767, 484, 383, 0, + 242, 395, 0, 32767, 32767, 32767, -781, 355, + 356, 32767, 292, 706, 32767, 32767, 32767, 32767, + 32767, -410, 32767, 32767, 782, 32767, 189, 32767, + 32767, 943, 0, -212, 407, 335, 0, 135, + 32767, 616, 0, -497, 0, -67, 853, 32767, + 700, 32767, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 459, -48, 32767, 58, 0, + -856, 1017, 32767, 59, 916, -731, 32767, 940, + -855, 347, 650, 0, 678, 32767, 0, 32767, + 32767, 530, 32767, 0, -80, 32767, -730, 32767, + 1214, 799, 58, 651, 841, 0, 0, -589, + -1530, -478, 651, 652, 93, 576, -1215, 32767, + 125, 32767, 1279, 32767, 32767, 0, 32767, 0, + -367, 416, -1236, 32767, 418, 32767, 815, 558, + 559, 781, 419, 32767, 739, 32767, 0, 32767, + 128, 570, 1349, -298, -66, 0, 147, -488, + 32767, 590, 189, 274, 524, 32767, 1082, -209, + 32767, 423, 32767, 32767, 975, 573, 32767, 424, + 32767, 32767, 1241, 32767, 32767, 32767, 32767, 32767, + 612, 391, 32767, 0, -803, 1004, -561, 32767, + 32767, 735, 870, 32767, 0, 32767, 32767, -123, + 99, 210, 600, 1294, 109, 1053, 32767, 307, + 834, 32767, 0, 1651, 32767, 644, 32767, 32767, + 0, 32767, -801, 385, 379, 32767, -368, 32767, + 32767, 830, 0, 32767, 32767, 739, 371, 372, + -275, 32767, 32767, 331, -780, 32767, 0, 1229, + -1462, 913, 266, 827, 125, 32767, 32767, 32767, + 393, 32767, 631, -33, -883, -661, -204, 6, + -19, 257, 8, 9, 118, 519, 615, -541, + -893, 0, 32767, 0, 1156, 15, 900, 32767, + 32767, 32767, 32767, 32767, 32767, 1022, 376, 0, + 32767, 32767, -972, 676, 840, -661, 631, 58, + 0, 17, 32767, 0, -799, 82, 0, 32767, + 32767, 680, 32767, 905, 0, 0, 32767, 32767, + 0, 0, 32767, 0, 828, 386, 802, 0, + 146, 0, 148, 32767, -1146, 0, 150, 151, + 
-743, 153, 154, 32767, 32767, 442, 32767, 743, + 0, 0, 746, 0, 32767, 32767, 32767, 98, + 32767, 157, 0, 696, 0, 32767, 32767, -294, + 32767, 158, 159, 32767, 0, 32767, 160, 32767, + 933, 32767, 32767, -50, 759, 824, 162, 672, + 32767, 356, 0, 356, 32767, 32767, 0, 0, + 656, 692, 253, 254, -374, 102, 256, 32767, + 0, 0, 32767, 32767, 259, 32767, 63, 260, + 510, 261, 32767, 0, 32767, 1061, 32767, 521, + 32767, 32767, 32767, 32767, 32767, 32767, 316, 317, + 846, 0, 32767, -500, 318, 0, 32767, 32767, + 263, 0, 790, 872, 32767, 32767, 32767, 2171, + 264, 32767, 32767, 32767, 32767, 486, 334, 465, + 32767, 466, 32767, 444, 606, 32767, 0, 445, + 320, -317, 0, 520, 322, 718, 32767, 32767, + 32767, 0, 1013, 32767, 32767, 32767, 32767, 32767, + 32767, 611, 32767, 0, 0, 32767, 32767, -120, + 156, 613, 0, 0, 32767, -68, 32767, 622, + 32767, 32767, 32767, 32767, 32767, 455, 32767, 32767, + 32767, 403, 533, 0, -161, 405, 95, 96, + 32767, 97, 32767, 0, 29, 0, 32767, 32767, + 30, 32767, 99, 32767, 32767, 0, 161, 32767, + 97, 0, 32, 32767, 32767, 0, 0, 315, + 32767, 32767, 414, 966, 0, 585, 32767, 32767, + -616, -256, 171, 172, 666, 101, 562, 563, + 32767, 95, 0, 0, 1492, 390, -251, 103, + 32767, 0, 32767, 188, 1487, 32767, 0, 0, + 586, 668, -126, 0, 0, 32767, 32767, 204, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 656, 32767, 32767, + 599, 0, 222, 32767, 0, 1368, -412, 435, + 32767, 936, 32767, -17, 32767, 832, 32767, 437, + 0, -518, 787, 32767, 864, -449, 0, 636, + 713, 206, 592, 572, 0, 483, -139, 32767, + 32767, 180, 818, 32767, 32767, 1304, 0, 32767, + 274, 0, 0, 0, 0, 705, 32767, 32767, + 32767, 0, -272, 0, 502, 503, 319, 0, + 32767, 0, 13, 32767, 32767, 0, 32767, 270, + 737, 0, 32767, 32767, 32767, 901, 32767, 616, + 180, 32767, 721, 353, 32767, 0, 32767, 32767, + -199, 0, 280, 788, 32767, 940, 32767, 51, + 0, 400, 53, 0, 54, -637, 0, -453, + 0, 0, 0, 380, 0, 32767, 504, 0, + 2049, 0, -964, 32767, 0, 32767, 32767, 32767, + 
32767, 32767, 32767, 798, 32767, 32767, 32767, 0, + 538, 488, 0, 32767, -528, 57, 819, 32767, + 32767, 1244, 0, 488, 739, 908, 32767, 32767, + 0, 32767, 32767, 0, 55, 533, 0, 32767, + 814, 0, 32767, 458, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 776, 777, 920, 0, + 0, 755, 32767, 0, 32767, 32767, 0, 32767, + 55, -954, 0, 372, 166, 218, 165, 857, + 221, 675, 0, 223, 224, -155, 226, 32767, + 1851, 227, 32767, 32767, 1192, 0, 229, 0, + -72, 0, 865, 0, 0, -330, 0, 683, + 32767, -550, -196, 725, -573, 293, 102, 32767, + -589, 296, 297, 298, 231, -256, 300, 32767, + 32767, 301, 233, 868, 32767, 234, 0, 811, + 1187, 32767, 32767, 0, 32767, 518, 0, 361, + 362, 466, 0, 365, 32767, -179, 366, 367, + 874, 369, 305, 0, 32767, 0, 32767, 0, + 32767, 2000, 1215, 451, 652, 0, 0, 799, + 32767, 32767, 32767 + }; + + const unsigned char *k = (const unsigned char *) key; + size_t keylen = 8; + uint32 a = 0; + uint32 b = 0; + + while (keylen--) + { + unsigned char c = *k++; + + a = a * 257 + c; + b = b * 17 + c; + } + return h[a % 1883] + h[b % 1883]; +} + +/* Hash lookup information for recomposition */ +static const pg_unicode_recompinfo UnicodeRecompInfo = +{ + RecompInverseLookup, + Recomp_hash_func, + 941 +}; diff --git a/src/include/common/unicode_normprops_table.h b/src/include/common/unicode_normprops_table.h index 93a2e55b75837..8c310f10d79c0 100644 --- a/src/include/common/unicode_normprops_table.h +++ b/src/include/common/unicode_normprops_table.h @@ -3,7 +3,8 @@ #include "common/unicode_norm.h" /* - * We use a bit field here to save space. + * Normalization quick check entry for codepoint. We use a bit field + * here to save space. 
*/ typedef struct { @@ -11,6 +12,17 @@ typedef struct signed int quickcheck:4; /* really UnicodeNormalizationQC */ } pg_unicode_normprops; +/* Typedef for hash function on quick check table */ +typedef int (*qc_hash_func) (const void *key); + +/* Information for quick check lookup with perfect hash function */ +typedef struct +{ + const pg_unicode_normprops *normprops; + qc_hash_func hash; + int num_normprops; +} pg_unicode_norminfo; + static const pg_unicode_normprops UnicodeNormProps_NFC_QC[] = { {0x0300, UNICODE_NORM_QC_MAYBE}, {0x0301, UNICODE_NORM_QC_MAYBE}, @@ -1245,6 +1257,343 @@ static const pg_unicode_normprops UnicodeNormProps_NFC_QC[] = { {0x2FA1D, UNICODE_NORM_QC_NO}, }; +/* Perfect hash function for NFC_QC */ +static int +NFC_QC_hash_func(const void *key) +{ + static const int16 h[2463] = { + 0, -2717, 0, 221, 1293, 223, 1295, 225, + 226, 241, 0, 229, 230, 231, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + -386, 0, 0, 0, 0, 0, 0, 0, + -163, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + -246, -175, 1260, 0, 0, 0, -174, -173, + 0, -172, 0, 0, 0, 0, 0, 0, + 1049, 0, 300, 301, 1071, 0, 1071, 0, + 1071, 1071, 1057, 0, 0, 0, 0, 1061, + 0, -1053, 1664, 0, 2956, 0, 0, -13, + 0, 0, 0, 0, 2156, 0, 0, 0, + 0, 0, 0, 0, 71, 0, 1082, 0, + 1083, 1083, 0, 1084, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 359, 360, 361, + -1091, 363, -762, -130, -129, -128, -127, -126, + 137, -124, -708, -707, -706, -120, -185, -705, + -117, -184, -1307, -114, -113, -112, -111, 0, + 386, 387, 388, 389, -90, 391, 171, 172, + 394, -94, -183, 397, 398, 399, -98, -225, + 402, -1019, -636, -1019, -225, 407, 408, 409, + 410, 411, 674, 413, -171, -170, -169, 417, + 352, -168, 420, 353, -770, 423, 424, 425, + 426, 427, 428, 32767, 239, 239, 239, 239, + 239, 239, 239, 239, 239, 239, 239, 239, + 239, 239, 32767, 32767, 237, 32767, 236, 32767, + 32767, 234, 234, 234, 234, 617, 234, 234, + 234, -2483, 234, -1430, 1526, -1430, 1527, 47, + 48, 
471, 230, 32767, 32767, 32767, 227, 227, + 227, 227, 227, 227, 227, 227, 227, 227, + 227, 227, 227, 227, 227, 227, 227, 227, + -159, 227, 227, 227, 227, 227, 227, 227, + 64, 227, 227, 227, 227, 227, 227, 227, + 227, 227, 227, 227, 227, 227, 227, 227, + 227, 227, 227, 227, 227, 227, 227, 227, + -19, 52, 1487, 227, 227, 227, 53, 54, + 227, 55, 227, 227, 227, 227, 227, 227, + 1276, 227, -989, 32767, 1296, 225, 1296, 225, + 1296, 1296, 1282, 225, 225, 225, 225, 1286, + 225, -828, 1889, 225, 3181, 225, 225, 212, + 225, 225, 225, 225, 2381, 225, 225, 225, + 225, 225, 225, 225, 296, 225, 1307, 225, + 1308, 1308, 225, 1309, 225, 225, 225, 225, + 225, 225, 225, 225, 225, 225, 225, 225, + 225, 225, 225, 225, 225, 584, 585, 586, + -866, 588, -537, 95, 96, 97, 98, 99, + 362, 101, -483, -482, -481, 105, 40, -480, + 108, 41, -1082, 111, 112, 113, 114, 225, + 611, 612, 613, 614, 135, 616, 396, 397, + 619, 131, 42, 622, 623, 624, 127, 0, + 627, -794, -411, -794, 0, 632, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + -272, 32767, 32767, 32767, 0, 32767, 32767, 32767, + 32767, 32767, -166, -165, 32767, 32767, 32767, 32767, + -164, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 397, 32767, 396, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 386, + 0, 386, 386, 386, 386, 386, 386, 386, + 223, 386, 386, 386, 32767, 385, 385, 385, + 385, 385, 32767, 384, 32767, 383, 383, 32767, + 382, 382, 32767, 381, 381, 381, 381, 381, + 135, 206, 1641, 381, 32767, 32767, 32767, 32767, + 32767, 32767, -160, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 1148, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 
32767, 0, + 32767, 32767, 32767, 0, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, -257, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, -910, -910, 32767, 32767, + 0, 32767, 0, 32767, 0, 32767, 0, 32767, + 147, 32767, 0, 32767, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 143, 32767, 144, 32767, 145, + 32767, 146, 32767, 0, 32767, 148, 32767, 149, + 32767, 32767, 32767, -160, 32767, 32767, 32767, 32767, + 32767, 32767, 15, 32767, 32767, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 145, 32767, 144, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, -148, 32767, 32767, 32767, 32767, + 32767, 32767, 2009, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 32767, 32767, 135, -918, 32767, + 151, 32767, 32767, 0, 1, 2, 3, 4, + 133, 5, 6, 7, 8, 9, 10, 11, + 32767, 32767, -1248, 32767, 13, 154, 188, 188, + 32767, 32767, 32767, 32767, 32767, 155, 16, 32767, + 32767, 32767, 32767, 32767, 32767, -1853, -1054, 18, + -1052, -1051, -1036, 22, 32767, 157, 32767, 28, + 23, 1077, 673, 25, -2930, 0, 32767, 32767, + 32767, 32767, 32767, 27, 32767, 155, 32767, 154, + 32767, 32767, -62, 28, -42, 30, -1051, 32, + -1050, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 34, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 129, 32767, 32767, 32767, 
32767, 32767, 32767, + 32767, 32767, 672, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 0, 32767, + 32767, 32767, 32767, 32767, -156, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -155, 32767, 32767, + 32767, 0, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 73, 32767, 32767, 32767, 32767, 74, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 675, + 32767, 32767, 32767, 32767, 32767, 75, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 165, 32767, 32767, 32767, 166, 167, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 170, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 
32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 689, 690, 691, 692, 693, 694, 695, + 696, 697, 698, 699, 700, 701, 702, 703, + 704, 705, 706, 707, 708, 709, 710, 711, + 712, 713, 714, 715, 716, 717, 718, 719, + 720, 721, 722, -304, -303, -302, -301, -300, + -299, -298, -297, 930, -295, -294, -293, -292, + -291, -290, -289, -288, -287, -286, -285, -284, + -283, -282, -281, -280, -279, -278, -277, -276, + -275, 753, 754, 755, 646, 757, -712, -1765, + 952, -712, 2244, -712, 2245, 765, 766, 767, + 768, 125, 770, 771, 772, 773, 774, 775, + 603, 777, 778, 779, 780, 781, 782, 783, + 784, 2011, 786, 787, 788, 789, 790, 791, + 792, 793, 794, 795, 796, 797, 798, 799, + 800, 801, 802, 803, 804, 805, 806, 603, + 603, 809, 603, 811, 603, 603, 814, 815, + 816, 817, 435, 819, 820, 821, 3539, 823, + 603, -468, 603, -468, 603, 603, 589, 831, + 603, 603, 603, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 848, 849, 850, 851, 852, 1239, 854, 855, + 856, 857, 858, 859, 860, 1024, 862, 863, + 864, 865, 866, 867, 868, 869, 870, 871, + 872, 873, 874, 875, 876, 877, 878, 879, + 880, 881, 882, 883, 884, 1131, 1061, -373, + 888, 889, 890, 1065, 1065, 893, 1066, 895, + 896, 897, 898, 899, 900, -148, 902, 603, + 603, -166, 906, -164, 908, -162, -161, -146, + 912, 913, 914, 915, -145, 917, 1971, -745, + 920, -2035, 922, 923, 937, 925, 926, 927, + 928, -1227, 930, 931, 932, 933, 934, 935, + 936, 866, 938, -143, 940, -142, -141, 943, + -140, 32767, 945, 946, 947, 948, 949, 950, + 
951, 952, 953, 954, 955, 956, 957, 958, + 959, 960, 961, -65, -64, -63, -62, -61, + -60, -59, -58, 1169, -56, -55, -54, -53, + -52, -51, -50, -49, -48, -47, -46, -45, + -44, -43, -42, -41, -40, -39, -38, -37, + -36, 992, 993, 994, 885, 996, -473, -1526, + 1191, -473, 2483, -473, 2484, 1004, 1005, 1006, + 1007, 364, 1009, 1010, 1011, 1012, 1013, 1014, + 842, 1016, 1017, 1018, 1019, 1020, 1021, 1022, + 1023, 2250, 1025, 1026, 1027, 1028, 1029, 1030, + 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1043, 1044, 1045, 842, + 842, 1048, 842, 1050, 842, 842, 1053, 1054, + 1055, 1056, 674, 1058, 1059, 1060, 3778, 1062, + 842, -229, 842, -229, 842, 842, 828, 1070, + 842, 842, 842, 1074, 1075, 1076, 1077, 1078, + 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, + 1087, 1088, 1089, 1090, 1091, 1478, 1093, 1094, + 1095, 1096, 1097, 1098, 1099, 1263, 1101, 1102, + 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, + 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, + 1119, 1120, 1121, 1122, 1123, 1370, 1300, -134, + 1127, 1128, 1129, 1304, 1304, 1132, 1305, 1134, + 1135, 1136, 1137, 1138, 1139, 91, 1141, 842, + 842, 73, 1145, 75, 1147, 77, 78, 93, + 1151, 1152, 1153, 1154, 94, 1156, 2210, -506, + 1159, -1796, 1161, 1162, 1176, 1164, 1165, 1166, + 1167, -988, 1169, 1170, 1171, 1172, 1173, 1174, + 1175, 1105, 1177, 96, 1179, 97, 98, 1182, + 99, 1184, 1185, 1186, 1187, 1188, 1189, 1190, + 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, + 1199, 1200, 0, 174, 175, 176, 177, 178, + 179, 180, 181, 1408, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, + 203, 0, 0, 206, 0, 208, 0, 0, + 211, 212, 213, 214, -168, 216, 217, 218, + 2936, 220, 0, -1071, 0, -1071, 0, 0, + -14, 228, 0, 0, 0, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 636, + 251, 252, 253, 254, 255, 256, 257, 421, + 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, + 
275, 276, 277, 278, 279, 280, 281, 528, + 458, -976, 285, 286, 287, 462, 462, 290, + 463, 292, 293, 294, 295, 296, 297, -751, + 299, 0, 0, -769, 303, -767, 305, -765, + -764, -749, 309, 310, 311, 312, -748, 314, + 1368, -1348, 317, -2638, 319, 320, 334, 322, + 323, 324, 325, -1830, 327, 328, 329, 330, + 331, 332, 333, 263, 335, -746, 337, -745, + -744, 340, -743, 342, 343, 344, 345, 346, + 347, 348, 349, 350, 351, 352, 353, 354, + 355, 356, 357, 358, 0, 0, 0, 1453, + 0, 1126, 495, 495, 495, 495, 495, 233, + 495, 1080, 1080, 1080, 495, 561, 1082, 495, + 563, 1687, 495, 495, 495, 495, 385, 0, + 0, 0, 0, 480, 0, 221, 221, 0, + 489, 579, 0, 0, 0, 498, 626, 0, + 1422, 1040, 1424, 631, 0, 0, 0, 0, + 0, -262, 0, 585, 585, 585, 0, 66, + 587, 0, 68, 1192, 0, 0, 0, 0, + 0, 0, 32767, 32767, 32767, 32767, 669, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 670, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 142, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 
32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1027, 1027, 1027, + 1027, 1027, 1027, 1027, 1027, -199, 1027, 1027, + 1027, 1027, 1027, 1027, 1027, 1027, 1027, 1027, + 1027, 1027, 1027, 1027, 1027, 1027, 1027, 1027, + 1027, 1027, 1027, 0, 0, 0, 110, 0, + 1470, 2524, -192, 1473, -1482, 1475, -1481, 0, + 0, 0, 0, 644, 0, 0, 0, 0, + 0, 0, 173, 0, 0, 0, 0, 0, + 0, 0, 0, -1226, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 204, 205, 0, 207, 0, 209, 210, + 0, 0, 0, 0, 383, 0, 0 + }; + + const unsigned char *k = (const unsigned char *) key; + size_t keylen = 4; + uint32 a = 0; + uint32 b = 0; + + while (keylen--) + { + unsigned char c = *k++; + + a = a * 257 + c; + b = b * 17 + c; + } + return h[a % 2463] + h[b % 2463]; +} + +/* Hash lookup information for NFC_QC */ +static const pg_unicode_norminfo UnicodeNormInfo_NFC_QC = { + UnicodeNormProps_NFC_QC, + NFC_QC_hash_func, + 1231 +}; + static const pg_unicode_normprops UnicodeNormProps_NFKC_QC[] = { {0x00A0, UNICODE_NORM_QC_NO}, 
{0x00A8, UNICODE_NORM_QC_NO}, @@ -6165,3 +6514,1262 @@ static const pg_unicode_normprops UnicodeNormProps_NFKC_QC[] = { {0x2FA1C, UNICODE_NORM_QC_NO}, {0x2FA1D, UNICODE_NORM_QC_NO}, }; + +/* Perfect hash function for NFKC_QC */ +static int +NFKC_QC_hash_func(const void *key) +{ + static const int16 h[9837] = { + -2472, -2472, -2472, -2472, -2472, -2472, -2472, -2472, + -2472, -2472, -2472, -2472, -2472, -2472, -2472, -2472, + -2472, -2472, -2472, -2472, -2472, -2472, -2472, -2472, + -2472, -2472, -2472, -2472, -2472, 32767, 32767, 32767, + -2475, -2475, -2475, -2475, -2475, -2475, -2475, -2475, + -2475, -2475, -2475, -2475, -2475, -2475, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 865, 865, 865, 865, 865, 865, 865, + 865, 865, 865, 865, -2255, 32767, -5207, 32767, + -5207, 860, 860, 860, 860, 860, 860, 860, + 860, 860, 4250, 861, 861, 861, 3339, 3339, + 3339, 3339, 3339, 3339, 3339, 3339, 3339, 3339, + 3339, 3339, 3339, 3339, 3339, 3339, 3339, 3339, + 32767, 3338, 3338, 3338, 3338, 3338, 3338, 3338, + 3338, 3338, 3338, 3338, 3338, 3338, 3338, 3338, + 3338, 3338, 3338, 3338, 3338, 3338, 3338, 3338, + 3338, 3338, 3338, 3338, 3338, 3338, 3338, 3338, + 3338, 9, 10, 32767, 11, 12, 0, 32767, + 0, 2913, 2914, 2915, 2916, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 2917, 32767, 2918, -100, + 2919, 2920, 2921, 840, 840, 840, 2922, 0, + 0, 0, 0, 0, 2206, 0, 2923, 0, + 2924, 2925, 2926, 0, 0, 0, -2590, 0, + 0, 0, 0, 0, 0, 0, 2934, 0, + 2474, 2931, 2932, 0, 0, 0, 0, 0, + 14, 805, 0, 0, 2933, 0, 2934, 0, + 2935, 2936, 0, 0, 0, 16, 17, 0, + 0, 0, 0, 0, 0, 0, 0, 18, + 0, 0, 0, 0, 0, 0, 0, 0, + 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, -790, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, -1675, 0, 0, 19, 0, -1679, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, -1694, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 29, 30, 31, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 
32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 724, 2668, 724, 4350, -2633, -2633, + 2533, 2534, 2535, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 2518, 2519, 2520, 1431, 45, 46, + 32767, 32767, 47, 48, 49, 50, 51, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -3011, 53, -1125, -3010, -3010, + 32767, -3334, -1123, -3011, 60, 61, 62, 63, + 32767, 32767, 64, 32767, 65, 32767, 66, 67, + 32767, 32767, 32767, 32767, 32767, 32767, 2268, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 69, 70, + 71, 72, 73, 74, 32767, 32767, 32767, 32767, + 75, 76, 32767, 77, 281, 32767, 32767, 32767, + 32767, 32767, 32767, 811, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 1341, 1342, 1343, 1344, 1345, + 1346, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 86, + 32767, 32767, 32767, 32767, 32767, 4550, 32767, 32767, + 32767, 1135, 32767, 32767, 32767, 32767, 32767, 1130, + 3016, 32767, 3017, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 
32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 677, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 2858, 2859, 651, 2861, -438, + 2863, 2864, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -5305, -5305, -5305, 32767, -5306, + -5306, 32767, 32767, 32767, 2871, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 3022, 3023, 680, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, -272, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 4308, 4309, 4310, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 4311, 4312, 4313, + 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, + 4322, 4323, 4324, 4325, 4326, 4307, 4307, 4307, + 4307, 4307, 4307, 4307, 4307, 4307, 4336, 4337, + 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345, + 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, + 4354, 32767, 32767, 32767, 32767, 4355, 4356, 4357, + 4358, 4359, 4360, 4361, 4362, 4363, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 4364, 4365, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 
32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2202, 0, 0, 0, 59, 0, + 0, 35, 0, 0, 0, 3549, 0, 0, + 0, 0, 0, 3394, 0, 0, 3399, 0, + 0, 0, 0, 0, 0, 0, 0, 2012, + 0, 0, 0, 0, 87, 2022, 0, 7490, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 2255, 0, 2256, 2256, 2256, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 0, 0, + 0, 0, 0, 0, -1759, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 4767, 0, 0, 4772, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 5977, 0, + 892, 32767, 0, 32767, 32767, 0, 0, 32767, + 32767, 2344, 4834, 4835, 4836, 32767, 0, 4840, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 32767, 0, 32767, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 32767, 32767, 0, 32767, 0, 0, 0, 32767, + 32767, 32767, 32767, 3261, 3262, 32767, 3007, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 106, 107, 108, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 
32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 109, 110, 111, 112, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, -2344, + -2344, 0, 32767, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -1642, 1469, -1641, 1469, -1640, 1469, + 1469, 1457, 1469, 1469, 1469, -4254, -4254, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -3359, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4103, + -1478, 0, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, -4254, -2433, -4254, -4254, -4254, -3658, + -4254, -4254, -4254, -4254, -4254, -4254, -4254, -4254, + -4254, -4254, 0, -4253, -4253, -4253, -4253, -4253, + -4253, -4253, -4253, -4253, -678, -677, -676, -675, + -674, -673, -672, -4253, 314, -4253, -4253, -4253, + -4253, -4253, -4253, -4253, -4253, -4253, -4253, -4253, + -4253, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 1464, 1465, 1466, 1467, + 1468, 1469, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 0, 32767, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 827, 828, 829, -2469, -2469, -260, 0, + 0, 32767, 0, 32767, 0, 0, 32767, 0, + 0, 32767, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 3575, 3576, 3577, 3578, 3579, 3580, 3581, 0, + 4567, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2201, 4411, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, -3338, 0, 0, 0, + 0, 0, 0, 0, -3337, 0, -3336, 0, + 0, 0, 0, -3335, 0, 0, -3334, -3333, + -3332, -3331, 0, 0, -3330, 0, 0, 32767, + 0, 0, 13, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 3073, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + -2556, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 3074, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 2355, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -488, -488, -488, -302, -3067, -3067, + -3067, -3067, -488, -488, -488, -488, 2999, -488, + 2999, -488, -488, -488, -3067, -3067, -3067, -488, + -488, -3067, -3067, -3067, -488, -488, -488, 2463, + -488, -488, -488, -301, 2465, -488, 2466, 2467, + -3600, -493, -3599, -488, -3598, -488, -3597, -488, + -488, -500, -488, -488, -488, -488, -488, 2470, + 2471, 2472, -488, -488, -254, -488, -488, -488, + -488, -488, 
-104, -488, -488, -488, -102, -101, + -100, -99, -98, -97, -96, -95, -94, -93, + -92, -488, -488, -488, -488, -488, -488, -488, + -488, -488, -2194, -2194, -2194, -2194, -2194, -2194, + -2194, -2194, -2194, -2194, 5211, 3269, 5213, 3269, + 6895, -88, -88, 5078, 5079, 5080, 1773, -92, + -92, 1773, 1773, 1773, 1773, 1773, 1773, 5072, + 5073, 2865, 5075, 1776, 5077, 5078, 1778, 1778, + 6942, 6943, 1778, 1778, 1778, 5086, 6952, 6953, + 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, + 4007, 5098, 2333, 2334, 2335, 2336, 2337, -3066, + -3066, -3066, 2341, -3066, -3066, 2344, 2345, 2346, + 5114, 317, 2349, 848, 849, 850, 2353, 852, + 853, 854, 855, 856, 857, 858, 859, 860, + 861, 692, 692, 692, 692, 692, 692, 692, + 692, 692, 692, 692, 692, 692, 692, 692, + 692, 692, 692, 692, 692, 692, 692, 692, + 692, 692, 692, 692, 692, 692, 692, 692, + 692, 692, 692, 692, 692, 692, 692, 692, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 3093, 3094, 3095, 3096, 3097, 3098, 3099, + 3100, 3101, 3102, 901, 3104, 3105, 3106, 3048, + 3108, 3109, 3075, 3111, 3112, 3113, -435, 3115, + 3116, 3117, 3118, 3119, -274, 3121, 3122, -276, + 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, + 1120, 3133, 3134, 3135, 3136, 3050, 1116, 3139, + -4350, 3141, 3142, 3143, 3144, 3145, 3146, 3147, + 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, + 3156, 902, 3158, 903, 904, 905, 3162, 3163, + 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, + 3172, 3173, 3174, 3175, 3176, 3177, 32767, 3178, + 3179, 3180, 3181, 3182, 3183, 4943, 3185, 3186, + 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, + 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, + 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, + 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, + 3219, 3220, 3221, 3222, 3223, -1543, 3225, 3226, + -1545, 3228, 3229, 3230, 3231, 
3232, 3233, 3234, + 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, + 3243, 3244, 3245, 3246, 3247, 3248, -1251, -2728, + 3250, 32767, 32767, 3251, 906, 907, 3252, 3253, + 32767, 32767, 910, -1579, -1579, -1579, 32767, 3258, + -1581, 3260, 3261, 3262, 3263, 3264, 3265, 3266, + 3267, 3268, 3269, 32767, 3270, 32767, 3271, 3272, + 3273, 3274, 3275, 3276, 3277, 32767, 3278, 3279, + 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, + 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, + 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, + 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, + 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, + 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, + 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, + 3336, 32767, 3337, 3338, 3339, 3340, 3341, 3342, + 0, 3343, 3344, 3345, 3346, 32767, 32767, 3347, + 3348, 3349, 3350, 3351, 3352, 3353, 3354, 32767, + 3355, 3356, 3357, 3358, 3359, 3360, 3361, 32767, + 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, + 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, + 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, + 3386, 3387, 3388, 3389, 0, 3390, 3391, 3392, + 915, 916, 917, 918, 919, 920, 921, 922, + 923, 924, 925, 926, 927, 928, 929, 930, + 931, 932, 933, 934, 935, 936, 937, 938, + 939, 940, 941, 942, 943, 944, 945, 946, + 947, 948, 949, 950, 951, 952, 953, 954, + 955, 956, 957, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 973, 974, 975, 976, 3449, 3450, + 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, + 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, + 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, + 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, + 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, + 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, + 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, + 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, + 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, + 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, + 3531, 3532, 3533, 3534, 3535, 
3536, 3537, 3538, + 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, + 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, + 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, + 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, + 3571, 3572, 3573, 3574, 3575, 3576, 3577, 6056, + 6057, 6058, 32767, 3581, 3582, 3583, 3584, 3585, + 4157, 4158, 4159, 3589, 4162, -4510, -1558, -1557, + -1556, -1742, -4507, -1553, -4506, -4506, 1562, -1544, + 1563, -1547, 1564, -1545, 1565, -1543, -1542, -1529, + -1540, -1539, -1538, -1537, -1536, -4493, -4493, -4493, + -1532, -1531, -1764, -1529, 3622, -1528, -1527, -1526, + -1909, -1524, -1523, -1522, -1907, -1907, -1907, -1907, + -1907, -1907, -1907, -1907, -1907, -1907, -1907, -1510, + -1509, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 3663, 3664, 3665, + 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, + 3674, 1095, 1096, 1097, 1098, 1099, 1100, 1101, + 3682, 1103, 3684, 1105, 3686, 3687, 3688, 1109, + 1110, 1111, 3692, 1113, 1114, 1115, 1116, 1117, + 1118, 1119, 3700, 1121, 3702, 3703, 3704, 1125, + 1126, 1127, -1809, -1809, -1809, -1809, -1809, -1809, + 3720, 3721, 3722, 3717, 3718, 3719, 3720, 1140, + 1141, 1142, 1143, -1802, 1145, 1146, 1147, 1148, + 3730, -1797, 3732, 1152, 3734, 3735, 1155, 1156, + 3738, 3739, 3740, 3741, 3742, 3743, -1785, -1785, + -1785, -1779, -1324, 1168, 1169, 1170, 1171, 1172, + 3752, 3753, 1175, 1176, 1177, 992, 3758, 3759, + 3760, 3761, 1183, 1184, 1185, 1186, -2300, 1188, + -2298, 1190, 1191, 1192, 3772, 3773, 3774, 1196, + 1197, 3777, 3778, 3779, 1201, 1202, 1203, -1747, + 1205, 1206, 1207, 1021, -1744, 1210, -1743, -1743, + 4325, 1219, 4326, 1216, 4327, 1218, 4328, 1220, + 1221, 1234, 1223, 1224, 1225, 1226, 1227, -1730, + -1730, -1730, 1231, 1232, 999, 1234, 1235, 1236, + 1237, 1238, 855, 1240, 1241, 1242, 857, 857, + 857, 857, 857, 857, 857, 857, 857, 857, + 857, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 2969, 2970, 
2971, 2972, 2973, 2974, + 2975, 2976, 2977, 2978, -4426, -2483, -4426, -2481, + -6106, 878, 879, -4286, -4286, -4286, -978, 888, + 889, -975, -974, -973, -972, -971, -970, -4268, + -4268, -2059, -4268, -968, -4268, -4268, -967, -966, + -6129, -6129, -963, -962, -961, -4268, -6133, -6133, + -4268, -4268, -4268, -4268, -4268, -4268, -4268, -4268, + -3178, -4268, -1502, -1502, -1502, -1502, -1502, 3902, + 3903, 3904, -1502, 3906, 3907, -1502, -1502, -1502, + -4269, 529, -1502, 0, 0, 0, -1502, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, -3194, 221, 222, 223, 224, + -1657, 226, 227, -1657, 229, 230, -1655, 555, + -1655, 234, 235, 236, 732, 238, 239, 240, + 241, 242, 243, -1655, 245, 246, 247, 248, + -1655, 250, -1655, 252, -1655, -1655, -1655, -1655, + -1655, -1655, 259, -1655, -1655, -1655, -1655, 264, + -1655, 266, -1655, 268, -1655, -3620, 271, 272, + -1655, 274, 275, -1655, 277, -1655, -1655, 280, + -1655, 282, 5746, 5747, 5748, 5749, -1655, 288, + -1655, 290, -3335, 3649, 3650, -1515, -1515, -1515, + 1793, 3659, 3660, 1796, 1797, 1798, 1799, 1800, + 1801, -1497, -1497, 712, -1497, 1803, -1497, -1497, + 1804, 1805, -3358, -3358, 1808, 1809, 1810, -1497, + -3362, -3362, -1497, -1497, -1497, -1497, -1497, -1497, + -1497, -1497, -407, -1497, -1497, -1497, -1497, -1497, + -1497, 3667, 3668, -1497, -1497, -1497, 1811, 3677, + 3678, 32767, 1814, 32767, 1815, 32767, 32767, 1816, + 1817, 32767, 32767, 32767, 1818, 1819, 1820, 1821, + -3342, -3342, 1824, 1825, 1826, 1827, 1828, 1829, + 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, + 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, + 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, + 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 
1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1876, -1537, + 1878, 1879, 1880, 1881, 0, 1883, 1884, 0, + 529, 0, 0, 2210, 0, 1889, 1890, 1891, + 2387, 1893, 1894, 1895, 1896, 1897, 1898, 0, + 1900, 1901, 1902, 1903, 0, 1905, 0, 1907, + 0, 0, 0, 0, 0, 0, 1914, 0, + 0, 0, 0, 1919, 0, 1921, 0, 1923, + 0, -1965, 1926, 1927, 0, 1929, 1930, 0, + 1932, 0, 0, 1935, 0, 1937, 7401, 7402, + 7403, 7404, 0, 1943, 0, 1945, 1946, 0, + 1948, 0, 0, 1951, 1952, 1953, 1954, 0, + 1956, 1957, 1958, 1959, 1960, 1961, 1962, 0, + 1964, 1965, 1966, 1967, 0, 1969, 1970, 1971, + 1972, 0, 1974, 0, 1976, 1977, 1978, 1979, + 1980, 1981, 1982, 1983, 1984, 1985, 0, 1987, + 1988, 1989, 1990, 1991, 566, 566, 566, 5141, + 5142, 566, 566, 566, 566, 566, 566, 566, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 8673, 5722, 5722, 5722, 0, 8676, + 5723, 8677, 8678, 2611, 5718, 2612, 5723, 2613, + 5723, 2614, 5723, 5723, 5711, 5723, 5723, 5723, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 895, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 151, 2776, 4254, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1821, 0, + 0, 0, 596, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -2856, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, -2901, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, -1025, 32767, 32767, 32767, + 32767, -2910, 32767, 32767, 32767, 32767, 157, 32767, + 32767, 32767, 32767, 158, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 
32767, 32767, 32767, 32767, + 2359, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 160, 32767, 161, 162, 163, 164, + 165, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 898, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 1428, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 1254, 32767, 32767, 32767, + 32767, 1250, 32767, 32767, 32767, 32767, 1246, 32767, + 32767, 32767, 32767, 1243, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1231, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 1842, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 3177, 1235, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, -4323, + 32767, 32767, 32767, 
32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 0, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 174, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 1830, -112, 1832, -112, 3514, -3469, + -3469, 1697, 1698, 1699, -1608, -3473, -3473, -1608, + -1608, -1608, -1608, -1608, -1608, 1691, 1692, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 
32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, -1623, -1623, -1623, 3541, 3542, -1623, -1623, + -1623, -1623, -1623, -1623, -1623, -1623, -1623, -1623, + -1623, -1623, -1623, -1623, -1623, -1623, -1623, -1623, + -1623, -1623, -1623, -1623, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -766, 2253, 2254, 2255, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1531, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 
32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 0, 0, 32767, 0, 0, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -173, -173, -173, -173, -173, + -173, -173, -173, -173, -173, -173, -173, 3241, + -173, -173, -173, -173, 1709, -173, -173, 1712, + -173, -173, 1713, -496, 1715, -173, -173, -173, + -668, -173, -173, -173, -173, -173, -173, 1726, + -173, -173, -173, -173, 1731, -173, 1733, -173, + 1735, 1736, 1737, 1738, 1739, 1740, -173, 1742, + 1743, 1744, 1745, -173, 1747, -173, 1749, -173, + 1751, 3717, -173, -173, 1755, -173, -173, 1758, + -173, 1760, 1761, -173, 1763, -173, -5636, -5636, + -5636, -5636, 1769, -173, 1771, -173, 3453, -3530, + -3530, 1636, 1637, 1638, -1669, -3534, -3534, -1669, + -1669, -1669, -1669, -1669, -1669, 1630, 1631, -577, + 1633, -1666, 1635, 1636, -1664, -1664, 3500, 3501, + 
-1664, -1664, -1664, 1644, 3510, 3511, 1647, 1648, + 1649, 1650, 1651, 1652, 1653, 1654, 565, 1656, + 1657, 1658, 1659, 1660, 1661, -3502, -3502, 1664, + 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, + 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, + 1713, 1714, 1715, 1716, -1697, 1718, 1719, 1720, + 1721, -160, 1723, 1724, -160, 1726, 1727, -158, + 2052, -158, 1731, 1732, 1733, 2229, 1735, 1736, + 1737, 1738, 1739, 1740, -158, 1742, 1743, 1744, + 1745, -158, 1747, -158, 1749, -158, -158, -158, + -158, -158, -158, 1756, -158, -158, -158, -158, + 1761, -158, 1763, -158, 1765, -158, -2123, 1768, + 1769, -158, 1771, 1772, -158, 1774, -158, -158, + 1777, -158, 1779, 7243, 7244, 7245, 7246, -158, + 1785, -158, 1787, -1838, 5146, 5147, -18, -18, + -18, 3290, 5156, 5157, 3293, 3294, 3295, 3296, + 3297, 3298, 0, 0, 2209, 0, 3300, 0, + 0, 3301, 3302, -1861, -1861, 3305, 3306, 3307, + 0, -1865, -1865, 0, 0, 0, 0, 0, + 0, 0, 0, 1090, 0, 0, 0, 0, + 0, 0, 5164, 5165, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3414, 0, 0, 0, 0, 1882, 0, + 0, 1885, 0, 0, 1886, -323, 1888, 0, + 0, 0, -495, 0, 0, 0, 0, 0, + 0, 1899, 0, 0, 0, 0, 1904, 0, + 1906, 0, 1908, 1909, 1910, 1911, 1912, 1913, + 0, 1915, 1916, 1917, 1918, 0, 1920, 0, + 1922, 0, 1924, 3890, 0, 0, 1928, 0, + 0, 1931, 0, 1933, 1934, 0, 1936, 0, + -5463, -5463, -5463, -5463, 1942, 0, 1944, 0, + 0, 1947, 0, 1949, 1950, 0, 0, 0, + 0, 1955, 0, 0, 0, 0, 0, 0, + 0, 1963, 0, 0, 0, 0, 1968, 0, + 0, 0, 0, 1973, 0, 1975, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1986, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 527, 527, 527, 527, 0, + 528, 528, 528, 528, 
528, 528, 528, 528, + 528, 528, 528, 1998, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1999, 2000, 2001, 2002, 2003, 32767, 32767, 32767, + 32767, 32767, 2004, 32767, 2005, 2006, 2007, 2008, + 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, + 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, + 2025, 2026, 1200, 1200, 32767, 4498, 4499, 2291, + 2032, 2033, 32767, 2034, 32767, 2035, 2036, 32767, + 2037, 2038, 32767, 2039, 2040, 2041, 2042, 2043, + 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, + 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, + 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, + 2068, -1506, -1506, -1506, -1506, -1506, -1506, -1506, + 2076, -2490, 2078, 2079, 2080, 2081, 2082, 2083, + 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, + 2092, 2093, 2094, 2095, -105, -2314, 2098, 2099, + 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, + 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, + 2116, 2117, 2118, 2119, 2120, 5459, 2122, 2123, + 2124, 2125, 2126, 2127, 2128, 5466, 2130, 5467, + 2132, 2133, 2134, 2135, 5471, 2137, 2138, 5473, + 5473, 5473, 5473, 2143, 2144, 5475, 2146, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, + 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, + 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, + 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, + 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, + 2187, 2188, 2189, 2190, 2191, 32767, -726, 2293, + -725, -725, -725, 1357, 1358, 1359, -722, 2201, + 2202, 2203, 2204, 2205, 0, 2207, -715, 2209, + -714, -714, -714, 2213, 2214, 2215, 4806, 2217, + 2218, 2219, 2220, 2221, 2222, 2223, -710, 2225, + -248, -704, -704, 2229, 2230, 2231, 2232, 2233, + 2220, 1430, 2236, 2237, -695, 2239, -694, 2241, + -693, -693, 2244, 
2245, 2246, 2231, 2231, 2249, + 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2239, + 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, + 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, + 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, + 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, + 2290, 2291, 2292, 2293, 3084, 2295, 2296, 2297, + 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, + 2306, 2307, 3983, 2309, 2310, 2292, 2312, 3992, + 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, + 2322, 2323, 2324, 2325, 2326, 2327, 2328, 4023, + 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, + 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, + 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, + 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, + 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, + 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, + 2378, 2379, 2360, 2360, 2360, 2360, 2360, 2360, + 2360, 2360, 2360, 2389, 2390, 2391, 2392, 2393, + 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, + 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, + 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, + 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, + 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, + 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, + 2442, 2443, 2444, 2445, 2446, 2447, 32767, 2448, + 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, + 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, + 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, + 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, + 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, + 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, + 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, + 2505, 2506, 2507, 2508, 2509, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 2510, + 2511, 2512, 2513, 3266, 3266, 3266, 3266, 2518, + 3267, 3267, 3267, 2522, 3268, 3268, 3268, 3268, + 3268, 3268, 3268, 6682, 3268, 3268, 3268, 2534, + 5151, 3269, 2537, 2538, 3271, 3271, 5157, 2948, 
+ 5159, 2544, 2545, 3273, 2778, 3273, 2549, 3274, + 2551, 3275, 2553, 5175, 2555, 3277, 3277, 3277, + 5181, 2560, 5184, 3278, 5186, 2564, 5189, 5190, + 5191, 5192, 3279, 5194, 5195, 2572, 5198, 32767, + 32767, 3278, 5200, 3278, 2577, 2578, 2579, 2580, + 5210, 3282, 3282, 5213, 3282, 2586, 2587, 2588, + 2589, 2590, 2591, -2175, -2175, -2175, 5230, 3288, + 5232, 3288, 6914, -69, -69, 5097, 5098, 5099, + 1792, -73, -73, 1792, 1792, 1792, 1792, 1792, + 1792, 5091, 5092, 2884, 5094, 1795, 5096, 5097, + 1797, 1797, 6961, 6962, 1797, 1797, 1797, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 2578, 2578, 2578, 2578, 2578, + 2578, 872, 872, 872, 872, 872, 872, 872, + 872, 872, 872, 8277, 6335, 8279, 6335, 9961, + 2978, 2978, 8144, 8145, 8146, 4839, 2974, 2974, + 4839, 4839, 4839, 4839, 4839, 4839, 8138, 8139, + 5931, 8141, 4842, 8143, 8144, 4844, 4844, 10008, + 10009, 4844, 4844, 4844, 8152, 10018, 10019, 8155, + 8156, 8157, 8158, 8159, 8160, 8161, 8162, 7073, + 8164, 5399, 5400, 5401, 5402, 5403, 0, 0, + 0, 5407, 0, 0, 5410, 5411, 5412, 8180, + 3383, 5415, 3914, 3915, 3916, 5419, 3918, 3919, + 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, + 3758, 3758, 3758, 3758, 3758, 3758, 3758, 3758, + 3758, 3758, 3758, 3758, 3758, 3758, 3758, 3758, + 3758, 3758, 3758, 3758, 3758, 3758, 3758, 3758, + 3758, 3758, 3758, 3758, 3758, 3758, 3758, 3758, + 3758, 3758, 3758, 3758, 3758, 3758, 3758, 3758, + 3758, 3758, 3758, 3758, 3758, 3758, 3758, 3758, + 3758, 3758, 7172, 3758, 3758, 3758, 3758, 5640, + 3758, 3758, 5643, 3758, 3758, 5644, 3435, 5646, + 3758, 3758, 3758, 3263, 3758, 3758, 3758, 3758, + 3758, 3758, 5657, 3758, 3758, 3758, 3758, 5662, + 3758, 5664, 3758, 5666, 5667, 5668, 5669, 5670, + 5671, 3758, 5673, 5674, 5675, 5676, 3758, 5678, + 3758, 5680, 3758, 5682, 7648, 3758, 3758, 5686, + 3758, 3758, 5689, 3758, 5691, 5692, 
3758, -1707, + -1707, -1707, -1707, -1707, -1707, 5698, 3756, 5700, + 3756, 7382, 399, 399, 5565, 5566, 5567, 2260, + 395, 395, 2260, 2260, 2260, 2260, 2260, 2260, + 5559, 5560, 3352, 5562, 2263, 5564, 5565, 2265, + 2265, 7429, 7430, 2265, 2265, 2265, 5573, 7439, + 7440, 5576, 5577, 5578, 5579, 5580, 5581, 5582, + 5583, 4494, 5585, 2820, 2821, 2822, 2823, 2824, + -2579, -2579, -2579, 2828, -2579, -2579, 2831, 2832, + 2833, 5601, 804, 2836, 1335, 1336, 1337, 2840, + 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, + 1347, 1348, 1179, 1179, 1179, 1179, 1179, 1179, + 1179, 1179, 1179, 1179, 1179, 1179, 1179, 1179, + 1179, 1179, 1179, 1179, 1179, 1179, 1179, 1179, + 1179, 1179, 1179, 1179, 1179, 1179, 1179, 1179, + 1179, 1179, 1179, 1179, 1179, 1179, 1179, 1179, + 1179, 1179, 1179, 1179, 1179, 1179, 1179, 1179, + 1179, 1179, 1179, 1179, 4593, 1179, 1179, 1179, + 1179, 3061, 1179, 1179, 3064, 1179, 1179, 3065, + 856, 3067, 1179, 1179, 1179, 684, 1179, 1179, + 1179, 1179, 1179, 1179, 3078, 1179, 1179, 1179, + 1179, 3083, 1179, 3085, 1179, 3087, 3088, 3089, + 3090, 3091, 3092, 1179, 3094, 3095, 3096, 3097, + 1179, 3099, 1179, 3101, 1179, 3103, 5069, 1179, + 1179, 3107, 1179, 1179, 3110, 1179, 3112, 3113, + 1179, 3115, 1179, -4284, -4284, -4284, -4284, 3121, + 1179, 3123, 1179, 4805, -2178, -2178, 2988, 2989, + 2990, -317, -2182, -2182, -317, -317, -317, -317, + -317, -317, 2982, 2983, 775, 2985, -314, 2987, + 2988, -312, -312, 4852, 4853, -312, -312, -312, + 2996, 4862, 4863, 2999, 3000, 3001, 3002, 3003, + 3004, 3005, 3006, 1917, 3008, 3009, 3010, 3011, + 3012, 3013, -2150, -2150, 3016, 3017, 3018, 3019, + 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, + 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, + 32767, 32767, 32767, 3036, 3037, 3038, 3039, 3040, + 3041, 32767, 32767, 3042, 3043, 3044, 3045, 3046, + 3047, 32767, 32767, 3048, 3049, 3050, 3051, 3052, + 3053, 32767, 32767, 3054, 3055, 3056, 32767, 32767, + 32767, -357, 3058, 3059, 3060, 3061, 1180, 3063, + 0, 1179, 
3065, 3066, 1181, 3391, 1181, 3070, + 0, 0, 0, 0, 32767, 0, 0, 32767, + 0, 32767, 0, 0, -4973, 32767, 32767, -7368, + -2202, -2201, -2200, -5507, -7372, -7372, -5507, -5507, + -5507, 32767, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 32767, 0, 0, 32767, 0, + -203, -2234, -732, -732, -732, -2234, -732, -732, + -2763, -1261, -1261, -1261, -2763, -1261, -1261, -1261, + -1261, -1261, -1261, -1261, -1261, -1261, -1261, -1091, + -1090, -1089, -1088, -1087, 32767, 32767, -1086, -1085, + -1084, -1083, -1082, -1081, -1080, -1079, -1078, -1077, + -1076, -1075, 32767, -1074, -1073, -1072, -1071, -1070, + -1069, -1068, -1067, -1066, -1065, -1064, -1063, -1062, + -1061, -1060, -1059, -1058, -1057, -1056, 32767, -1055, + -1054, -1053, -1052, 0, 32767, 32767, 32767, -1051, + -1050, -4463, 32767, -1048, 32767, -1047, -2928, -1045, + -1044, -2928, -1042, -1041, -2926, -716, -2926, -1037, + -1036, -1035, -539, -1033, -1032, -1031, -1030, -1029, + -1028, -2926, -1026, -1025, -1024, -1023, -2926, -1021, + -2926, -1019, -2926, -2926, -2926, -2926, -2926, -2926, + -1012, -2926, -2926, -2926, -2926, -1007, -2926, -1005, + -2926, -1003, -2926, -4891, -1000, -999, -2926, -997, + -996, -2926, -994, -2926, -2926, -991, 4475, 4476, + 4477, 4478, 4479, 4480, -2924, -981, -2924, -979, + -4604, 2380, 2381, -2784, -2784, -2784, 524, 2390, + 2391, 527, 528, 529, 530, 531, 532, -2766, + -2766, -557, -2766, 534, -2766, -2766, 535, 536, + -4627, -4627, 539, 540, 541, -2766, -4631, -4631, + -2766, -2766, -2766, -2766, -2766, -2766, -2766, -2766, + -1676, -2766, 0, 0, 0, 0, 0, 5404, + 5405, 5406, 0, 5408, 5409, 0, 0, 0, + -2767, 2031, 0, 1502, 1502, 1502, 0, 1502, + 1502, 1502, 1502, 1502, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 
32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 
32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 224, 225, 226, 32767, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 67, + 32767, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 32767, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, -271, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 1940, 18, 1942, 3908, 18, 18, 1946, 18, + 18, 1949, 18, 1951, 1952, 18, 1954, 18, + -5445, -5445, -5445, -5445, 1960, 18, 
1962, 18, + 3644, -3339, -3339, 1827, 1828, 1829, -1478, -3343, + -3343, -1478, -1478, -1478, -1478, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 0, + 0, 32767, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1340, 1341, + 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, + -2064, 1351, 1352, 1353, 1354, 32767, 1355, 1356, + 32767, 0, 32767, 32767, 1679, 32767, 1357, 1358, + 1359, 1855, 1361, 1362, 1363, 1364, 1365, 1366, + 32767, 1367, 1368, 1369, 1370, 32767, 1371, 32767, + 1372, 32767, 32767, 32767, 32767, 32767, 32767, 1373, + 32767, 32767, 32767, 32767, 1374, 32767, 1375, 32767, + 1376, 32767, -2513, 1378, 1379, 32767, 1380, 1381, + 32767, 1382, 32767, 32767, 1383, 32767, 1384, 32767, + 6848, 32767, 6849, 32767, 1387, 32767, 1388, 1389, + 32767, 1390, 32767, 32767, 1391, 1392, 1393, 1394, + 32767, 1395, 1396, 1397, 1398, 1399, 1400, 1401, + 32767, 1402, 1403, 1404, 1405, 32767, 1406, 1407, + 1408, 1409, 32767, 1410, 32767, 1411, 1412, 1413, + 1414, 1415, 1416, 1417, 1418, 1419, 1420, 32767, + 1421, 1422, 1423, 1424, 1425, 0, 0, 0, + 4575, 4576, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, -571, -571, + -571, 0, -572, 8101, 5150, 5150, 5150, 5337, + 8103, 5150, 8104, 8105, 2038, 5145, 2039, 5150, + 2040, 5150, 2041, 5150, 5150, 5138, 5150, 5150, + 5150, 5150, 5150, 8108, 8109, 8110, 5150, 5150, + 5384, 5150, 0, 5151, 5151, 5151, 5535, 5151, + 5151, 5151, 5537, 5538, 5539, 5540, 5541, 5542, + 5543, 5544, 5545, 5546, 5547, 5151, 5151, 2572, + 2572, 2572, 2572, 2572, 2572, 2572, 2572, 2572, + 2572, 2572, 
2572, 2572, 2572, 2572, 2572, 2572, + 2572, 2572, 2572, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2580, + 2580, 2580, 2580, 2580, 2580, 2580, 0, 2580, + 0, 2580, 0, 0, 0, 2580, 2580, 2580, + 0, 2580, 2580, 2580, 2580, 2580, 2580, 2580, + 0, 2580, 0, 0, 0, 2580, 2580, 2580, + 5517, 5518, 5519, 5520, 5521, 5522, -6, -6, + -6, 0, 0, 0, 0, 2581, 2581, 2581, + 2581, 5527, 2581, 2581, 2581, 2581, 0, 5528, + 0, 2581, 0, 0, 2581, 2581, 0, 0, + 0, 0, 0, 0, 5529, 5530, 5531, 32767, + 32767, 2579, 2579, 2579, 2579, 2579, 0, 0, + 2579, 2579, 2579, 2765, 0, 0, 0, 0, + 2579, 2579, 2579, 2579, 6066, 2579, 6066, 2579, + 2579, 2579, 0, 0, 0, 2579, 2579, 0, + 0, 0, 2579, 2579, 2579, 5530, 2579, 2579, + 2579, 2766, 5532, 2579, 5533, 5534, -533, 2574, + -532, 2579, -531, 2579, -530, 2579, 2579, 2567, + 2579, 2579, 2579, 2579, 2579, 5537, 5538, 5539, + 2579, 2579, 2813, 2579, 2579, 2579, 2579, 2579, + 2963, 2579, 2579, 2579, 2965, 2966, 2967, 2968, + 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2579, + 2579, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32767, 32767, 32767, + 32767, 32767, 331, 32767, 332, -2580, -2580, -2580, + -2580, 0, 0, 0, 0, 0, 0, 0, + -2580, 0, -2580, 0, -2580, -2580, -2580, 0, + 0, 0, -2580, 0, 0, 0, 0, 0, + 0, 0, -2580, 0, -2580, -2580, -2580, 0, + 0, 0, 2937, 2938, 2939, 2940, 2941, 2942, + -2586, -2586, -2586, -2580, -2125, -2581, -2581, 0, + 0, 0, 0, 2946, 0, 0, 0, 0, + -2581, 2947, -2581, 0, -2581, -2581, 0, 0, + -2581, -2581, -2581, -2581, -2581, -2581, 2948, 2949, + 2950, 2945, 2491, 0, 0, 0, 0, 0, + -2579, -2579, 0, 0, 0, 186, -2579, -2579, + -2579, -2579, 0, 0, 0, 0, 3487, 0, + 3487, 0, 0, 0, -2579, -2579, -2579, 0, + 0, -2579, -2579, -2579, 0, 0, 0, 2951, + 0, 0, 0, 187, 2953, 0, 2954, 2955, + -3112, -5, -3111, 0, -3110, 0, -3109, 0, + 0, -12, 0, 0, 0, 0, 0, 2958, + 2959, 2960, 0, 0, 234, 0, 0, 0, + 0, 0, 384, 0, 0, 0, 386, 387, + 388, 389, 390, 391, 392, 393, 394, 395, + 396, 0, 0, 0, 0, 0, 0, 0, + 0, 0, -1706, -1706, -1706, 0, 0, 0, + 0, 
385, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 397, + 398, 399, 400, 401, 402, 403, 404, 405, + 2112, 2113, 2114, 409, 410, 411, 412, 32767, + 413, 414, 415, 416, 417, 418, 419, 420, + 421, 422, 423, 424, 425, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + -1688, 32767, 32767, 32767, 32767, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 0, 0, 0, + 0, -752, -751, -750, -749, 0, -748, -747, + -746, 0, -745, -744, -743, -742, -741, -740, + -739, -4152, -737, -736, -735, 0, -2616, -733, + 0, 0, -732, -731, -2616, -406, -2616, 0, + 0, -727, -231, -725, 0, -724, 0, -723, + 0, -2621, 0, -721, -720, -719, -2622, 0, + -2623, -716, -2623, 0, -2624, -2624, -2624, -2624, + -710, -2624, -2624, 0, -2625, -706, -2625, -704, + -2625, -702, 0, 0, 0, 0, -2629, -700, + -699, -2629, -697, 0, 0, 0, 0, 0, + 0, 4767, 4768, 4769, -2635, -692, -2635, -690, + -4315, 2669, 2670, -2495, -2495, -2495, 813, 2679, + 2680, 816, 817, 818, 819, 820, 821, -2477, + -2477, -268, -2477, 823, -2477, -2477, 824, 825, + -4338, -4338, 828, 829, 830, -2477, -4342, -4342, + -2477, -2477, -2477, -2477, -2477, -2477, -2477, -2477, + -1387, 0, 0, 32767, 32767, 0, 0, 0, + 0, 0, -2486, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 1756, 1757, 1758, + 1759, -5645, -3702, -5645, -3700, -7325, -341, -340, + -5505, -5505, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 
32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 532, 533, + 32767, 534, 535, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, -781, 1084, 1084, 1084, 1084, + 1084, 1084, 4383, 4384, 2176, 4386, 1087, 4388, + 4389, 1089, 1089, 6253, 6254, 1089, 1089, 1089, + 4397, 6263, 6264, 4400, 4401, 4402, 4403, 4404, + 4405, 4406, 4407, 3318, 4409, 4410, 4411, 4412, + 4413, 4414, -749, -749, 4417, 4418, 4419, 
4420, + 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, + 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, + 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, + 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, + 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, + 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, + 4469, 1056, 4471, 4472, 4473, 4474, 2593, 4476, + 4477, 2593, 4479, 4480, 2595, 4805, 2595, 4484, + 4485, 4486, 4982, 4488, 4489, 4490, 4491, 4492, + 4493, 2595, 4495, 4496, 4497, 4498, 2595, 4500, + 2595, 4502, 2595, 2595, 2595, 2595, 2595, 2595, + 4509, 2595, 2595, 2595, 2595, 4514, 2595, 4516, + 2595, 4518, 2595, 630, 4521, 4522, 2595, 4524, + 4525, 2595, 4527, 2595, 2595, 4530, 2595, 4532, + 9996, 9997, 9998, 9999, 2595, 4538, 2595, 4540, + 4541, 2595, 4543, 2595, 2595, 4546, 4547, 4548, + 4549, 2595, 4551, 4552, 4553, 4554, 4555, 4556, + 4557, 2595, 4559, 4560, 4561, 4562, 2595, 4564, + 4565, 4566, 4567, 2595, 4569, 2595, 4571, 4572, + 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, + 2595, 4582, 4583, 4584, 4585, 4586, 4587, 4588, + 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, + 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, + 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, + 4613, 4614, 4615, 4089, 4090, 4091, 4092, 4620, + 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, + 4101, 4102, 4103, 4104, 2765, 2765, 2765, 2765, + 2765, 2765, 2765, 2765, 2765, 2765, 6179, 2765, + 2765, 2765, 2765, 4647, 2765, 2765, 4650, 4122, + 4652, 4653, 2444, 4655, 2767, 2767, 2767, 2272, + 2767, 2767, 2767, 2767, 2767, 2767, 4666, 2767, + 2767, 2767, 2767, 4671, 2767, 4673, 2767, 4675, + 4676, 4677, 4678, 4679, 4680, 2767, 4682, 4683, + 4684, 4685, 2767, 4687, 2767, 4689, 2767, 4691, + 6657, 2767, 2767, 4695, 2767, 2767, 4698, 2767, + 4700, 4701, 2767, 4703, 2767, -2696, -2696, -2696, + -2696, 4709, 2767, 4711, 2767, 2767, 4714, 2767, + 4716, 4717, 2767, 2767, 2767, 2767, 4722, 2767, + 2767, 2767, 2767, 2767, 2767, 2767, 4730, 2767, + 2767, 2767, 2767, 4735, 2767, 2767, 
2767, 2767, + 4740, 2767, 4742, 2767, 2767, 2767, 2767, 2767, + 2767, 2767, 2767, 2767, 2767, 4753, 2767, 2767, + 2767, 2767, 2767, 4193, 4194, 4195, -379, -379, + 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4771, + 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, + 4780, -3892, -940, -939, -938, 4785, -3890, -936, + -3889, -3889, 2179, -927, 2180, -930, 2181, -928, + 2182, -926, -925, -912, -923, -922, -921, 4803, + 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, + 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4819, + 3925, 4821, 4822, 4823, 4824, 4825, 4826, 4827, + 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, + 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, + 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, + 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, + 4860, 4710, 2086, 609, 4864, 4865, 4866, 4867, + 4868, 4869, 4870, 4871, 4872, 3052, 4874, 4875, + 4876, 4281, 4878, 4879, 4880, 4881, 4882, 4883, + 4884, 4885, 4886, 4887, 634, 4888, 4889, 4890, + 4891, 4892, 4893, 4894, 4895, 4896, 1322, 1322, + 1322, 1322, 1322, 1322, 1322, 4904, 338, 4906, + 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, + 4915, 4916, 4917, 665, 666, 667, 668, 669, + 670, 671, 672, 673, 674, 675, 676, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 32767, + 32767, 32767, 32767, 32767, 32767, 32767, 32767, 0, + 0, 0, 0, 0, 0, 32767, 0, 0, + 0, 0, 32767, 32767, 0, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 0, 0, 32767, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 32767, 0, 0, 0, 2478, 32767, 2477, + 2477, 2477, 2477, 2477, 32767, 2476, 32767, 32767, + 32767, 2473, 2473, 2473, 2473, 2473, 2473, 2473, + 32767, 2472, 2472, 2472, 2472, 2472, 2472, 2472, + 2472, 2472, 2472, 2472, 2472, 2472, 2472, 2472, + 2472, 2472, 2472, 2472, 2472, 2472, 2472, 2472, + 2472, 2472, 2472, 2472, 2472, 2472, 2472, 2472, + 2472, 2472, 2472, 
2472, 2472, 2472, 2472, 2472, + 2472, 2472, 2472, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, -2478, -2478, -2478, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + }; + + const unsigned char *k = (const unsigned char *) key; + size_t keylen = 4; + uint32 a = 0; + uint32 b = 1; + + while (keylen--) + { + unsigned char c = *k++; + + a = a * 257 + c; + b = b * 8191 + c; + } + return h[a % 9837] + h[b % 9837]; +} + +/* Hash lookup information for NFKC_QC */ +static const pg_unicode_norminfo UnicodeNormInfo_NFKC_QC = { + UnicodeNormProps_NFKC_QC, + NFKC_QC_hash_func, + 4918 +}; diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h index dbe8649a5763f..b792de1bc95e8 100644 --- a/src/include/executor/execExpr.h +++ b/src/include/executor/execExpr.h @@ -218,7 +218,6 @@ typedef enum ExprEvalOp EEOP_GROUPING_FUNC, EEOP_WINDOW_FUNC, EEOP_SUBPLAN, - EEOP_ALTERNATIVE_SUBPLAN, /* aggregation related nodes */ EEOP_AGG_STRICT_DESERIALIZE, @@ -589,13 +588,6 @@ typedef struct ExprEvalStep SubPlanState *sstate; } subplan; - /* for EEOP_ALTERNATIVE_SUBPLAN */ - struct - { - /* out-of-line state, created by nodeSubplan.c */ - AlternativeSubPlanState *asstate; - } alternative_subplan; - /* for EEOP_AGG_*DESERIALIZE */ struct { @@ -734,8 +726,6 @@ extern void ExecEvalXmlExpr(ExprState *state, ExprEvalStep *op); extern void ExecEvalGroupingFunc(ExprState *state, ExprEvalStep *op); extern void ExecEvalSubPlan(ExprState *state, ExprEvalStep *op, 
ExprContext *econtext); -extern void ExecEvalAlternativeSubPlan(ExprState *state, ExprEvalStep *op, - ExprContext *econtext); extern void ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext); extern void ExecEvalSysVar(ExprState *state, ExprEvalStep *op, diff --git a/src/include/executor/execPartition.h b/src/include/executor/execPartition.h index 6d1b722198729..473c4cd84fc64 100644 --- a/src/include/executor/execPartition.h +++ b/src/include/executor/execPartition.h @@ -22,33 +22,6 @@ typedef struct PartitionDispatchData *PartitionDispatch; typedef struct PartitionTupleRouting PartitionTupleRouting; -/* - * PartitionRoutingInfo - * - * Additional result relation information specific to routing tuples to a - * table partition. - */ -typedef struct PartitionRoutingInfo -{ - /* - * Map for converting tuples in root partitioned table format into - * partition format, or NULL if no conversion is required. - */ - TupleConversionMap *pi_RootToPartitionMap; - - /* - * Map for converting tuples in partition format into the root partitioned - * table format, or NULL if no conversion is required. - */ - TupleConversionMap *pi_PartitionToRootMap; - - /* - * Slot to store tuples in partition format, or NULL when no translation - * is required between root and partition. - */ - TupleTableSlot *pi_PartitionTupleSlot; -} PartitionRoutingInfo; - /* * PartitionedRelPruningData - Per-partitioned-table data for run-time pruning * of partitions. 
For a multilevel partitioned table, we have one of these diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h index 415e117407c96..b7978cd22ebc6 100644 --- a/src/include/executor/executor.h +++ b/src/include/executor/executor.h @@ -191,7 +191,6 @@ extern void InitResultRelInfo(ResultRelInfo *resultRelInfo, Relation partition_root, int instrument_options); extern ResultRelInfo *ExecGetTriggerResultRel(EState *estate, Oid relid); -extern void ExecCleanUpTriggerState(EState *estate); extern void ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate); extern bool ExecPartitionCheck(ResultRelInfo *resultRelInfo, @@ -538,6 +537,8 @@ extern bool ExecRelationIsTargetRelation(EState *estate, Index scanrelid); extern Relation ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags); extern void ExecInitRangeTable(EState *estate, List *rangeTable); +extern void ExecCloseRangeTableRelations(EState *estate); +extern void ExecCloseResultRelations(EState *estate); static inline RangeTblEntry * exec_rt_fetch(Index rti, EState *estate) @@ -546,6 +547,8 @@ exec_rt_fetch(Index rti, EState *estate) } extern Relation ExecGetRangeTableRelation(EState *estate, Index rti); +extern void ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo, + Index rti); extern int executor_errposition(EState *estate, int location); @@ -573,10 +576,14 @@ extern TupleTableSlot *ExecGetReturningSlot(EState *estate, ResultRelInfo *relIn */ extern void ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative); extern void ExecCloseIndices(ResultRelInfo *resultRelInfo); -extern List *ExecInsertIndexTuples(TupleTableSlot *slot, EState *estate, bool noDupErr, +extern List *ExecInsertIndexTuples(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, EState *estate, + bool noDupErr, bool *specConflict, List *arbiterIndexes); -extern bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate, - ItemPointer conflictTid, List 
*arbiterIndexes); +extern bool ExecCheckIndexConstraints(ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + EState *estate, ItemPointer conflictTid, + List *arbiterIndexes); extern void check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo, ItemPointer tupleid, @@ -593,10 +600,13 @@ extern bool RelationFindReplTupleByIndex(Relation rel, Oid idxoid, extern bool RelationFindReplTupleSeq(Relation rel, LockTupleMode lockmode, TupleTableSlot *searchslot, TupleTableSlot *outslot); -extern void ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot); -extern void ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate, +extern void ExecSimpleRelationInsert(ResultRelInfo *resultRelInfo, + EState *estate, TupleTableSlot *slot); +extern void ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo, + EState *estate, EPQState *epqstate, TupleTableSlot *searchslot, TupleTableSlot *slot); -extern void ExecSimpleRelationDelete(EState *estate, EPQState *epqstate, +extern void ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo, + EState *estate, EPQState *epqstate, TupleTableSlot *searchslot); extern void CheckCmdReplicaIdentity(Relation rel, CmdType cmd); diff --git a/src/include/executor/functions.h b/src/include/executor/functions.h index cb13428a5a884..a0db24bde699f 100644 --- a/src/include/executor/functions.h +++ b/src/include/executor/functions.h @@ -29,9 +29,9 @@ extern SQLFunctionParseInfoPtr prepare_sql_fn_parse_info(HeapTuple procedureTupl extern void sql_fn_parser_setup(struct ParseState *pstate, SQLFunctionParseInfoPtr pinfo); -extern void check_sql_fn_statements(List *queryTreeList); +extern void check_sql_fn_statements(List *queryTreeLists); -extern bool check_sql_fn_retval(List *queryTreeList, +extern bool check_sql_fn_retval(List *queryTreeLists, Oid rettype, TupleDesc rettupdesc, bool insertDroppedCols, List **resultTargetList); diff --git a/src/include/executor/nodeModifyTable.h b/src/include/executor/nodeModifyTable.h 
index 4ec4ebdabc13e..46a2dc9511889 100644 --- a/src/include/executor/nodeModifyTable.h +++ b/src/include/executor/nodeModifyTable.h @@ -15,7 +15,9 @@ #include "nodes/execnodes.h" -extern void ExecComputeStoredGenerated(EState *estate, TupleTableSlot *slot, CmdType cmdtype); +extern void ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo, + EState *estate, TupleTableSlot *slot, + CmdType cmdtype); extern ModifyTableState *ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags); extern void ExecEndModifyTable(ModifyTableState *node); diff --git a/src/include/executor/nodeSubplan.h b/src/include/executor/nodeSubplan.h index 83e90b3d07b94..b629af1f5fb92 100644 --- a/src/include/executor/nodeSubplan.h +++ b/src/include/executor/nodeSubplan.h @@ -18,12 +18,8 @@ extern SubPlanState *ExecInitSubPlan(SubPlan *subplan, PlanState *parent); -extern AlternativeSubPlanState *ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent); - extern Datum ExecSubPlan(SubPlanState *node, ExprContext *econtext, bool *isNull); -extern Datum ExecAlternativeSubPlan(AlternativeSubPlanState *node, ExprContext *econtext, bool *isNull); - extern void ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent); extern void ExecSetParamPlan(SubPlanState *node, ExprContext *econtext); diff --git a/src/include/funcapi.h b/src/include/funcapi.h index b047acdc1a85f..2f46442087e2a 100644 --- a/src/include/funcapi.h +++ b/src/include/funcapi.h @@ -172,7 +172,8 @@ extern int get_func_arg_info(HeapTuple procTup, Oid **p_argtypes, char ***p_argnames, char **p_argmodes); -extern int get_func_input_arg_names(Datum proargnames, Datum proargmodes, +extern int get_func_input_arg_names(char prokind, + Datum proargnames, Datum proargmodes, char ***arg_names); extern int get_func_trftypes(HeapTuple procTup, Oid **p_trftypes); diff --git a/src/include/jit/llvmjit.h b/src/include/jit/llvmjit.h index 706906c1cc833..325409acd5c57 100644 --- a/src/include/jit/llvmjit.h +++ 
b/src/include/jit/llvmjit.h @@ -129,6 +129,8 @@ extern char *LLVMGetHostCPUName(void); extern char *LLVMGetHostCPUFeatures(void); #endif +extern unsigned LLVMGetAttributeCountAtIndexPG(LLVMValueRef F, uint32 Idx); + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 0b42dd6f94410..6c0a7d68d6152 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -33,7 +33,6 @@ #include "utils/tuplestore.h" struct PlanState; /* forward references in this file */ -struct PartitionRoutingInfo; struct ParallelHashJoinState; struct ExecRowMark; struct ExprState; @@ -477,19 +476,29 @@ typedef struct ResultRelInfo /* ON CONFLICT evaluation state */ OnConflictSetState *ri_onConflict; - /* partition check expression */ - List *ri_PartitionCheck; - - /* partition check expression state */ + /* partition check expression state (NULL if not set up yet) */ ExprState *ri_PartitionCheckExpr; - /* relation descriptor for root partitioned table */ + /* + * Information needed by tuple routing target relations + * + * PartitionRoot gives the target relation mentioned in the query. + * RootToPartitionMap and PartitionTupleSlot, initialized by + * ExecInitRoutingInfo, are non-NULL if partition has a different tuple + * format than the root table. + */ Relation ri_PartitionRoot; + TupleConversionMap *ri_RootToPartitionMap; + TupleTableSlot *ri_PartitionTupleSlot; - /* Additional information specific to partition tuple routing */ - struct PartitionRoutingInfo *ri_PartitionInfo; + /* + * Map to convert child result relation tuples to the format of the table + * actually mentioned in the query (called "root"). Set only if + * transition tuple capture or update partition row movement is active. 
+ */ + TupleConversionMap *ri_ChildToRootMap; - /* For use by copy.c when performing multi-inserts */ + /* for use by copy.c when performing multi-inserts */ struct CopyMultiInsertBuffer *ri_CopyMultiInsertBuffer; } ResultRelInfo; @@ -522,23 +531,18 @@ typedef struct EState CommandId es_output_cid; /* Info about target table(s) for insert/update/delete queries: */ - ResultRelInfo *es_result_relations; /* array of ResultRelInfos */ - int es_num_result_relations; /* length of array */ - ResultRelInfo *es_result_relation_info; /* currently active array elt */ + ResultRelInfo **es_result_relations; /* Array of per-range-table-entry + * ResultRelInfo pointers, or NULL + * if not a target table */ + List *es_opened_result_relations; /* List of non-NULL entries in + * es_result_relations in no + * specific order */ - /* - * Info about the partition root table(s) for insert/update/delete queries - * targeting partitioned tables. Only leaf partitions are mentioned in - * es_result_relations, but we need access to the roots for firing - * triggers and for runtime tuple routing. - */ - ResultRelInfo *es_root_result_relations; /* array of ResultRelInfos */ - int es_num_root_result_relations; /* length of the array */ PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ /* * The following list contains ResultRelInfos created by the tuple routing - * code for partitions that don't already have one. + * code for partitions that aren't found in the es_result_relations array. */ List *es_tuple_routing_result_relations; @@ -880,18 +884,6 @@ typedef struct SubPlanState ExprState *cur_eq_comp; /* equality comparator for LHS vs. 
table */ } SubPlanState; -/* ---------------- - * AlternativeSubPlanState node - * ---------------- - */ -typedef struct AlternativeSubPlanState -{ - NodeTag type; - AlternativeSubPlan *subplan; /* expression plan node */ - List *subplans; /* SubPlanStates of alternative subplans */ - int active; /* list index of the one we're using */ -} AlternativeSubPlanState; - /* * DomainConstraintState - one item to check during CoerceToDomain * @@ -1174,8 +1166,13 @@ typedef struct ModifyTableState TupleTableSlot **mt_scans; /* input tuple corresponding to underlying * plans */ ResultRelInfo *resultRelInfo; /* per-subplan target relations */ - ResultRelInfo *rootResultRelInfo; /* root target relation (partitioned - * table root) */ + + /* + * Target relation mentioned in the original statement, used to fire + * statement-level triggers and as the root for tuple routing. + */ + ResultRelInfo *rootResultRelInfo; + List **mt_arowmarks; /* per-subplan ExecAuxRowMark lists */ EPQState mt_epqstate; /* for evaluating EvalPlanQual rechecks */ bool fireBSTriggers; /* do we need to fire stmt triggers? 
*/ @@ -1194,9 +1191,6 @@ typedef struct ModifyTableState /* controls transition table population for INSERT...ON CONFLICT UPDATE */ struct TransitionCaptureState *mt_oc_transition_capture; - - /* Per plan map for tuple conversion from child to root */ - TupleConversionMap **mt_per_subplan_tupconv_maps; } ModifyTableState; /* ---------------- @@ -1796,6 +1790,7 @@ typedef struct ForeignScanState ScanState ss; /* its first field is NodeTag */ ExprState *fdw_recheck_quals; /* original quals not in ss.ps.qual */ Size pscan_len; /* size of parallel coordination information */ + ResultRelInfo *resultRelInfo; /* result rel info, if UPDATE or DELETE */ /* use struct pointer to avoid including fdwapi.h here */ struct FdwRoutine *fdwroutine; void *fdw_state; /* foreign-data wrapper can keep state here */ diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 381d84b4e4f85..7ddd8c011bfc3 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -213,7 +213,6 @@ typedef enum NodeTag T_WindowFuncExprState, T_SetExprState, T_SubPlanState, - T_AlternativeSubPlanState, T_DomainConstraintState, /* diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index e83329fd6d10a..60c2f45466049 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -318,6 +318,7 @@ typedef struct CollateClause typedef enum RoleSpecType { ROLESPEC_CSTRING, /* role name is stored as a C string */ + ROLESPEC_CURRENT_ROLE, /* role spec is CURRENT_ROLE */ ROLESPEC_CURRENT_USER, /* role spec is CURRENT_USER */ ROLESPEC_SESSION_USER, /* role spec is SESSION_USER */ ROLESPEC_PUBLIC /* role name is "public" */ diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index 485d1b06c910f..3dd16b9ad534f 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -120,8 +120,6 @@ typedef struct PlannerGlobal List *resultRelations; /* "flat" list of integer RT indexes */ - List 
*rootResultRelations; /* "flat" list of integer RT indexes */ - List *appendRelations; /* "flat" list of AppendRelInfos */ List *relationOids; /* OIDs of relations the plan depends on */ @@ -347,6 +345,7 @@ struct PlannerInfo bool hasHavingQual; /* true if havingQual was non-null */ bool hasPseudoConstantQuals; /* true if any RestrictInfo has * pseudoconstant = true */ + bool hasAlternativeSubPlans; /* true if we've made any of those */ bool hasRecursion; /* true if planning a recursive WITH item */ /* These fields are used only when hasRecursion is true: */ diff --git a/src/include/nodes/pg_list.h b/src/include/nodes/pg_list.h index 14ea2766adb30..ec231010ce4bd 100644 --- a/src/include/nodes/pg_list.h +++ b/src/include/nodes/pg_list.h @@ -144,26 +144,6 @@ list_second_cell(const List *l) return NULL; } -/* Fetch address of list's third cell, if it has one, else NULL */ -static inline ListCell * -list_third_cell(const List *l) -{ - if (l && l->length >= 3) - return &l->elements[2]; - else - return NULL; -} - -/* Fetch address of list's fourth cell, if it has one, else NULL */ -static inline ListCell * -list_fourth_cell(const List *l) -{ - if (l && l->length >= 4) - return &l->elements[3]; - else - return NULL; -} - /* Fetch list's length */ static inline int list_length(const List *l) @@ -186,35 +166,34 @@ list_length(const List *l) * linitial() than lfirst(): given a List, lsecond() returns the data * in the second list cell. 
*/ - #define lfirst(lc) ((lc)->ptr_value) #define lfirst_int(lc) ((lc)->int_value) #define lfirst_oid(lc) ((lc)->oid_value) #define lfirst_node(type,lc) castNode(type, lfirst(lc)) -#define linitial(l) lfirst(list_head(l)) -#define linitial_int(l) lfirst_int(list_head(l)) -#define linitial_oid(l) lfirst_oid(list_head(l)) +#define linitial(l) lfirst(list_nth_cell(l, 0)) +#define linitial_int(l) lfirst_int(list_nth_cell(l, 0)) +#define linitial_oid(l) lfirst_oid(list_nth_cell(l, 0)) #define linitial_node(type,l) castNode(type, linitial(l)) -#define lsecond(l) lfirst(list_second_cell(l)) -#define lsecond_int(l) lfirst_int(list_second_cell(l)) -#define lsecond_oid(l) lfirst_oid(list_second_cell(l)) +#define lsecond(l) lfirst(list_nth_cell(l, 1)) +#define lsecond_int(l) lfirst_int(list_nth_cell(l, 1)) +#define lsecond_oid(l) lfirst_oid(list_nth_cell(l, 1)) #define lsecond_node(type,l) castNode(type, lsecond(l)) -#define lthird(l) lfirst(list_third_cell(l)) -#define lthird_int(l) lfirst_int(list_third_cell(l)) -#define lthird_oid(l) lfirst_oid(list_third_cell(l)) +#define lthird(l) lfirst(list_nth_cell(l, 2)) +#define lthird_int(l) lfirst_int(list_nth_cell(l, 2)) +#define lthird_oid(l) lfirst_oid(list_nth_cell(l, 2)) #define lthird_node(type,l) castNode(type, lthird(l)) -#define lfourth(l) lfirst(list_fourth_cell(l)) -#define lfourth_int(l) lfirst_int(list_fourth_cell(l)) -#define lfourth_oid(l) lfirst_oid(list_fourth_cell(l)) +#define lfourth(l) lfirst(list_nth_cell(l, 3)) +#define lfourth_int(l) lfirst_int(list_nth_cell(l, 3)) +#define lfourth_oid(l) lfirst_oid(list_nth_cell(l, 3)) #define lfourth_node(type,l) castNode(type, lfourth(l)) -#define llast(l) lfirst(list_tail(l)) -#define llast_int(l) lfirst_int(list_tail(l)) -#define llast_oid(l) lfirst_oid(list_tail(l)) +#define llast(l) lfirst(list_last_cell(l)) +#define llast_int(l) lfirst_int(list_last_cell(l)) +#define llast_oid(l) lfirst_oid(list_last_cell(l)) #define llast_node(type,l) castNode(type, llast(l)) /* @@ 
-269,6 +248,16 @@ list_nth_cell(const List *list, int n) return &list->elements[n]; } +/* + * Return the last cell in a non-NIL List. + */ +static inline ListCell * +list_last_cell(const List *list) +{ + Assert(list != NIL); + return &list->elements[list->length - 1]; +} + /* * Return the pointer value contained in the n'th element of the * specified list. (List elements begin at 0.) @@ -380,6 +369,32 @@ lnext(const List *l, const ListCell *c) */ #define foreach_current_index(cell) (cell##__state.i) +/* + * for_each_from - + * Like foreach(), but start from the N'th (zero-based) list element, + * not necessarily the first one. + * + * It's okay for N to exceed the list length, but not for it to be negative. + * + * The caveats for foreach() apply equally here. + */ +#define for_each_from(cell, lst, N) \ + for (ForEachState cell##__state = for_each_from_setup(lst, N); \ + (cell##__state.l != NIL && \ + cell##__state.i < cell##__state.l->length) ? \ + (cell = &cell##__state.l->elements[cell##__state.i], true) : \ + (cell = NULL, false); \ + cell##__state.i++) + +static inline ForEachState +for_each_from_setup(const List *lst, int N) +{ + ForEachState r = {lst, N}; + + Assert(N >= 0); + return r; +} + /* * for_each_cell - * a convenience macro which loops through a list starting from a @@ -396,7 +411,7 @@ lnext(const List *l, const ListCell *c) cell##__state.i++) static inline ForEachState -for_each_cell_setup(List *lst, ListCell *initcell) +for_each_cell_setup(const List *lst, const ListCell *initcell) { ForEachState r = {lst, initcell ? 
list_cell_number(lst, initcell) : list_length(lst)}; @@ -447,8 +462,8 @@ for_each_cell_setup(List *lst, ListCell *initcell) cell1##__state.i1++, cell1##__state.i2++) static inline ForBothCellState -for_both_cell_setup(List *list1, ListCell *initcell1, - List *list2, ListCell *initcell2) +for_both_cell_setup(const List *list1, const ListCell *initcell1, + const List *list2, const ListCell *initcell2) { ForBothCellState r = {list1, list2, initcell1 ? list_cell_number(list1, initcell1) : list_length(list1), diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 83e01074ed1d2..7e6b10f86b978 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -68,12 +68,6 @@ typedef struct PlannedStmt /* rtable indexes of target relations for INSERT/UPDATE/DELETE */ List *resultRelations; /* integer list of RT indexes, or NIL */ - /* - * rtable indexes of partitioned table roots that are UPDATE/DELETE - * targets; needed for trigger firing. - */ - List *rootResultRelations; - List *appendRelations; /* list of AppendRelInfo nodes */ List *subplans; /* Plan trees for SubPlan expressions; note @@ -224,8 +218,6 @@ typedef struct ModifyTable Index rootRelation; /* Root RT index, if target is partitioned */ bool partColsUpdated; /* some part key in hierarchy updated */ List *resultRelations; /* integer list of RT indexes */ - int resultRelIndex; /* index of first resultRel in plan's list */ - int rootResultRelIndex; /* index of the partitioned table root */ List *plans; /* plan(s) producing source data */ List *withCheckOptionLists; /* per-target-table WCO lists */ List *returningLists; /* per-target-table RETURNING tlists */ @@ -607,12 +599,20 @@ typedef struct WorkTableScan * When the plan node represents a foreign join, scan.scanrelid is zero and * fs_relids must be consulted to identify the join relation. (fs_relids * is valid for simple scans as well, but will always match scan.scanrelid.) 
+ * + * If the FDW's PlanDirectModify() callback decides to repurpose a ForeignScan + * node to perform the UPDATE or DELETE operation directly in the remote + * server, it sets 'operation' and 'resultRelation' to identify the operation + * type and target relation. Note that these fields are only set if the + * modification is performed *fully* remotely; otherwise, the modification is + * driven by a local ModifyTable node and 'operation' is left to CMD_SELECT. * ---------------- */ typedef struct ForeignScan { Scan scan; CmdType operation; /* SELECT/INSERT/UPDATE/DELETE */ + Index resultRelation; /* direct modification target's RT index */ Oid fs_server; /* OID of foreign server */ List *fdw_exprs; /* expressions that FDW may evaluate */ List *fdw_private; /* private data for FDW */ diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index d73be2ad46cc9..fd65ee8f9c59f 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -736,6 +736,9 @@ typedef struct SubPlan /* * AlternativeSubPlan - expression node for a choice among SubPlans * + * This is used only transiently during planning: by the time the plan + * reaches the executor, all AlternativeSubPlan nodes have been removed. + * * The subplans are given as a List so that the node definition need not * change if there's ever more than two alternatives. For the moment, * though, there are always exactly two; and the first one is the fast-start diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h index 08f22ce211ddb..71dcdf28894d1 100644 --- a/src/include/parser/kwlist.h +++ b/src/include/parser/kwlist.h @@ -19,459 +19,459 @@ /* there is deliberately not an #ifndef KWLIST_H here */ /* - * List of keyword (name, token-value, category) entries. + * List of keyword (name, token-value, category, bare-label-status) entries. * * Note: gen_keywordlist.pl requires the entries to appear in ASCII order. 
*/ -/* name, value, category */ -PG_KEYWORD("abort", ABORT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("absolute", ABSOLUTE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("access", ACCESS, UNRESERVED_KEYWORD) -PG_KEYWORD("action", ACTION, UNRESERVED_KEYWORD) -PG_KEYWORD("add", ADD_P, UNRESERVED_KEYWORD) -PG_KEYWORD("admin", ADMIN, UNRESERVED_KEYWORD) -PG_KEYWORD("after", AFTER, UNRESERVED_KEYWORD) -PG_KEYWORD("aggregate", AGGREGATE, UNRESERVED_KEYWORD) -PG_KEYWORD("all", ALL, RESERVED_KEYWORD) -PG_KEYWORD("also", ALSO, UNRESERVED_KEYWORD) -PG_KEYWORD("alter", ALTER, UNRESERVED_KEYWORD) -PG_KEYWORD("always", ALWAYS, UNRESERVED_KEYWORD) -PG_KEYWORD("analyse", ANALYSE, RESERVED_KEYWORD) /* British spelling */ -PG_KEYWORD("analyze", ANALYZE, RESERVED_KEYWORD) -PG_KEYWORD("and", AND, RESERVED_KEYWORD) -PG_KEYWORD("any", ANY, RESERVED_KEYWORD) -PG_KEYWORD("array", ARRAY, RESERVED_KEYWORD) -PG_KEYWORD("as", AS, RESERVED_KEYWORD) -PG_KEYWORD("asc", ASC, RESERVED_KEYWORD) -PG_KEYWORD("assertion", ASSERTION, UNRESERVED_KEYWORD) -PG_KEYWORD("assignment", ASSIGNMENT, UNRESERVED_KEYWORD) -PG_KEYWORD("asymmetric", ASYMMETRIC, RESERVED_KEYWORD) -PG_KEYWORD("at", AT, UNRESERVED_KEYWORD) -PG_KEYWORD("attach", ATTACH, UNRESERVED_KEYWORD) -PG_KEYWORD("attribute", ATTRIBUTE, UNRESERVED_KEYWORD) -PG_KEYWORD("authorization", AUTHORIZATION, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("backward", BACKWARD, UNRESERVED_KEYWORD) -PG_KEYWORD("before", BEFORE, UNRESERVED_KEYWORD) -PG_KEYWORD("begin", BEGIN_P, UNRESERVED_KEYWORD) -PG_KEYWORD("between", BETWEEN, COL_NAME_KEYWORD) -PG_KEYWORD("bigint", BIGINT, COL_NAME_KEYWORD) -PG_KEYWORD("binary", BINARY, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("bit", BIT, COL_NAME_KEYWORD) -PG_KEYWORD("boolean", BOOLEAN_P, COL_NAME_KEYWORD) -PG_KEYWORD("both", BOTH, RESERVED_KEYWORD) -PG_KEYWORD("by", BY, UNRESERVED_KEYWORD) -PG_KEYWORD("cache", CACHE, UNRESERVED_KEYWORD) -PG_KEYWORD("call", CALL, UNRESERVED_KEYWORD) -PG_KEYWORD("called", CALLED, UNRESERVED_KEYWORD) -PG_KEYWORD("cascade", 
CASCADE, UNRESERVED_KEYWORD) -PG_KEYWORD("cascaded", CASCADED, UNRESERVED_KEYWORD) -PG_KEYWORD("case", CASE, RESERVED_KEYWORD) -PG_KEYWORD("cast", CAST, RESERVED_KEYWORD) -PG_KEYWORD("catalog", CATALOG_P, UNRESERVED_KEYWORD) -PG_KEYWORD("chain", CHAIN, UNRESERVED_KEYWORD) -PG_KEYWORD("char", CHAR_P, COL_NAME_KEYWORD) -PG_KEYWORD("character", CHARACTER, COL_NAME_KEYWORD) -PG_KEYWORD("characteristics", CHARACTERISTICS, UNRESERVED_KEYWORD) -PG_KEYWORD("check", CHECK, RESERVED_KEYWORD) -PG_KEYWORD("checkpoint", CHECKPOINT, UNRESERVED_KEYWORD) -PG_KEYWORD("class", CLASS, UNRESERVED_KEYWORD) -PG_KEYWORD("close", CLOSE, UNRESERVED_KEYWORD) -PG_KEYWORD("cluster", CLUSTER, UNRESERVED_KEYWORD) -PG_KEYWORD("coalesce", COALESCE, COL_NAME_KEYWORD) -PG_KEYWORD("collate", COLLATE, RESERVED_KEYWORD) -PG_KEYWORD("collation", COLLATION, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("column", COLUMN, RESERVED_KEYWORD) -PG_KEYWORD("columns", COLUMNS, UNRESERVED_KEYWORD) -PG_KEYWORD("comment", COMMENT, UNRESERVED_KEYWORD) -PG_KEYWORD("comments", COMMENTS, UNRESERVED_KEYWORD) -PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD) -PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD) -PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("configuration", CONFIGURATION, UNRESERVED_KEYWORD) -PG_KEYWORD("conflict", CONFLICT, UNRESERVED_KEYWORD) -PG_KEYWORD("connection", CONNECTION, UNRESERVED_KEYWORD) -PG_KEYWORD("constraint", CONSTRAINT, RESERVED_KEYWORD) -PG_KEYWORD("constraints", CONSTRAINTS, UNRESERVED_KEYWORD) -PG_KEYWORD("content", CONTENT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("continue", CONTINUE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("conversion", CONVERSION_P, UNRESERVED_KEYWORD) -PG_KEYWORD("copy", COPY, UNRESERVED_KEYWORD) -PG_KEYWORD("cost", COST, UNRESERVED_KEYWORD) -PG_KEYWORD("create", CREATE, RESERVED_KEYWORD) -PG_KEYWORD("cross", CROSS, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("csv", CSV, UNRESERVED_KEYWORD) -PG_KEYWORD("cube", CUBE, UNRESERVED_KEYWORD) -PG_KEYWORD("current", 
CURRENT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("current_catalog", CURRENT_CATALOG, RESERVED_KEYWORD) -PG_KEYWORD("current_date", CURRENT_DATE, RESERVED_KEYWORD) -PG_KEYWORD("current_role", CURRENT_ROLE, RESERVED_KEYWORD) -PG_KEYWORD("current_schema", CURRENT_SCHEMA, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("current_time", CURRENT_TIME, RESERVED_KEYWORD) -PG_KEYWORD("current_timestamp", CURRENT_TIMESTAMP, RESERVED_KEYWORD) -PG_KEYWORD("current_user", CURRENT_USER, RESERVED_KEYWORD) -PG_KEYWORD("cursor", CURSOR, UNRESERVED_KEYWORD) -PG_KEYWORD("cycle", CYCLE, UNRESERVED_KEYWORD) -PG_KEYWORD("data", DATA_P, UNRESERVED_KEYWORD) -PG_KEYWORD("database", DATABASE, UNRESERVED_KEYWORD) -PG_KEYWORD("day", DAY_P, UNRESERVED_KEYWORD) -PG_KEYWORD("deallocate", DEALLOCATE, UNRESERVED_KEYWORD) -PG_KEYWORD("dec", DEC, COL_NAME_KEYWORD) -PG_KEYWORD("decimal", DECIMAL_P, COL_NAME_KEYWORD) -PG_KEYWORD("declare", DECLARE, UNRESERVED_KEYWORD) -PG_KEYWORD("default", DEFAULT, RESERVED_KEYWORD) -PG_KEYWORD("defaults", DEFAULTS, UNRESERVED_KEYWORD) -PG_KEYWORD("deferrable", DEFERRABLE, RESERVED_KEYWORD) -PG_KEYWORD("deferred", DEFERRED, UNRESERVED_KEYWORD) -PG_KEYWORD("definer", DEFINER, UNRESERVED_KEYWORD) -PG_KEYWORD("delete", DELETE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("delimiter", DELIMITER, UNRESERVED_KEYWORD) -PG_KEYWORD("delimiters", DELIMITERS, UNRESERVED_KEYWORD) -PG_KEYWORD("depends", DEPENDS, UNRESERVED_KEYWORD) -PG_KEYWORD("desc", DESC, RESERVED_KEYWORD) -PG_KEYWORD("detach", DETACH, UNRESERVED_KEYWORD) -PG_KEYWORD("dictionary", DICTIONARY, UNRESERVED_KEYWORD) -PG_KEYWORD("disable", DISABLE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("discard", DISCARD, UNRESERVED_KEYWORD) -PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD) -PG_KEYWORD("do", DO, RESERVED_KEYWORD) -PG_KEYWORD("document", DOCUMENT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("domain", DOMAIN_P, UNRESERVED_KEYWORD) -PG_KEYWORD("double", DOUBLE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("drop", DROP, UNRESERVED_KEYWORD) -PG_KEYWORD("each", EACH, 
UNRESERVED_KEYWORD) -PG_KEYWORD("else", ELSE, RESERVED_KEYWORD) -PG_KEYWORD("enable", ENABLE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("encoding", ENCODING, UNRESERVED_KEYWORD) -PG_KEYWORD("encrypted", ENCRYPTED, UNRESERVED_KEYWORD) -PG_KEYWORD("end", END_P, RESERVED_KEYWORD) -PG_KEYWORD("enum", ENUM_P, UNRESERVED_KEYWORD) -PG_KEYWORD("escape", ESCAPE, UNRESERVED_KEYWORD) -PG_KEYWORD("event", EVENT, UNRESERVED_KEYWORD) -PG_KEYWORD("except", EXCEPT, RESERVED_KEYWORD) -PG_KEYWORD("exclude", EXCLUDE, UNRESERVED_KEYWORD) -PG_KEYWORD("excluding", EXCLUDING, UNRESERVED_KEYWORD) -PG_KEYWORD("exclusive", EXCLUSIVE, UNRESERVED_KEYWORD) -PG_KEYWORD("execute", EXECUTE, UNRESERVED_KEYWORD) -PG_KEYWORD("exists", EXISTS, COL_NAME_KEYWORD) -PG_KEYWORD("explain", EXPLAIN, UNRESERVED_KEYWORD) -PG_KEYWORD("expression", EXPRESSION, UNRESERVED_KEYWORD) -PG_KEYWORD("extension", EXTENSION, UNRESERVED_KEYWORD) -PG_KEYWORD("external", EXTERNAL, UNRESERVED_KEYWORD) -PG_KEYWORD("extract", EXTRACT, COL_NAME_KEYWORD) -PG_KEYWORD("false", FALSE_P, RESERVED_KEYWORD) -PG_KEYWORD("family", FAMILY, UNRESERVED_KEYWORD) -PG_KEYWORD("fetch", FETCH, RESERVED_KEYWORD) -PG_KEYWORD("filter", FILTER, UNRESERVED_KEYWORD) -PG_KEYWORD("first", FIRST_P, UNRESERVED_KEYWORD) -PG_KEYWORD("float", FLOAT_P, COL_NAME_KEYWORD) -PG_KEYWORD("following", FOLLOWING, UNRESERVED_KEYWORD) -PG_KEYWORD("for", FOR, RESERVED_KEYWORD) -PG_KEYWORD("force", FORCE, UNRESERVED_KEYWORD) -PG_KEYWORD("foreign", FOREIGN, RESERVED_KEYWORD) -PG_KEYWORD("forward", FORWARD, UNRESERVED_KEYWORD) -PG_KEYWORD("freeze", FREEZE, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("from", FROM, RESERVED_KEYWORD) -PG_KEYWORD("full", FULL, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("function", FUNCTION, UNRESERVED_KEYWORD) -PG_KEYWORD("functions", FUNCTIONS, UNRESERVED_KEYWORD) -PG_KEYWORD("generated", GENERATED, UNRESERVED_KEYWORD) -PG_KEYWORD("global", GLOBAL, UNRESERVED_KEYWORD) -PG_KEYWORD("grant", GRANT, RESERVED_KEYWORD) -PG_KEYWORD("granted", GRANTED, UNRESERVED_KEYWORD) 
-PG_KEYWORD("greatest", GREATEST, COL_NAME_KEYWORD) -PG_KEYWORD("group", GROUP_P, RESERVED_KEYWORD) -PG_KEYWORD("grouping", GROUPING, COL_NAME_KEYWORD) -PG_KEYWORD("groups", GROUPS, UNRESERVED_KEYWORD) -PG_KEYWORD("handler", HANDLER, UNRESERVED_KEYWORD) -PG_KEYWORD("having", HAVING, RESERVED_KEYWORD) -PG_KEYWORD("header", HEADER_P, UNRESERVED_KEYWORD) -PG_KEYWORD("hold", HOLD, UNRESERVED_KEYWORD) -PG_KEYWORD("hour", HOUR_P, UNRESERVED_KEYWORD) -PG_KEYWORD("identity", IDENTITY_P, UNRESERVED_KEYWORD) -PG_KEYWORD("if", IF_P, UNRESERVED_KEYWORD) -PG_KEYWORD("ilike", ILIKE, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("immediate", IMMEDIATE, UNRESERVED_KEYWORD) -PG_KEYWORD("immutable", IMMUTABLE, UNRESERVED_KEYWORD) -PG_KEYWORD("implicit", IMPLICIT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("import", IMPORT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("in", IN_P, RESERVED_KEYWORD) -PG_KEYWORD("include", INCLUDE, UNRESERVED_KEYWORD) -PG_KEYWORD("including", INCLUDING, UNRESERVED_KEYWORD) -PG_KEYWORD("increment", INCREMENT, UNRESERVED_KEYWORD) -PG_KEYWORD("index", INDEX, UNRESERVED_KEYWORD) -PG_KEYWORD("indexes", INDEXES, UNRESERVED_KEYWORD) -PG_KEYWORD("inherit", INHERIT, UNRESERVED_KEYWORD) -PG_KEYWORD("inherits", INHERITS, UNRESERVED_KEYWORD) -PG_KEYWORD("initially", INITIALLY, RESERVED_KEYWORD) -PG_KEYWORD("inline", INLINE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("inner", INNER_P, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("inout", INOUT, COL_NAME_KEYWORD) -PG_KEYWORD("input", INPUT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("insensitive", INSENSITIVE, UNRESERVED_KEYWORD) -PG_KEYWORD("insert", INSERT, UNRESERVED_KEYWORD) -PG_KEYWORD("instead", INSTEAD, UNRESERVED_KEYWORD) -PG_KEYWORD("int", INT_P, COL_NAME_KEYWORD) -PG_KEYWORD("integer", INTEGER, COL_NAME_KEYWORD) -PG_KEYWORD("intersect", INTERSECT, RESERVED_KEYWORD) -PG_KEYWORD("interval", INTERVAL, COL_NAME_KEYWORD) -PG_KEYWORD("into", INTO, RESERVED_KEYWORD) -PG_KEYWORD("invoker", INVOKER, UNRESERVED_KEYWORD) -PG_KEYWORD("is", IS, TYPE_FUNC_NAME_KEYWORD) 
-PG_KEYWORD("isnull", ISNULL, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("isolation", ISOLATION, UNRESERVED_KEYWORD) -PG_KEYWORD("join", JOIN, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("key", KEY, UNRESERVED_KEYWORD) -PG_KEYWORD("label", LABEL, UNRESERVED_KEYWORD) -PG_KEYWORD("language", LANGUAGE, UNRESERVED_KEYWORD) -PG_KEYWORD("large", LARGE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("last", LAST_P, UNRESERVED_KEYWORD) -PG_KEYWORD("lateral", LATERAL_P, RESERVED_KEYWORD) -PG_KEYWORD("leading", LEADING, RESERVED_KEYWORD) -PG_KEYWORD("leakproof", LEAKPROOF, UNRESERVED_KEYWORD) -PG_KEYWORD("least", LEAST, COL_NAME_KEYWORD) -PG_KEYWORD("left", LEFT, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("level", LEVEL, UNRESERVED_KEYWORD) -PG_KEYWORD("like", LIKE, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD) -PG_KEYWORD("listen", LISTEN, UNRESERVED_KEYWORD) -PG_KEYWORD("load", LOAD, UNRESERVED_KEYWORD) -PG_KEYWORD("local", LOCAL, UNRESERVED_KEYWORD) -PG_KEYWORD("localtime", LOCALTIME, RESERVED_KEYWORD) -PG_KEYWORD("localtimestamp", LOCALTIMESTAMP, RESERVED_KEYWORD) -PG_KEYWORD("location", LOCATION, UNRESERVED_KEYWORD) -PG_KEYWORD("lock", LOCK_P, UNRESERVED_KEYWORD) -PG_KEYWORD("locked", LOCKED, UNRESERVED_KEYWORD) -PG_KEYWORD("logged", LOGGED, UNRESERVED_KEYWORD) -PG_KEYWORD("mapping", MAPPING, UNRESERVED_KEYWORD) -PG_KEYWORD("match", MATCH, UNRESERVED_KEYWORD) -PG_KEYWORD("materialized", MATERIALIZED, UNRESERVED_KEYWORD) -PG_KEYWORD("maxvalue", MAXVALUE, UNRESERVED_KEYWORD) -PG_KEYWORD("method", METHOD, UNRESERVED_KEYWORD) -PG_KEYWORD("minute", MINUTE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("minvalue", MINVALUE, UNRESERVED_KEYWORD) -PG_KEYWORD("mode", MODE, UNRESERVED_KEYWORD) -PG_KEYWORD("month", MONTH_P, UNRESERVED_KEYWORD) -PG_KEYWORD("move", MOVE, UNRESERVED_KEYWORD) -PG_KEYWORD("name", NAME_P, UNRESERVED_KEYWORD) -PG_KEYWORD("names", NAMES, UNRESERVED_KEYWORD) -PG_KEYWORD("national", NATIONAL, COL_NAME_KEYWORD) -PG_KEYWORD("natural", NATURAL, TYPE_FUNC_NAME_KEYWORD) 
-PG_KEYWORD("nchar", NCHAR, COL_NAME_KEYWORD) -PG_KEYWORD("new", NEW, UNRESERVED_KEYWORD) -PG_KEYWORD("next", NEXT, UNRESERVED_KEYWORD) -PG_KEYWORD("nfc", NFC, UNRESERVED_KEYWORD) -PG_KEYWORD("nfd", NFD, UNRESERVED_KEYWORD) -PG_KEYWORD("nfkc", NFKC, UNRESERVED_KEYWORD) -PG_KEYWORD("nfkd", NFKD, UNRESERVED_KEYWORD) -PG_KEYWORD("no", NO, UNRESERVED_KEYWORD) -PG_KEYWORD("none", NONE, COL_NAME_KEYWORD) -PG_KEYWORD("normalize", NORMALIZE, COL_NAME_KEYWORD) -PG_KEYWORD("normalized", NORMALIZED, UNRESERVED_KEYWORD) -PG_KEYWORD("not", NOT, RESERVED_KEYWORD) -PG_KEYWORD("nothing", NOTHING, UNRESERVED_KEYWORD) -PG_KEYWORD("notify", NOTIFY, UNRESERVED_KEYWORD) -PG_KEYWORD("notnull", NOTNULL, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("nowait", NOWAIT, UNRESERVED_KEYWORD) -PG_KEYWORD("null", NULL_P, RESERVED_KEYWORD) -PG_KEYWORD("nullif", NULLIF, COL_NAME_KEYWORD) -PG_KEYWORD("nulls", NULLS_P, UNRESERVED_KEYWORD) -PG_KEYWORD("numeric", NUMERIC, COL_NAME_KEYWORD) -PG_KEYWORD("object", OBJECT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("of", OF, UNRESERVED_KEYWORD) -PG_KEYWORD("off", OFF, UNRESERVED_KEYWORD) -PG_KEYWORD("offset", OFFSET, RESERVED_KEYWORD) -PG_KEYWORD("oids", OIDS, UNRESERVED_KEYWORD) -PG_KEYWORD("old", OLD, UNRESERVED_KEYWORD) -PG_KEYWORD("on", ON, RESERVED_KEYWORD) -PG_KEYWORD("only", ONLY, RESERVED_KEYWORD) -PG_KEYWORD("operator", OPERATOR, UNRESERVED_KEYWORD) -PG_KEYWORD("option", OPTION, UNRESERVED_KEYWORD) -PG_KEYWORD("options", OPTIONS, UNRESERVED_KEYWORD) -PG_KEYWORD("or", OR, RESERVED_KEYWORD) -PG_KEYWORD("order", ORDER, RESERVED_KEYWORD) -PG_KEYWORD("ordinality", ORDINALITY, UNRESERVED_KEYWORD) -PG_KEYWORD("others", OTHERS, UNRESERVED_KEYWORD) -PG_KEYWORD("out", OUT_P, COL_NAME_KEYWORD) -PG_KEYWORD("outer", OUTER_P, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("over", OVER, UNRESERVED_KEYWORD) -PG_KEYWORD("overlaps", OVERLAPS, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("overlay", OVERLAY, COL_NAME_KEYWORD) -PG_KEYWORD("overriding", OVERRIDING, UNRESERVED_KEYWORD) -PG_KEYWORD("owned", 
OWNED, UNRESERVED_KEYWORD) -PG_KEYWORD("owner", OWNER, UNRESERVED_KEYWORD) -PG_KEYWORD("parallel", PARALLEL, UNRESERVED_KEYWORD) -PG_KEYWORD("parser", PARSER, UNRESERVED_KEYWORD) -PG_KEYWORD("partial", PARTIAL, UNRESERVED_KEYWORD) -PG_KEYWORD("partition", PARTITION, UNRESERVED_KEYWORD) -PG_KEYWORD("passing", PASSING, UNRESERVED_KEYWORD) -PG_KEYWORD("password", PASSWORD, UNRESERVED_KEYWORD) -PG_KEYWORD("placing", PLACING, RESERVED_KEYWORD) -PG_KEYWORD("plans", PLANS, UNRESERVED_KEYWORD) -PG_KEYWORD("policy", POLICY, UNRESERVED_KEYWORD) -PG_KEYWORD("position", POSITION, COL_NAME_KEYWORD) -PG_KEYWORD("preceding", PRECEDING, UNRESERVED_KEYWORD) -PG_KEYWORD("precision", PRECISION, COL_NAME_KEYWORD) -PG_KEYWORD("prepare", PREPARE, UNRESERVED_KEYWORD) -PG_KEYWORD("prepared", PREPARED, UNRESERVED_KEYWORD) -PG_KEYWORD("preserve", PRESERVE, UNRESERVED_KEYWORD) -PG_KEYWORD("primary", PRIMARY, RESERVED_KEYWORD) -PG_KEYWORD("prior", PRIOR, UNRESERVED_KEYWORD) -PG_KEYWORD("privileges", PRIVILEGES, UNRESERVED_KEYWORD) -PG_KEYWORD("procedural", PROCEDURAL, UNRESERVED_KEYWORD) -PG_KEYWORD("procedure", PROCEDURE, UNRESERVED_KEYWORD) -PG_KEYWORD("procedures", PROCEDURES, UNRESERVED_KEYWORD) -PG_KEYWORD("program", PROGRAM, UNRESERVED_KEYWORD) -PG_KEYWORD("publication", PUBLICATION, UNRESERVED_KEYWORD) -PG_KEYWORD("quote", QUOTE, UNRESERVED_KEYWORD) -PG_KEYWORD("range", RANGE, UNRESERVED_KEYWORD) -PG_KEYWORD("read", READ, UNRESERVED_KEYWORD) -PG_KEYWORD("real", REAL, COL_NAME_KEYWORD) -PG_KEYWORD("reassign", REASSIGN, UNRESERVED_KEYWORD) -PG_KEYWORD("recheck", RECHECK, UNRESERVED_KEYWORD) -PG_KEYWORD("recursive", RECURSIVE, UNRESERVED_KEYWORD) -PG_KEYWORD("ref", REF, UNRESERVED_KEYWORD) -PG_KEYWORD("references", REFERENCES, RESERVED_KEYWORD) -PG_KEYWORD("referencing", REFERENCING, UNRESERVED_KEYWORD) -PG_KEYWORD("refresh", REFRESH, UNRESERVED_KEYWORD) -PG_KEYWORD("reindex", REINDEX, UNRESERVED_KEYWORD) -PG_KEYWORD("relative", RELATIVE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("release", 
RELEASE, UNRESERVED_KEYWORD) -PG_KEYWORD("rename", RENAME, UNRESERVED_KEYWORD) -PG_KEYWORD("repeatable", REPEATABLE, UNRESERVED_KEYWORD) -PG_KEYWORD("replace", REPLACE, UNRESERVED_KEYWORD) -PG_KEYWORD("replica", REPLICA, UNRESERVED_KEYWORD) -PG_KEYWORD("reset", RESET, UNRESERVED_KEYWORD) -PG_KEYWORD("restart", RESTART, UNRESERVED_KEYWORD) -PG_KEYWORD("restrict", RESTRICT, UNRESERVED_KEYWORD) -PG_KEYWORD("returning", RETURNING, RESERVED_KEYWORD) -PG_KEYWORD("returns", RETURNS, UNRESERVED_KEYWORD) -PG_KEYWORD("revoke", REVOKE, UNRESERVED_KEYWORD) -PG_KEYWORD("right", RIGHT, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("role", ROLE, UNRESERVED_KEYWORD) -PG_KEYWORD("rollback", ROLLBACK, UNRESERVED_KEYWORD) -PG_KEYWORD("rollup", ROLLUP, UNRESERVED_KEYWORD) -PG_KEYWORD("routine", ROUTINE, UNRESERVED_KEYWORD) -PG_KEYWORD("routines", ROUTINES, UNRESERVED_KEYWORD) -PG_KEYWORD("row", ROW, COL_NAME_KEYWORD) -PG_KEYWORD("rows", ROWS, UNRESERVED_KEYWORD) -PG_KEYWORD("rule", RULE, UNRESERVED_KEYWORD) -PG_KEYWORD("savepoint", SAVEPOINT, UNRESERVED_KEYWORD) -PG_KEYWORD("schema", SCHEMA, UNRESERVED_KEYWORD) -PG_KEYWORD("schemas", SCHEMAS, UNRESERVED_KEYWORD) -PG_KEYWORD("scroll", SCROLL, UNRESERVED_KEYWORD) -PG_KEYWORD("search", SEARCH, UNRESERVED_KEYWORD) -PG_KEYWORD("second", SECOND_P, UNRESERVED_KEYWORD) -PG_KEYWORD("security", SECURITY, UNRESERVED_KEYWORD) -PG_KEYWORD("select", SELECT, RESERVED_KEYWORD) -PG_KEYWORD("sequence", SEQUENCE, UNRESERVED_KEYWORD) -PG_KEYWORD("sequences", SEQUENCES, UNRESERVED_KEYWORD) -PG_KEYWORD("serializable", SERIALIZABLE, UNRESERVED_KEYWORD) -PG_KEYWORD("server", SERVER, UNRESERVED_KEYWORD) -PG_KEYWORD("session", SESSION, UNRESERVED_KEYWORD) -PG_KEYWORD("session_user", SESSION_USER, RESERVED_KEYWORD) -PG_KEYWORD("set", SET, UNRESERVED_KEYWORD) -PG_KEYWORD("setof", SETOF, COL_NAME_KEYWORD) -PG_KEYWORD("sets", SETS, UNRESERVED_KEYWORD) -PG_KEYWORD("share", SHARE, UNRESERVED_KEYWORD) -PG_KEYWORD("show", SHOW, UNRESERVED_KEYWORD) -PG_KEYWORD("similar", 
SIMILAR, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("simple", SIMPLE, UNRESERVED_KEYWORD) -PG_KEYWORD("skip", SKIP, UNRESERVED_KEYWORD) -PG_KEYWORD("smallint", SMALLINT, COL_NAME_KEYWORD) -PG_KEYWORD("snapshot", SNAPSHOT, UNRESERVED_KEYWORD) -PG_KEYWORD("some", SOME, RESERVED_KEYWORD) -PG_KEYWORD("sql", SQL_P, UNRESERVED_KEYWORD) -PG_KEYWORD("stable", STABLE, UNRESERVED_KEYWORD) -PG_KEYWORD("standalone", STANDALONE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("start", START, UNRESERVED_KEYWORD) -PG_KEYWORD("statement", STATEMENT, UNRESERVED_KEYWORD) -PG_KEYWORD("statistics", STATISTICS, UNRESERVED_KEYWORD) -PG_KEYWORD("stdin", STDIN, UNRESERVED_KEYWORD) -PG_KEYWORD("stdout", STDOUT, UNRESERVED_KEYWORD) -PG_KEYWORD("storage", STORAGE, UNRESERVED_KEYWORD) -PG_KEYWORD("stored", STORED, UNRESERVED_KEYWORD) -PG_KEYWORD("strict", STRICT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("strip", STRIP_P, UNRESERVED_KEYWORD) -PG_KEYWORD("subscription", SUBSCRIPTION, UNRESERVED_KEYWORD) -PG_KEYWORD("substring", SUBSTRING, COL_NAME_KEYWORD) -PG_KEYWORD("support", SUPPORT, UNRESERVED_KEYWORD) -PG_KEYWORD("symmetric", SYMMETRIC, RESERVED_KEYWORD) -PG_KEYWORD("sysid", SYSID, UNRESERVED_KEYWORD) -PG_KEYWORD("system", SYSTEM_P, UNRESERVED_KEYWORD) -PG_KEYWORD("table", TABLE, RESERVED_KEYWORD) -PG_KEYWORD("tables", TABLES, UNRESERVED_KEYWORD) -PG_KEYWORD("tablesample", TABLESAMPLE, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("tablespace", TABLESPACE, UNRESERVED_KEYWORD) -PG_KEYWORD("temp", TEMP, UNRESERVED_KEYWORD) -PG_KEYWORD("template", TEMPLATE, UNRESERVED_KEYWORD) -PG_KEYWORD("temporary", TEMPORARY, UNRESERVED_KEYWORD) -PG_KEYWORD("text", TEXT_P, UNRESERVED_KEYWORD) -PG_KEYWORD("then", THEN, RESERVED_KEYWORD) -PG_KEYWORD("ties", TIES, UNRESERVED_KEYWORD) -PG_KEYWORD("time", TIME, COL_NAME_KEYWORD) -PG_KEYWORD("timestamp", TIMESTAMP, COL_NAME_KEYWORD) -PG_KEYWORD("to", TO, RESERVED_KEYWORD) -PG_KEYWORD("trailing", TRAILING, RESERVED_KEYWORD) -PG_KEYWORD("transaction", TRANSACTION, UNRESERVED_KEYWORD) 
-PG_KEYWORD("transform", TRANSFORM, UNRESERVED_KEYWORD) -PG_KEYWORD("treat", TREAT, COL_NAME_KEYWORD) -PG_KEYWORD("trigger", TRIGGER, UNRESERVED_KEYWORD) -PG_KEYWORD("trim", TRIM, COL_NAME_KEYWORD) -PG_KEYWORD("true", TRUE_P, RESERVED_KEYWORD) -PG_KEYWORD("truncate", TRUNCATE, UNRESERVED_KEYWORD) -PG_KEYWORD("trusted", TRUSTED, UNRESERVED_KEYWORD) -PG_KEYWORD("type", TYPE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("types", TYPES_P, UNRESERVED_KEYWORD) -PG_KEYWORD("uescape", UESCAPE, UNRESERVED_KEYWORD) -PG_KEYWORD("unbounded", UNBOUNDED, UNRESERVED_KEYWORD) -PG_KEYWORD("uncommitted", UNCOMMITTED, UNRESERVED_KEYWORD) -PG_KEYWORD("unencrypted", UNENCRYPTED, UNRESERVED_KEYWORD) -PG_KEYWORD("union", UNION, RESERVED_KEYWORD) -PG_KEYWORD("unique", UNIQUE, RESERVED_KEYWORD) -PG_KEYWORD("unknown", UNKNOWN, UNRESERVED_KEYWORD) -PG_KEYWORD("unlisten", UNLISTEN, UNRESERVED_KEYWORD) -PG_KEYWORD("unlogged", UNLOGGED, UNRESERVED_KEYWORD) -PG_KEYWORD("until", UNTIL, UNRESERVED_KEYWORD) -PG_KEYWORD("update", UPDATE, UNRESERVED_KEYWORD) -PG_KEYWORD("user", USER, RESERVED_KEYWORD) -PG_KEYWORD("using", USING, RESERVED_KEYWORD) -PG_KEYWORD("vacuum", VACUUM, UNRESERVED_KEYWORD) -PG_KEYWORD("valid", VALID, UNRESERVED_KEYWORD) -PG_KEYWORD("validate", VALIDATE, UNRESERVED_KEYWORD) -PG_KEYWORD("validator", VALIDATOR, UNRESERVED_KEYWORD) -PG_KEYWORD("value", VALUE_P, UNRESERVED_KEYWORD) -PG_KEYWORD("values", VALUES, COL_NAME_KEYWORD) -PG_KEYWORD("varchar", VARCHAR, COL_NAME_KEYWORD) -PG_KEYWORD("variadic", VARIADIC, RESERVED_KEYWORD) -PG_KEYWORD("varying", VARYING, UNRESERVED_KEYWORD) -PG_KEYWORD("verbose", VERBOSE, TYPE_FUNC_NAME_KEYWORD) -PG_KEYWORD("version", VERSION_P, UNRESERVED_KEYWORD) -PG_KEYWORD("view", VIEW, UNRESERVED_KEYWORD) -PG_KEYWORD("views", VIEWS, UNRESERVED_KEYWORD) -PG_KEYWORD("volatile", VOLATILE, UNRESERVED_KEYWORD) -PG_KEYWORD("when", WHEN, RESERVED_KEYWORD) -PG_KEYWORD("where", WHERE, RESERVED_KEYWORD) -PG_KEYWORD("whitespace", WHITESPACE_P, UNRESERVED_KEYWORD) 
-PG_KEYWORD("window", WINDOW, RESERVED_KEYWORD) -PG_KEYWORD("with", WITH, RESERVED_KEYWORD) -PG_KEYWORD("within", WITHIN, UNRESERVED_KEYWORD) -PG_KEYWORD("without", WITHOUT, UNRESERVED_KEYWORD) -PG_KEYWORD("work", WORK, UNRESERVED_KEYWORD) -PG_KEYWORD("wrapper", WRAPPER, UNRESERVED_KEYWORD) -PG_KEYWORD("write", WRITE, UNRESERVED_KEYWORD) -PG_KEYWORD("xml", XML_P, UNRESERVED_KEYWORD) -PG_KEYWORD("xmlattributes", XMLATTRIBUTES, COL_NAME_KEYWORD) -PG_KEYWORD("xmlconcat", XMLCONCAT, COL_NAME_KEYWORD) -PG_KEYWORD("xmlelement", XMLELEMENT, COL_NAME_KEYWORD) -PG_KEYWORD("xmlexists", XMLEXISTS, COL_NAME_KEYWORD) -PG_KEYWORD("xmlforest", XMLFOREST, COL_NAME_KEYWORD) -PG_KEYWORD("xmlnamespaces", XMLNAMESPACES, COL_NAME_KEYWORD) -PG_KEYWORD("xmlparse", XMLPARSE, COL_NAME_KEYWORD) -PG_KEYWORD("xmlpi", XMLPI, COL_NAME_KEYWORD) -PG_KEYWORD("xmlroot", XMLROOT, COL_NAME_KEYWORD) -PG_KEYWORD("xmlserialize", XMLSERIALIZE, COL_NAME_KEYWORD) -PG_KEYWORD("xmltable", XMLTABLE, COL_NAME_KEYWORD) -PG_KEYWORD("year", YEAR_P, UNRESERVED_KEYWORD) -PG_KEYWORD("yes", YES_P, UNRESERVED_KEYWORD) -PG_KEYWORD("zone", ZONE, UNRESERVED_KEYWORD) +/* name, value, category, is-bare-label */ +PG_KEYWORD("abort", ABORT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("absolute", ABSOLUTE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("access", ACCESS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("action", ACTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("add", ADD_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("admin", ADMIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("after", AFTER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("aggregate", AGGREGATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("all", ALL, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("also", ALSO, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("alter", ALTER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("always", ALWAYS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("analyse", ANALYSE, RESERVED_KEYWORD, BARE_LABEL) /* British spelling */ 
+PG_KEYWORD("analyze", ANALYZE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("and", AND, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("any", ANY, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("array", ARRAY, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("as", AS, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("asc", ASC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("assertion", ASSERTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("assignment", ASSIGNMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("asymmetric", ASYMMETRIC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("at", AT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("attach", ATTACH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("attribute", ATTRIBUTE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("authorization", AUTHORIZATION, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("backward", BACKWARD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("before", BEFORE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("begin", BEGIN_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("between", BETWEEN, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("bigint", BIGINT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("binary", BINARY, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("bit", BIT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("boolean", BOOLEAN_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("both", BOTH, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("by", BY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cache", CACHE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("call", CALL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("called", CALLED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cascade", CASCADE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cascaded", CASCADED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("case", CASE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cast", CAST, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("catalog", CATALOG_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("chain", CHAIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("char", CHAR_P, COL_NAME_KEYWORD, 
AS_LABEL) +PG_KEYWORD("character", CHARACTER, COL_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("characteristics", CHARACTERISTICS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("check", CHECK, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("checkpoint", CHECKPOINT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("class", CLASS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("close", CLOSE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cluster", CLUSTER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("coalesce", COALESCE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("collate", COLLATE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("collation", COLLATION, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("column", COLUMN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("columns", COLUMNS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("comment", COMMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("comments", COMMENTS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("commit", COMMIT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("committed", COMMITTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("concurrently", CONCURRENTLY, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("configuration", CONFIGURATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("conflict", CONFLICT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("connection", CONNECTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("constraint", CONSTRAINT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("constraints", CONSTRAINTS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("content", CONTENT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("continue", CONTINUE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("conversion", CONVERSION_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("copy", COPY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cost", COST, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("create", CREATE, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("cross", CROSS, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("csv", CSV, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cube", 
CUBE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current", CURRENT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_catalog", CURRENT_CATALOG, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_date", CURRENT_DATE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_role", CURRENT_ROLE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_schema", CURRENT_SCHEMA, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_time", CURRENT_TIME, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_timestamp", CURRENT_TIMESTAMP, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("current_user", CURRENT_USER, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cursor", CURSOR, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("cycle", CYCLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("data", DATA_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("database", DATABASE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("day", DAY_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("deallocate", DEALLOCATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("dec", DEC, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("decimal", DECIMAL_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("declare", DECLARE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("default", DEFAULT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("defaults", DEFAULTS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("deferrable", DEFERRABLE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("deferred", DEFERRED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("definer", DEFINER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("delete", DELETE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("delimiter", DELIMITER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("delimiters", DELIMITERS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("depends", DEPENDS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("desc", DESC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("detach", DETACH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("dictionary", DICTIONARY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("disable", 
DISABLE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("discard", DISCARD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("distinct", DISTINCT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("do", DO, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("document", DOCUMENT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("domain", DOMAIN_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("double", DOUBLE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("drop", DROP, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("each", EACH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("else", ELSE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("enable", ENABLE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("encoding", ENCODING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("encrypted", ENCRYPTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("end", END_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("enum", ENUM_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("escape", ESCAPE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("event", EVENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("except", EXCEPT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("exclude", EXCLUDE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("excluding", EXCLUDING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("exclusive", EXCLUSIVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("execute", EXECUTE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("exists", EXISTS, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("explain", EXPLAIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("expression", EXPRESSION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("extension", EXTENSION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("external", EXTERNAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("extract", EXTRACT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("false", FALSE_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("family", FAMILY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("fetch", FETCH, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("filter", FILTER, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("first", 
FIRST_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("float", FLOAT_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("following", FOLLOWING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("for", FOR, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("force", FORCE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("foreign", FOREIGN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("forward", FORWARD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("freeze", FREEZE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("from", FROM, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("full", FULL, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("function", FUNCTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("functions", FUNCTIONS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("generated", GENERATED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("global", GLOBAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("grant", GRANT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("granted", GRANTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("greatest", GREATEST, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("group", GROUP_P, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("grouping", GROUPING, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("groups", GROUPS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("handler", HANDLER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("having", HAVING, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("header", HEADER_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("hold", HOLD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("hour", HOUR_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("identity", IDENTITY_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("if", IF_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("ilike", ILIKE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("immediate", IMMEDIATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("immutable", IMMUTABLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("implicit", IMPLICIT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("import", IMPORT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("in", 
IN_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("include", INCLUDE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("including", INCLUDING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("increment", INCREMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("index", INDEX, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("indexes", INDEXES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inherit", INHERIT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inherits", INHERITS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("initially", INITIALLY, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inline", INLINE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("inner", INNER_P, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("inout", INOUT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("input", INPUT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("insensitive", INSENSITIVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("insert", INSERT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("instead", INSTEAD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("int", INT_P, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("integer", INTEGER, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("intersect", INTERSECT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("interval", INTERVAL, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("into", INTO, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("invoker", INVOKER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("is", IS, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("isnull", ISNULL, TYPE_FUNC_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("isolation", ISOLATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("join", JOIN, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("key", KEY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("label", LABEL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("language", LANGUAGE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("large", LARGE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("last", LAST_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("lateral", LATERAL_P, RESERVED_KEYWORD, BARE_LABEL) 
+PG_KEYWORD("leading", LEADING, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("leakproof", LEAKPROOF, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("least", LEAST, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("left", LEFT, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("level", LEVEL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("like", LIKE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("listen", LISTEN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("load", LOAD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("local", LOCAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("localtime", LOCALTIME, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("localtimestamp", LOCALTIMESTAMP, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("location", LOCATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("lock", LOCK_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("locked", LOCKED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("logged", LOGGED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("mapping", MAPPING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("match", MATCH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("materialized", MATERIALIZED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("maxvalue", MAXVALUE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("method", METHOD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("minute", MINUTE_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("minvalue", MINVALUE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("mode", MODE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("month", MONTH_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("move", MOVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("name", NAME_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("names", NAMES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("national", NATIONAL, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("natural", NATURAL, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("nchar", NCHAR, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("new", NEW, UNRESERVED_KEYWORD, BARE_LABEL) 
+PG_KEYWORD("next", NEXT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfc", NFC, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfd", NFD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfkc", NFKC, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nfkd", NFKD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("no", NO, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("none", NONE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("normalize", NORMALIZE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("normalized", NORMALIZED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("not", NOT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nothing", NOTHING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("notify", NOTIFY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("notnull", NOTNULL, TYPE_FUNC_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("nowait", NOWAIT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("null", NULL_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("nullif", NULLIF, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("nulls", NULLS_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("numeric", NUMERIC, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("object", OBJECT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("of", OF, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("off", OFF, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("offset", OFFSET, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("oids", OIDS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("old", OLD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("on", ON, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("only", ONLY, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("operator", OPERATOR, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("option", OPTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("options", OPTIONS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("or", OR, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("order", ORDER, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("ordinality", ORDINALITY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("others", OTHERS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("out", OUT_P, 
COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("outer", OUTER_P, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("over", OVER, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("overlaps", OVERLAPS, TYPE_FUNC_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("overlay", OVERLAY, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("overriding", OVERRIDING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("owned", OWNED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("owner", OWNER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("parallel", PARALLEL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("parser", PARSER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("partial", PARTIAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("partition", PARTITION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("passing", PASSING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("password", PASSWORD, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("placing", PLACING, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("plans", PLANS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("policy", POLICY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("position", POSITION, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("preceding", PRECEDING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("precision", PRECISION, COL_NAME_KEYWORD, AS_LABEL) +PG_KEYWORD("prepare", PREPARE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("prepared", PREPARED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("preserve", PRESERVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("primary", PRIMARY, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("prior", PRIOR, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("privileges", PRIVILEGES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("procedural", PROCEDURAL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("procedure", PROCEDURE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("procedures", PROCEDURES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("program", PROGRAM, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("publication", PUBLICATION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("quote", 
QUOTE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("range", RANGE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("read", READ, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("real", REAL, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("reassign", REASSIGN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("recheck", RECHECK, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("recursive", RECURSIVE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("ref", REF, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("references", REFERENCES, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("referencing", REFERENCING, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("refresh", REFRESH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("reindex", REINDEX, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("relative", RELATIVE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("release", RELEASE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("rename", RENAME, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("repeatable", REPEATABLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("replace", REPLACE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("replica", REPLICA, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("reset", RESET, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("restart", RESTART, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("restrict", RESTRICT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("returning", RETURNING, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("returns", RETURNS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("revoke", REVOKE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("right", RIGHT, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("role", ROLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("rollback", ROLLBACK, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("rollup", ROLLUP, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("routine", ROUTINE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("routines", ROUTINES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("row", ROW, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("rows", ROWS, UNRESERVED_KEYWORD, BARE_LABEL) 
+PG_KEYWORD("rule", RULE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("savepoint", SAVEPOINT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("schema", SCHEMA, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("schemas", SCHEMAS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("scroll", SCROLL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("search", SEARCH, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("second", SECOND_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("security", SECURITY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("select", SELECT, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("sequence", SEQUENCE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("sequences", SEQUENCES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("serializable", SERIALIZABLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("server", SERVER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("session", SESSION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("session_user", SESSION_USER, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("set", SET, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("setof", SETOF, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("sets", SETS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("share", SHARE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("show", SHOW, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("similar", SIMILAR, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("simple", SIMPLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("skip", SKIP, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("smallint", SMALLINT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("snapshot", SNAPSHOT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("some", SOME, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("sql", SQL_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("stable", STABLE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("standalone", STANDALONE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("start", START, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("statement", STATEMENT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("statistics", STATISTICS, 
UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("stdin", STDIN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("stdout", STDOUT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("storage", STORAGE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("stored", STORED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("strict", STRICT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("strip", STRIP_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("subscription", SUBSCRIPTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("substring", SUBSTRING, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("support", SUPPORT, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("symmetric", SYMMETRIC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("sysid", SYSID, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("system", SYSTEM_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("table", TABLE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("tables", TABLES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("tablesample", TABLESAMPLE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("tablespace", TABLESPACE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("temp", TEMP, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("template", TEMPLATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("temporary", TEMPORARY, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("text", TEXT_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("then", THEN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("ties", TIES, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("time", TIME, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("timestamp", TIMESTAMP, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("to", TO, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("trailing", TRAILING, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("transaction", TRANSACTION, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("transform", TRANSFORM, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("treat", TREAT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("trigger", TRIGGER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("trim", TRIM, COL_NAME_KEYWORD, BARE_LABEL) 
+PG_KEYWORD("true", TRUE_P, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("truncate", TRUNCATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("trusted", TRUSTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("type", TYPE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("types", TYPES_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("uescape", UESCAPE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("unbounded", UNBOUNDED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("uncommitted", UNCOMMITTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("unencrypted", UNENCRYPTED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("union", UNION, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("unique", UNIQUE, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("unknown", UNKNOWN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("unlisten", UNLISTEN, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("unlogged", UNLOGGED, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("until", UNTIL, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("update", UPDATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("user", USER, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("using", USING, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("vacuum", VACUUM, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("valid", VALID, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("validate", VALIDATE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("validator", VALIDATOR, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("value", VALUE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("values", VALUES, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("varchar", VARCHAR, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("variadic", VARIADIC, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("varying", VARYING, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("verbose", VERBOSE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("version", VERSION_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("view", VIEW, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("views", VIEWS, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("volatile", VOLATILE, 
UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("when", WHEN, RESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("where", WHERE, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("whitespace", WHITESPACE_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("window", WINDOW, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("with", WITH, RESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("within", WITHIN, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("without", WITHOUT, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("work", WORK, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("wrapper", WRAPPER, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("write", WRITE, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("xml", XML_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlattributes", XMLATTRIBUTES, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlconcat", XMLCONCAT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlelement", XMLELEMENT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlexists", XMLEXISTS, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlforest", XMLFOREST, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlnamespaces", XMLNAMESPACES, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlparse", XMLPARSE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlpi", XMLPI, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlroot", XMLROOT, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmlserialize", XMLSERIALIZE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("xmltable", XMLTABLE, COL_NAME_KEYWORD, BARE_LABEL) +PG_KEYWORD("year", YEAR_P, UNRESERVED_KEYWORD, AS_LABEL) +PG_KEYWORD("yes", YES_P, UNRESERVED_KEYWORD, BARE_LABEL) +PG_KEYWORD("zone", ZONE, UNRESERVED_KEYWORD, BARE_LABEL) diff --git a/src/include/parser/parse_oper.h b/src/include/parser/parse_oper.h index bcd861e43ac34..09695a2765cff 100644 --- a/src/include/parser/parse_oper.h +++ b/src/include/parser/parse_oper.h @@ -31,8 +31,6 @@ extern Oid LookupOperWithArgs(ObjectWithArgs *oper, bool noError); /* NB: the selected operator may require coercion of the input types! 
*/ extern Operator oper(ParseState *pstate, List *op, Oid arg1, Oid arg2, bool noError, int location); -extern Operator right_oper(ParseState *pstate, List *op, Oid arg, - bool noError, int location); extern Operator left_oper(ParseState *pstate, List *op, Oid arg, bool noError, int location); diff --git a/src/include/parser/scansup.h b/src/include/parser/scansup.h index 7a6ee529ae0c6..5bc426660df6a 100644 --- a/src/include/parser/scansup.h +++ b/src/include/parser/scansup.h @@ -1,8 +1,7 @@ /*------------------------------------------------------------------------- * * scansup.h - * scanner support routines. used by both the bootstrap lexer - * as well as the normal lexer + * scanner support routines used by the core lexer * * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -15,8 +14,6 @@ #ifndef SCANSUP_H #define SCANSUP_H -extern char *scanstr(const char *s); - extern char *downcase_truncate_identifier(const char *ident, int len, bool warn); diff --git a/src/include/partitioning/partbounds.h b/src/include/partitioning/partbounds.h index dfc720720b93a..192b0b1e2ad89 100644 --- a/src/include/partitioning/partbounds.h +++ b/src/include/partitioning/partbounds.h @@ -12,10 +12,9 @@ #define PARTBOUNDS_H #include "fmgr.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" +#include "parser/parse_node.h" #include "partitioning/partdefs.h" -#include "utils/relcache.h" + struct RelOptInfo; /* avoid including pathnodes.h here */ @@ -98,7 +97,8 @@ extern PartitionBoundInfo partition_bounds_merge(int partnatts, List **inner_parts); extern bool partitions_are_ordered(PartitionBoundInfo boundinfo, int nparts); extern void check_new_partition_bound(char *relname, Relation parent, - PartitionBoundSpec *spec); + PartitionBoundSpec *spec, + ParseState *pstate); extern void check_default_partition_contents(Relation parent, Relation defaultRel, PartitionBoundSpec *new_spec); diff 
--git a/src/include/pgstat.h b/src/include/pgstat.h index 0dfbac46b4b06..a821ff4f158fe 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -56,18 +56,21 @@ typedef enum StatMsgType PGSTAT_MTYPE_RESETSHAREDCOUNTER, PGSTAT_MTYPE_RESETSINGLECOUNTER, PGSTAT_MTYPE_RESETSLRUCOUNTER, + PGSTAT_MTYPE_RESETREPLSLOTCOUNTER, PGSTAT_MTYPE_AUTOVAC_START, PGSTAT_MTYPE_VACUUM, PGSTAT_MTYPE_ANALYZE, PGSTAT_MTYPE_ARCHIVER, PGSTAT_MTYPE_BGWRITER, + PGSTAT_MTYPE_WAL, PGSTAT_MTYPE_SLRU, PGSTAT_MTYPE_FUNCSTAT, PGSTAT_MTYPE_FUNCPURGE, PGSTAT_MTYPE_RECOVERYCONFLICT, PGSTAT_MTYPE_TEMPFILE, PGSTAT_MTYPE_DEADLOCK, - PGSTAT_MTYPE_CHECKSUMFAILURE + PGSTAT_MTYPE_CHECKSUMFAILURE, + PGSTAT_MTYPE_REPLSLOT, } StatMsgType; /* ---------- @@ -122,7 +125,8 @@ typedef struct PgStat_TableCounts typedef enum PgStat_Shared_Reset_Target { RESET_ARCHIVER, - RESET_BGWRITER + RESET_BGWRITER, + RESET_WAL } PgStat_Shared_Reset_Target; /* Possible object types for resetting single counters */ @@ -356,6 +360,18 @@ typedef struct PgStat_MsgResetslrucounter int m_index; } PgStat_MsgResetslrucounter; +/* ---------- + * PgStat_MsgResetreplslotcounter Sent by the backend to tell the collector + * to reset replication slot counter(s) + * ---------- + */ +typedef struct PgStat_MsgResetreplslotcounter +{ + PgStat_MsgHdr m_hdr; + char m_slotname[NAMEDATALEN]; + bool clearall; +} PgStat_MsgResetreplslotcounter; + /* ---------- * PgStat_MsgAutovacStart Sent by the autovacuum daemon to signal * that a database is going to be processed @@ -436,6 +452,16 @@ typedef struct PgStat_MsgBgWriter PgStat_Counter m_checkpoint_sync_time; } PgStat_MsgBgWriter; +/* ---------- + * PgStat_MsgWal Sent by backends and background processes to update WAL statistics. + * ---------- + */ +typedef struct PgStat_MsgWal +{ + PgStat_MsgHdr m_hdr; + PgStat_Counter m_wal_buffers_full; +} PgStat_MsgWal; + /* ---------- * PgStat_MsgSLRU Sent by a backend to update SLRU statistics. 
* ---------- @@ -453,6 +479,22 @@ typedef struct PgStat_MsgSLRU PgStat_Counter m_truncate; } PgStat_MsgSLRU; +/* ---------- + * PgStat_MsgReplSlot Sent by a backend or a wal sender to update replication + * slot statistics. + * ---------- + */ +typedef struct PgStat_MsgReplSlot +{ + PgStat_MsgHdr m_hdr; + char m_slotname[NAMEDATALEN]; + bool m_drop; + PgStat_Counter m_spill_txns; + PgStat_Counter m_spill_count; + PgStat_Counter m_spill_bytes; +} PgStat_MsgReplSlot; + + /* ---------- * PgStat_MsgRecoveryConflict Sent by the backend upon recovery conflict * ---------- @@ -591,11 +633,13 @@ typedef union PgStat_Msg PgStat_MsgResetsharedcounter msg_resetsharedcounter; PgStat_MsgResetsinglecounter msg_resetsinglecounter; PgStat_MsgResetslrucounter msg_resetslrucounter; + PgStat_MsgResetreplslotcounter msg_resetreplslotcounter; PgStat_MsgAutovacStart msg_autovacuum_start; PgStat_MsgVacuum msg_vacuum; PgStat_MsgAnalyze msg_analyze; PgStat_MsgArchiver msg_archiver; PgStat_MsgBgWriter msg_bgwriter; + PgStat_MsgWal msg_wal; PgStat_MsgSLRU msg_slru; PgStat_MsgFuncstat msg_funcstat; PgStat_MsgFuncpurge msg_funcpurge; @@ -603,6 +647,7 @@ typedef union PgStat_Msg PgStat_MsgDeadlock msg_deadlock; PgStat_MsgTempFile msg_tempfile; PgStat_MsgChecksumFailure msg_checksumfailure; + PgStat_MsgReplSlot msg_replslot; } PgStat_Msg; @@ -614,7 +659,7 @@ typedef union PgStat_Msg * ------------------------------------------------------------ */ -#define PGSTAT_FILE_FORMAT_ID 0x01A5BC9D +#define PGSTAT_FILE_FORMAT_ID 0x01A5BC9F /* ---------- * PgStat_StatDBEntry The collector's data per database @@ -745,6 +790,15 @@ typedef struct PgStat_GlobalStats TimestampTz stat_reset_timestamp; } PgStat_GlobalStats; +/* + * WAL statistics kept in the stats collector + */ +typedef struct PgStat_WalStats +{ + PgStat_Counter wal_buffers_full; + TimestampTz stat_reset_timestamp; +} PgStat_WalStats; + /* * SLRU statistics kept in the stats collector */ @@ -760,6 +814,17 @@ typedef struct PgStat_SLRUStats 
TimestampTz stat_reset_timestamp; } PgStat_SLRUStats; +/* + * Replication slot statistics kept in the stats collector + */ +typedef struct PgStat_ReplSlotStats +{ + char slotname[NAMEDATALEN]; + PgStat_Counter spill_txns; + PgStat_Counter spill_count; + PgStat_Counter spill_bytes; + TimestampTz stat_reset_timestamp; +} PgStat_ReplSlotStats; /* ---------- * Backend states @@ -1265,6 +1330,11 @@ extern char *pgstat_stat_filename; */ extern PgStat_MsgBgWriter BgWriterStats; +/* + * WAL statistics counter is updated by backends and background processes + */ +extern PgStat_MsgWal WalStats; + /* * Updated by pgstat_count_buffer_*_time macros */ @@ -1303,6 +1373,7 @@ extern void pgstat_reset_counters(void); extern void pgstat_reset_shared_counters(const char *); extern void pgstat_reset_single_counter(Oid objectid, PgStat_Single_Reset_Type type); extern void pgstat_reset_slru_counter(const char *); +extern void pgstat_reset_replslot_counter(const char *name); extern void pgstat_report_autovac(Oid dboid); extern void pgstat_report_vacuum(Oid tableoid, bool shared, @@ -1315,6 +1386,9 @@ extern void pgstat_report_recovery_conflict(int reason); extern void pgstat_report_deadlock(void); extern void pgstat_report_checksum_failures_in_db(Oid dboid, int failurecount); extern void pgstat_report_checksum_failure(void); +extern void pgstat_report_replslot(const char *slotname, int spilltxns, int spillcount, + int spillbytes); +extern void pgstat_report_replslot_drop(const char *slotname); extern void pgstat_initialize(void); extern void pgstat_bestart(void); @@ -1464,6 +1538,7 @@ extern void pgstat_twophase_postabort(TransactionId xid, uint16 info, extern void pgstat_send_archiver(const char *xlog, bool failed); extern void pgstat_send_bgwriter(void); +extern void pgstat_send_wal(void); /* ---------- * Support functions for the SQL-callable functions to @@ -1478,7 +1553,9 @@ extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid); extern int 
pgstat_fetch_stat_numbackends(void); extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void); extern PgStat_GlobalStats *pgstat_fetch_global(void); +extern PgStat_WalStats *pgstat_fetch_stat_wal(void); extern PgStat_SLRUStats *pgstat_fetch_slru(void); +extern PgStat_ReplSlotStats *pgstat_fetch_replslot(int *nslots_p); extern void pgstat_count_slru_page_zeroed(int slru_idx); extern void pgstat_count_slru_page_hit(int slru_idx); diff --git a/src/include/port.h b/src/include/port.h index 84bf2c363f55a..d25716bf7f836 100644 --- a/src/include/port.h +++ b/src/include/port.h @@ -99,6 +99,28 @@ extern void pgfnames_cleanup(char **filenames); ) #endif +/* + * This macro provides a centralized list of all errnos that identify + * hard failure of a previously-established network connection. + * The macro is intended to be used in a switch statement, in the form + * "case ALL_CONNECTION_FAILURE_ERRNOS:". + * + * Note: this groups EPIPE and ECONNRESET, which we take to indicate a + * probable server crash, with other errors that indicate loss of network + * connectivity without proving much about the server's state. Places that + * are actually reporting errors typically single out EPIPE and ECONNRESET, + * while allowing the network failures to be reported generically. 
+ */ +#define ALL_CONNECTION_FAILURE_ERRNOS \ + EPIPE: \ + case ECONNRESET: \ + case ECONNABORTED: \ + case EHOSTDOWN: \ + case EHOSTUNREACH: \ + case ENETDOWN: \ + case ENETRESET: \ + case ENETUNREACH + /* Portable locale initialization (in exec.c) */ extern void set_pglocale_pgservice(const char *argv0, const char *app); diff --git a/src/include/port/atomics/arch-ppc.h b/src/include/port/atomics/arch-ppc.h index fdfe0d0cd5f44..a82ae38c1d6e8 100644 --- a/src/include/port/atomics/arch-ppc.h +++ b/src/include/port/atomics/arch-ppc.h @@ -32,14 +32,14 @@ typedef struct pg_atomic_uint32 } pg_atomic_uint32; /* 64bit atomics are only supported in 64bit mode */ -#ifdef __64BIT__ +#if SIZEOF_VOID_P >= 8 #define PG_HAVE_ATOMIC_U64_SUPPORT typedef struct pg_atomic_uint64 { volatile uint64 value pg_attribute_aligned(8); } pg_atomic_uint64; -#endif /* __64BIT__ */ +#endif /* * This mimics gcc __atomic_compare_exchange_n(..., __ATOMIC_SEQ_CST), but @@ -72,14 +72,6 @@ typedef struct pg_atomic_uint64 * the __asm__. (That would remove the freedom to eliminate dead stores when * the caller ignores "expected", but few callers do.) * - * The cmpwi variant may be dead code. In gcc 7.2.0, - * __builtin_constant_p(*expected) always reports false. - * __atomic_compare_exchange_n() does use cmpwi when its second argument - * points to a constant. Hence, using this instead of - * __atomic_compare_exchange_n() nominally penalizes the generic.h - * pg_atomic_test_set_flag_impl(). Modern GCC will use the generic-gcc.h - * version, making the penalty theoretical only. - * * Recognizing constant "newval" would be superfluous, because there's no * immediate-operand version of stwcx. 
*/ @@ -94,7 +86,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, #ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P if (__builtin_constant_p(*expected) && - *expected <= PG_INT16_MAX && *expected >= PG_INT16_MIN) + (int32) *expected <= PG_INT16_MAX && + (int32) *expected >= PG_INT16_MIN) __asm__ __volatile__( " sync \n" " lwarx %0,0,%5 \n" @@ -183,7 +176,8 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/; s/cmpw/cmpd/ */ #ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P if (__builtin_constant_p(*expected) && - *expected <= PG_INT16_MAX && *expected >= PG_INT16_MIN) + (int64) *expected <= PG_INT16_MAX && + (int64) *expected >= PG_INT16_MIN) __asm__ __volatile__( " sync \n" " ldarx %0,0,%5 \n" diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h index 2d84305f26bcf..1a3dce34ed378 100644 --- a/src/include/port/atomics/generic-gcc.h +++ b/src/include/port/atomics/generic-gcc.h @@ -10,9 +10,9 @@ * * Documentation: * * Legacy __sync Built-in Functions for Atomic Memory Access - * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html + * https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html * * Built-in functions for memory model aware atomic operations - * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html + * https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html * * src/include/port/atomics/generic-gcc.h * diff --git a/src/include/port/win32_port.h b/src/include/port/win32_port.h index 8b6576b23dc86..59c7f35e3dfab 100644 --- a/src/include/port/win32_port.h +++ b/src/include/port/win32_port.h @@ -51,7 +51,13 @@ #include #include #undef near -#include /* needed before sys/stat hacking below */ + +/* needed before sys/stat hacking below: */ +#define fstat microsoft_native_fstat +#define stat microsoft_native_stat +#include +#undef fstat +#undef stat /* Must be here to 
avoid conflicting with prototype in windows.h */ #define mkdir(a,b) mkdir(a) @@ -240,20 +246,34 @@ typedef int pid_t; * Supplement to . * * We must pull in sys/stat.h before this part, else our overrides lose. - */ -#define lstat(path, sb) stat(path, sb) - -/* + * * stat() is not guaranteed to set the st_size field on win32, so we - * redefine it to our own implementation that is. + * redefine it to our own implementation. See src/port/win32stat.c. * - * Some frontends don't need the size from stat, so if UNSAFE_STAT_OK - * is defined we don't bother with this. + * The struct stat is 32 bit in MSVC, so we redefine it as a copy of + * struct __stat64. This also fixes the struct size for MINGW builds. */ -#ifndef UNSAFE_STAT_OK -extern int pgwin32_safestat(const char *path, struct stat *buf); -#define stat(a,b) pgwin32_safestat(a,b) -#endif +struct stat /* This should match struct __stat64 */ +{ + _dev_t st_dev; + _ino_t st_ino; + unsigned short st_mode; + short st_nlink; + short st_uid; + short st_gid; + _dev_t st_rdev; + __int64 st_size; + __time64_t st_atime; + __time64_t st_mtime; + __time64_t st_ctime; +}; + +extern int _pgfstat64(int fileno, struct stat *buf); +extern int _pgstat64(const char *name, struct stat *buf); + +#define fstat(fileno, sb) _pgfstat64(fileno, sb) +#define stat(path, sb) _pgstat64(path, sb) +#define lstat(path, sb) _pgstat64(path, sb) /* These macros are not provided by older MinGW, nor by MSVC */ #ifndef S_IRUSR @@ -349,8 +369,16 @@ extern int pgwin32_safestat(const char *path, struct stat *buf); #define EADDRINUSE WSAEADDRINUSE #undef EADDRNOTAVAIL #define EADDRNOTAVAIL WSAEADDRNOTAVAIL +#undef EHOSTDOWN +#define EHOSTDOWN WSAEHOSTDOWN #undef EHOSTUNREACH #define EHOSTUNREACH WSAEHOSTUNREACH +#undef ENETDOWN +#define ENETDOWN WSAENETDOWN +#undef ENETRESET +#define ENETRESET WSAENETRESET +#undef ENETUNREACH +#define ENETUNREACH WSAENETUNREACH #undef ENOTCONN #define ENOTCONN WSAENOTCONN diff --git a/src/include/replication/logical.h 
b/src/include/replication/logical.h index 45abc444b7a55..40bab7ee02df4 100644 --- a/src/include/replication/logical.h +++ b/src/include/replication/logical.h @@ -122,5 +122,6 @@ extern void LogicalConfirmReceivedLocation(XLogRecPtr lsn); extern bool filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id); extern void ResetLogicalStreamingState(void); +extern void UpdateDecodingStats(LogicalDecodingContext *ctx); #endif diff --git a/src/include/replication/logicalproto.h b/src/include/replication/logicalproto.h index 53905ee6080f4..0c2cda264e14f 100644 --- a/src/include/replication/logicalproto.h +++ b/src/include/replication/logicalproto.h @@ -19,8 +19,9 @@ /* * Protocol capabilities * - * LOGICALREP_PROTO_VERSION_NUM is our native protocol and the greatest version - * we can support. LOGICALREP_PROTO_MIN_VERSION_NUM is the oldest version we + * LOGICALREP_PROTO_VERSION_NUM is our native protocol. + * LOGICALREP_PROTO_MAX_VERSION_NUM is the greatest version we can support. + * LOGICALREP_PROTO_MIN_VERSION_NUM is the oldest version we * have backwards compatibility for. The client requests protocol version at * connect time. * @@ -28,8 +29,9 @@ * support for streaming large transactions. */ #define LOGICALREP_PROTO_MIN_VERSION_NUM 1 +#define LOGICALREP_PROTO_VERSION_NUM 1 #define LOGICALREP_PROTO_STREAM_VERSION_NUM 2 -#define LOGICALREP_PROTO_VERSION_NUM 2 +#define LOGICALREP_PROTO_MAX_VERSION_NUM LOGICALREP_PROTO_STREAM_VERSION_NUM /* * This struct stores a tuple received via logical replication. 
@@ -133,7 +135,6 @@ extern void logicalrep_write_stream_start(StringInfo out, TransactionId xid, extern TransactionId logicalrep_read_stream_start(StringInfo in, bool *first_segment); extern void logicalrep_write_stream_stop(StringInfo out); -extern TransactionId logicalrep_read_stream_stop(StringInfo in); extern void logicalrep_write_stream_commit(StringInfo out, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); extern TransactionId logicalrep_read_stream_commit(StringInfo out, diff --git a/src/include/replication/logicalrelation.h b/src/include/replication/logicalrelation.h index a6b44b12bd1f2..62ddd3c7a2ae5 100644 --- a/src/include/replication/logicalrelation.h +++ b/src/include/replication/logicalrelation.h @@ -19,9 +19,16 @@ typedef struct LogicalRepRelMapEntry { LogicalRepRelation remoterel; /* key is remoterel.remoteid */ - /* Mapping to local relation, filled as needed. */ + /* + * Validity flag -- when false, revalidate all derived info at next + * logicalrep_rel_open. (While the localrel is open, we assume our lock + * on that rel ensures the info remains good.) + */ + bool localrelvalid; + + /* Mapping to local relation. */ Oid localreloid; /* local relation id */ - Relation localrel; /* relcache entry */ + Relation localrel; /* relcache entry (NULL when closed) */ AttrMap *attrmap; /* map of local attributes to remote ones */ bool updatable; /* Can apply updates/deletes? */ diff --git a/src/include/replication/message.h b/src/include/replication/message.h index 937addde48582..e97891ebcafa7 100644 --- a/src/include/replication/message.h +++ b/src/include/replication/message.h @@ -23,9 +23,8 @@ typedef struct xl_logical_message bool transactional; /* is message transactional? 
*/ Size prefix_size; /* length of prefix */ Size message_size; /* size of the message */ - char message[FLEXIBLE_ARRAY_MEMBER]; /* message including the null - * terminated prefix of length - * prefix_size */ + /* payload, including null-terminated prefix of length prefix_size */ + char message[FLEXIBLE_ARRAY_MEMBER]; } xl_logical_message; #define SizeOfLogicalMessage (offsetof(xl_logical_message, message)) diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h index 1ae17d5f11fd7..1c77819aad25c 100644 --- a/src/include/replication/reorderbuffer.h +++ b/src/include/replication/reorderbuffer.h @@ -57,6 +57,7 @@ enum ReorderBufferChangeType REORDER_BUFFER_CHANGE_UPDATE, REORDER_BUFFER_CHANGE_DELETE, REORDER_BUFFER_CHANGE_MESSAGE, + REORDER_BUFFER_CHANGE_INVALIDATION, REORDER_BUFFER_CHANGE_INTERNAL_SNAPSHOT, REORDER_BUFFER_CHANGE_INTERNAL_COMMAND_ID, REORDER_BUFFER_CHANGE_INTERNAL_TUPLECID, @@ -149,6 +150,13 @@ typedef struct ReorderBufferChange CommandId cmax; CommandId combocid; } tuplecid; + + /* Invalidation. */ + struct + { + uint32 ninvalidations; /* Number of messages */ + SharedInvalidationMessage *invalidations; /* invalidation message */ + } inval; } data; /* @@ -162,9 +170,10 @@ typedef struct ReorderBufferChange #define RBTXN_HAS_CATALOG_CHANGES 0x0001 #define RBTXN_IS_SUBXACT 0x0002 #define RBTXN_IS_SERIALIZED 0x0004 -#define RBTXN_IS_STREAMED 0x0008 -#define RBTXN_HAS_TOAST_INSERT 0x0010 -#define RBTXN_HAS_SPEC_INSERT 0x0020 +#define RBTXN_IS_SERIALIZED_CLEAR 0x0008 +#define RBTXN_IS_STREAMED 0x0010 +#define RBTXN_HAS_TOAST_INSERT 0x0020 +#define RBTXN_HAS_SPEC_INSERT 0x0040 /* Does the transaction have catalog changes? */ #define rbtxn_has_catalog_changes(txn) \ @@ -184,6 +193,12 @@ typedef struct ReorderBufferChange ((txn)->txn_flags & RBTXN_IS_SERIALIZED) != 0 \ ) +/* Has this transaction ever been spilled to disk? 
*/ +#define rbtxn_is_serialized_clear(txn) \ +( \ + ((txn)->txn_flags & RBTXN_IS_SERIALIZED_CLEAR) != 0 \ +) + /* This transaction's changes has toast insert, without main table insert. */ #define rbtxn_has_toast_insert(txn) \ ( \ @@ -306,8 +321,8 @@ typedef struct ReorderBufferTXN uint64 nentries_mem; /* - * List of ReorderBufferChange structs, including new Snapshots and new - * CommandIds + * List of ReorderBufferChange structs, including new Snapshots, new + * CommandIds and command invalidation messages. */ dlist_head changes; @@ -525,6 +540,17 @@ struct ReorderBuffer /* memory accounting */ Size size; + + /* + * Statistics about transactions spilled to disk. + * + * A single transaction may be spilled repeatedly, which is why we keep + * two different counters. For spilling, the transaction counter includes + * both toplevel transactions and subtransactions. + */ + int64 spillTxns; /* number of transactions spilled to disk */ + int64 spillCount; /* spill-to-disk invocation counter */ + int64 spillBytes; /* amount of data spilled to disk */ }; diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h index 31362585ecb1e..63bab6967fb58 100644 --- a/src/include/replication/slot.h +++ b/src/include/replication/slot.h @@ -210,6 +210,7 @@ extern XLogRecPtr ReplicationSlotsComputeLogicalRestartLSN(void); extern bool ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive); extern void ReplicationSlotsDropDBSlots(Oid dboid); extern void InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno); +extern ReplicationSlot *SearchNamedReplicationSlot(const char *name); extern void StartupReplicationSlots(void); extern void CheckPointReplicationSlots(void); diff --git a/src/include/storage/ipc.h b/src/include/storage/ipc.h index 462fe463417c5..88994fdc260bc 100644 --- a/src/include/storage/ipc.h +++ b/src/include/storage/ipc.h @@ -72,6 +72,7 @@ extern void on_shmem_exit(pg_on_exit_callback function, Datum arg); extern void 
before_shmem_exit(pg_on_exit_callback function, Datum arg); extern void cancel_before_shmem_exit(pg_on_exit_callback function, Datum arg); extern void on_exit_reset(void); +extern void check_on_shmem_exit_lists_are_empty(void); /* ipci.c */ extern PGDLLIMPORT shmem_startup_hook_type shmem_startup_hook; diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h index e16ab8e711c40..f32e412e751d9 100644 --- a/src/include/storage/sync.h +++ b/src/include/storage/sync.h @@ -34,7 +34,12 @@ typedef enum SyncRequestType */ typedef enum SyncRequestHandler { - SYNC_HANDLER_MD = 0 /* md smgr */ + SYNC_HANDLER_MD = 0, + SYNC_HANDLER_CLOG, + SYNC_HANDLER_COMMIT_TS, + SYNC_HANDLER_MULTIXACT_OFFSET, + SYNC_HANDLER_MULTIXACT_MEMBER, + SYNC_HANDLER_NONE } SyncRequestHandler; /* diff --git a/src/include/tcop/cmdtaglist.h b/src/include/tcop/cmdtaglist.h index 8ef0f55e74876..be94852bbd30d 100644 --- a/src/include/tcop/cmdtaglist.h +++ b/src/include/tcop/cmdtaglist.h @@ -157,7 +157,6 @@ PG_CMDTAG(CMDTAG_DROP_OWNED, "DROP OWNED", true, false, false) PG_CMDTAG(CMDTAG_DROP_POLICY, "DROP POLICY", true, false, false) PG_CMDTAG(CMDTAG_DROP_PROCEDURE, "DROP PROCEDURE", true, false, false) PG_CMDTAG(CMDTAG_DROP_PUBLICATION, "DROP PUBLICATION", true, false, false) -PG_CMDTAG(CMDTAG_DROP_REPLICATION_SLOT, "DROP REPLICATION SLOT", false, false, false) PG_CMDTAG(CMDTAG_DROP_ROLE, "DROP ROLE", false, false, false) PG_CMDTAG(CMDTAG_DROP_ROUTINE, "DROP ROUTINE", true, false, false) PG_CMDTAG(CMDTAG_DROP_RULE, "DROP RULE", true, false, false) diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h index 662ce8a56f806..2e07f1516d16b 100644 --- a/src/include/tcop/dest.h +++ b/src/include/tcop/dest.h @@ -139,6 +139,7 @@ extern void BeginCommand(CommandTag commandTag, CommandDest dest); extern DestReceiver *CreateDestReceiver(CommandDest dest); extern void EndCommand(const QueryCompletion *qc, CommandDest dest, bool force_undecorated_output); +extern void EndReplicationCommand(const char 
*commandTag); /* Additional functions that go with destination management, more or less. */ diff --git a/src/include/tsearch/ts_locale.h b/src/include/tsearch/ts_locale.h index cc4bd9ab20d49..f1669fda2111b 100644 --- a/src/include/tsearch/ts_locale.h +++ b/src/include/tsearch/ts_locale.h @@ -15,6 +15,7 @@ #include #include +#include "lib/stringinfo.h" #include "mb/pg_wchar.h" #include "utils/pg_locale.h" @@ -33,7 +34,9 @@ typedef struct FILE *fp; const char *filename; int lineno; - char *curline; + StringInfoData buf; /* current input line, in UTF-8 */ + char *curline; /* current input line, in DB's encoding */ + /* curline may be NULL, or equal to buf.data, or a palloc'd string */ ErrorContextCallback cb; } tsearch_readline_state; @@ -57,6 +60,4 @@ extern bool tsearch_readline_begin(tsearch_readline_state *stp, extern char *tsearch_readline(tsearch_readline_state *stp); extern void tsearch_readline_end(tsearch_readline_state *stp); -extern char *t_readline(FILE *fp); - #endif /* __TSLOCALE_H__ */ diff --git a/src/include/utils/date.h b/src/include/utils/date.h index 4cdb1f97cc8c4..6fc491e6a6d77 100644 --- a/src/include/utils/date.h +++ b/src/include/utils/date.h @@ -72,6 +72,9 @@ extern int32 anytime_typmod_check(bool istz, int32 typmod); extern double date2timestamp_no_overflow(DateADT dateVal); extern Timestamp date2timestamp_opt_overflow(DateADT dateVal, int *overflow); extern TimestampTz date2timestamptz_opt_overflow(DateADT dateVal, int *overflow); +extern int32 date_cmp_timestamp_internal(DateADT dateVal, Timestamp dt2); +extern int32 date_cmp_timestamptz_internal(DateADT dateVal, TimestampTz dt2); + extern void EncodeSpecialDate(DateADT dt, char *str); extern DateADT GetSQLCurrentDate(void); extern TimeTzADT *GetSQLCurrentTime(int32 typmod); diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index 2819282181678..073c8f3e0628b 100644 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -155,6 +155,7 @@ extern bool 
ParseConfigDirectory(const char *includedir, ConfigVariable **head_p, ConfigVariable **tail_p); extern void FreeConfigVariables(ConfigVariable *list); +extern char *DeescapeQuotedString(const char *s); /* * The possible values of an enum variable are specified by an array of diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h index f1deb9beab045..bebf89b3c451f 100644 --- a/src/include/utils/hsearch.h +++ b/src/include/utils/hsearch.h @@ -68,7 +68,6 @@ typedef struct HASHCTL long ssize; /* segment size */ long dsize; /* (initial) directory size */ long max_dsize; /* limit to dsize if dir size is limited */ - long ffactor; /* fill factor */ Size keysize; /* hash key length in bytes */ Size entrysize; /* total user element size in bytes */ HashValueFunc hash; /* hash function */ @@ -83,7 +82,6 @@ typedef struct HASHCTL #define HASH_PARTITION 0x0001 /* Hashtable is used w/partitioned locking */ #define HASH_SEGMENT 0x0002 /* Set segment size */ #define HASH_DIRSIZE 0x0004 /* Set directory size (initial and max) */ -#define HASH_FFACTOR 0x0008 /* Set fill factor */ #define HASH_ELEM 0x0010 /* Set keysize and entrysize */ #define HASH_BLOBS 0x0020 /* Select support functions for binary keys */ #define HASH_FUNCTION 0x0040 /* Set user defined hash function */ diff --git a/src/include/utils/logtape.h b/src/include/utils/logtape.h index 39a99174afe34..da5159e4c6c76 100644 --- a/src/include/utils/logtape.h +++ b/src/include/utils/logtape.h @@ -54,7 +54,8 @@ typedef struct TapeShare * prototypes for functions in logtape.c */ -extern LogicalTapeSet *LogicalTapeSetCreate(int ntapes, TapeShare *shared, +extern LogicalTapeSet *LogicalTapeSetCreate(int ntapes, bool preallocate, + TapeShare *shared, SharedFileSet *fileset, int worker); extern void LogicalTapeSetClose(LogicalTapeSet *lts); extern void LogicalTapeSetForgetFreeSpace(LogicalTapeSet *lts); diff --git a/src/include/utils/numeric.h b/src/include/utils/numeric.h index 0b7d4ba3c4bc0..2a768b9a04ad5 100644 
--- a/src/include/utils/numeric.h +++ b/src/include/utils/numeric.h @@ -62,6 +62,8 @@ int32 numeric_maximum_size(int32 typmod); extern char *numeric_out_sci(Numeric num, int scale); extern char *numeric_normalize(Numeric num); +extern Numeric int64_to_numeric(int64 val); + extern Numeric numeric_add_opt_error(Numeric num1, Numeric num2, bool *have_error); extern Numeric numeric_sub_opt_error(Numeric num1, Numeric num2, diff --git a/src/include/utils/old_snapshot.h b/src/include/utils/old_snapshot.h new file mode 100644 index 0000000000000..e6da1833a6341 --- /dev/null +++ b/src/include/utils/old_snapshot.h @@ -0,0 +1,75 @@ +/*------------------------------------------------------------------------- + * + * old_snapshot.h + * Data structures for 'snapshot too old' + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/include/utils/old_snapshot.h + * + *------------------------------------------------------------------------- + */ + +#ifndef OLD_SNAPSHOT_H +#define OLD_SNAPSHOT_H + +#include "datatype/timestamp.h" +#include "storage/s_lock.h" + +/* + * Structure for dealing with old_snapshot_threshold implementation. + */ +typedef struct OldSnapshotControlData +{ + /* + * Variables for old snapshot handling are shared among processes and are + * only allowed to move forward. + */ + slock_t mutex_current; /* protect current_timestamp */ + TimestampTz current_timestamp; /* latest snapshot timestamp */ + slock_t mutex_latest_xmin; /* protect latest_xmin and next_map_update */ + TransactionId latest_xmin; /* latest snapshot xmin */ + TimestampTz next_map_update; /* latest snapshot valid up to */ + slock_t mutex_threshold; /* protect threshold fields */ + TimestampTz threshold_timestamp; /* earlier snapshot is old */ + TransactionId threshold_xid; /* earlier xid may be gone */ + + /* + * Keep one xid per minute for old snapshot error handling. 
+ * + * Use a circular buffer with a head offset, a count of entries currently + * used, and a timestamp corresponding to the xid at the head offset. A + * count_used value of zero means that there are no times stored; a + * count_used value of OLD_SNAPSHOT_TIME_MAP_ENTRIES means that the buffer + * is full and the head must be advanced to add new entries. Use + * timestamps aligned to minute boundaries, since that seems less + * surprising than aligning based on the first usage timestamp. The + * latest bucket is effectively stored within latest_xmin. The circular + * buffer is updated when we get a new xmin value that doesn't fall into + * the same interval. + * + * It is OK if the xid for a given time slot is from earlier than + * calculated by adding the number of minutes corresponding to the + * (possibly wrapped) distance from the head offset to the time of the + * head entry, since that just results in the vacuuming of old tuples + * being slightly less aggressive. It would not be OK for it to be off in + * the other direction, since it might result in vacuuming tuples that are + * still expected to be there. + * + * Use of an SLRU was considered but not chosen because it is more + * heavyweight than is needed for this, and would probably not be any less + * code to implement. + * + * Persistence is not needed. 
+ */ + int head_offset; /* subscript of oldest tracked time */ + TimestampTz head_timestamp; /* time corresponding to head xid */ + int count_used; /* how many slots are in use */ + TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER]; +} OldSnapshotControlData; + +extern PGDLLIMPORT volatile OldSnapshotControlData *oldSnapshotControl; + +#endif diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h index 264aec820b1cc..fb262c6e8d42f 100644 --- a/src/include/utils/sortsupport.h +++ b/src/include/utils/sortsupport.h @@ -272,5 +272,6 @@ extern void PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup); extern void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup); extern void PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy, SortSupport ssup); +extern void PrepareSortSupportFromGistIndexRel(Relation indexRel, SortSupport ssup); #endif /* SORTSUPPORT_H */ diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h index 03a1de569f06f..16c3fd8ec977b 100644 --- a/src/include/utils/timestamp.h +++ b/src/include/utils/timestamp.h @@ -99,6 +99,8 @@ extern int timestamp_cmp_internal(Timestamp dt1, Timestamp dt2); extern TimestampTz timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow); +extern int32 timestamp_cmp_timestamptz_internal(Timestamp timestampVal, + TimestampTz dt2); extern int isoweek2j(int year, int week); extern void isoweek2date(int woy, int *year, int *mon, int *mday); diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index 9e76666fe9483..c69b36e209ad6 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -217,6 +217,10 @@ extern Tuplesortstate *tuplesort_begin_index_hash(Relation heapRel, uint32 max_buckets, int workMem, SortCoordinate coordinate, bool randomAccess); +extern Tuplesortstate *tuplesort_begin_index_gist(Relation heapRel, + Relation indexRel, + int workMem, SortCoordinate coordinate, + bool 
randomAccess); extern Tuplesortstate *tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, diff --git a/src/interfaces/ecpg/Makefile b/src/interfaces/ecpg/Makefile index 41460a17c9644..a8f91e3dc2b34 100644 --- a/src/interfaces/ecpg/Makefile +++ b/src/interfaces/ecpg/Makefile @@ -5,7 +5,7 @@ include $(top_builddir)/src/Makefile.global SUBDIRS = include pgtypeslib ecpglib compatlib preproc # Suppress parallel build of subdirectories to avoid a bug in GNU make 3.82, cf -# http://savannah.gnu.org/bugs/?30653 +# https://savannah.gnu.org/bugs/?30653 # https://bugzilla.redhat.com/show_bug.cgi?id=835424 # (There are some other parallelism bugs in the subdirectory makefiles # themselves, but there's little point in fixing them as long as we have diff --git a/src/interfaces/ecpg/preproc/keywords.c b/src/interfaces/ecpg/preproc/keywords.c index f82764aeb97d3..f1640d0062b64 100644 --- a/src/interfaces/ecpg/preproc/keywords.c +++ b/src/interfaces/ecpg/preproc/keywords.c @@ -29,7 +29,7 @@ #include "preproc_extern.h" #include "preproc.h" -#define PG_KEYWORD(kwname, value, category) value, +#define PG_KEYWORD(kwname, value, category, collabel) value, const uint16 SQLScanKeywordTokens[] = { #include "parser/kwlist.h" diff --git a/src/interfaces/ecpg/preproc/pgc.l b/src/interfaces/ecpg/preproc/pgc.l index 466bbac6a7b01..91d8b635787b9 100644 --- a/src/interfaces/ecpg/preproc/pgc.l +++ b/src/interfaces/ecpg/preproc/pgc.l @@ -623,11 +623,8 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ } } -{xqdouble} { addlitchar('\''); } -{xqcquote} { - addlitchar('\\'); - addlitchar('\''); - } +{xqdouble} { addlit(yytext, yyleng); } +{xqcquote} { addlit(yytext, yyleng); } {xqinside} { addlit(yytext, yyleng); } {xeinside} { addlit(yytext, yyleng); @@ -718,7 +715,14 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ BEGIN(state_before_str_start); if (literallen == 0) mmerror(PARSE_ERROR, 
ET_ERROR, "zero-length delimited identifier"); - /* The backend will truncate the identifier here. We do not as it does not change the result. */ + /* + * The server will truncate the identifier here. We do + * not, as (1) it does not change the result; (2) we don't + * know what NAMEDATALEN the server might use; (3) this + * code path is also taken for literal query strings in + * PREPARE and EXECUTE IMMEDIATE, which can certainly be + * longer than NAMEDATALEN. + */ base_yylval.str = mm_strdup(literalbuf); return CSTRING; } @@ -736,7 +740,7 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})((\/\*[^*/]*\*+ return UIDENT; } {xddouble} { - addlitchar('"'); + addlit(yytext, yyleng); } {xdinside} { addlit(yytext, yyleng); diff --git a/src/interfaces/ecpg/test/expected/preproc-strings.c b/src/interfaces/ecpg/test/expected/preproc-strings.c index e695007b1336e..1e50cd36c3820 100644 --- a/src/interfaces/ecpg/test/expected/preproc-strings.c +++ b/src/interfaces/ecpg/test/expected/preproc-strings.c @@ -45,7 +45,7 @@ int main(void) #line 13 "strings.pgc" - { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select 'abcdef' , N'abcdef' as foo , E'abc\\bdef' as \"foo\" , U&'d\\0061t\\0061' as U&\"foo\" , U&'d!+000061t!+000061' UESCAPE '!' , $foo$abc$def$foo$", ECPGt_EOIT, + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "select 'abc''d\\ef' , N'abc''d\\ef' as foo , E'abc''d\\\\ef' as \"foo\"\"bar\" , U&'d\\0061t\\0061' as U&\"foo\"\"bar\" , U&'d!+000061t!+000061' UESCAPE '!' 
, $foo$abc$def$foo$", ECPGt_EOIT, ECPGt_char,&(s1),(long)0,(long)1,(1)*sizeof(char), ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_char,&(s2),(long)0,(long)1,(1)*sizeof(char), diff --git a/src/interfaces/ecpg/test/expected/preproc-strings.stderr b/src/interfaces/ecpg/test/expected/preproc-strings.stderr index dbc9e5c0b8dba..4c3a8eee5aad1 100644 --- a/src/interfaces/ecpg/test/expected/preproc-strings.stderr +++ b/src/interfaces/ecpg/test/expected/preproc-strings.stderr @@ -8,7 +8,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_process_output on line 13: OK: SET [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 15: query: select 'abcdef' , N'abcdef' as foo , E'abc\bdef' as "foo" , U&'d\0061t\0061' as U&"foo" , U&'d!+000061t!+000061' UESCAPE '!' , $foo$abc$def$foo$; with 0 parameter(s) on connection ecpg1_regression +[NO_PID]: ecpg_execute on line 15: query: select 'abc''d\ef' , N'abc''d\ef' as foo , E'abc''d\\ef' as "foo""bar" , U&'d\0061t\0061' as U&"foo""bar" , U&'d!+000061t!+000061' UESCAPE '!' 
, $foo$abc$def$foo$; with 0 parameter(s) on connection ecpg1_regression [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_execute on line 15: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 @@ -16,15 +16,15 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_store_result on line 15: allocating memory for 1 tuples [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 15: RESULT: abcdef offset: -1; array: no +[NO_PID]: ecpg_get_data on line 15: RESULT: abc'd\ef offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_store_result on line 15: allocating memory for 1 tuples [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 15: RESULT: abcdef offset: -1; array: no +[NO_PID]: ecpg_get_data on line 15: RESULT: abc'd\ef offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_store_result on line 15: allocating memory for 1 tuples [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_get_data on line 15: RESULT: abcdef offset: -1; array: no +[NO_PID]: ecpg_get_data on line 15: RESULT: abc'd\ef offset: -1; array: no [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_store_result on line 15: allocating memory for 1 tuples [NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/preproc-strings.stdout b/src/interfaces/ecpg/test/expected/preproc-strings.stdout index 730d72dd64e13..1456b152d78e9 100644 --- a/src/interfaces/ecpg/test/expected/preproc-strings.stdout +++ b/src/interfaces/ecpg/test/expected/preproc-strings.stdout @@ -1 +1 @@ -abcdef abcdef abcdef data data abc$def +abc'd\ef abc'd\ef abc'd\ef data data abc$def diff --git a/src/interfaces/ecpg/test/expected/sql-execute.c b/src/interfaces/ecpg/test/expected/sql-execute.c index cac91dc5999da..10e9ad56b51bf 100644 --- a/src/interfaces/ecpg/test/expected/sql-execute.c +++ b/src/interfaces/ecpg/test/expected/sql-execute.c @@ -77,8 +77,8 @@ if (sqlca.sqlcode < 0) sqlprint();} #line 26 "execute.pgc" - 
sprintf(command, "insert into test (name, amount, letter) values ('db: ''r1''', 1, 'f')"); - { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_exec_immediate, command, ECPGt_EOIT, ECPGt_EORT); + /* test handling of embedded quotes in EXECUTE IMMEDIATE "literal" */ + { ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_exec_immediate, "insert into test (name, \042amount\042, letter) values ('db: ''r1''', 1, 'f')", ECPGt_EOIT, ECPGt_EORT); #line 29 "execute.pgc" if (sqlca.sqlcode < 0) sqlprint();} diff --git a/src/interfaces/ecpg/test/expected/sql-execute.stderr b/src/interfaces/ecpg/test/expected/sql-execute.stderr index 96b46bd158473..d8bc3c6524ab5 100644 --- a/src/interfaces/ecpg/test/expected/sql-execute.stderr +++ b/src/interfaces/ecpg/test/expected/sql-execute.stderr @@ -10,7 +10,7 @@ [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ECPGtrans on line 26: action "commit"; connection "main" [NO_PID]: sqlca: code: 0, state: 00000 -[NO_PID]: ecpg_execute on line 29: query: insert into test (name, amount, letter) values ('db: ''r1''', 1, 'f'); with 0 parameter(s) on connection main +[NO_PID]: ecpg_execute on line 29: query: insert into test (name, "amount", letter) values ('db: ''r1''', 1, 'f'); with 0 parameter(s) on connection main [NO_PID]: sqlca: code: 0, state: 00000 [NO_PID]: ecpg_execute on line 29: using PQexec [NO_PID]: sqlca: code: 0, state: 00000 diff --git a/src/interfaces/ecpg/test/expected/thread-thread.c b/src/interfaces/ecpg/test/expected/thread-thread.c index a7e401570a409..0e75c47fab26e 100644 --- a/src/interfaces/ecpg/test/expected/thread-thread.c +++ b/src/interfaces/ecpg/test/expected/thread-thread.c @@ -99,7 +99,7 @@ int main() #ifndef WIN32 pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1)); #else - threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)test_thread, (void *) (n + 1), 0, NULL); + threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) (void (*) (void)) test_thread, (void *) (n + 1), 0, NULL); #endif } diff --git 
a/src/interfaces/ecpg/test/expected/thread-thread_implicit.c b/src/interfaces/ecpg/test/expected/thread-thread_implicit.c index 6c7adb062c807..0df2794530c92 100644 --- a/src/interfaces/ecpg/test/expected/thread-thread_implicit.c +++ b/src/interfaces/ecpg/test/expected/thread-thread_implicit.c @@ -99,7 +99,7 @@ int main() #ifndef WIN32 pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1)); #else - threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) test_thread, (void *) (n+1), 0, NULL); + threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) (void (*) (void)) test_thread, (void *) (n+1), 0, NULL); #endif } diff --git a/src/interfaces/ecpg/test/pg_regress_ecpg.c b/src/interfaces/ecpg/test/pg_regress_ecpg.c index a2d7b70d9a307..6e1d25b1f4a3c 100644 --- a/src/interfaces/ecpg/test/pg_regress_ecpg.c +++ b/src/interfaces/ecpg/test/pg_regress_ecpg.c @@ -49,7 +49,7 @@ ecpg_filter(const char *sourcefile, const char *outfile) initStringInfo(&linebuf); - while (pg_get_line_append(s, &linebuf)) + while (pg_get_line_buf(s, &linebuf)) { /* check for "#line " in the beginning */ if (strstr(linebuf.data, "#line ") == linebuf.data) @@ -69,7 +69,6 @@ ecpg_filter(const char *sourcefile, const char *outfile) } } fputs(linebuf.data, t); - resetStringInfo(&linebuf); } pfree(linebuf.data); diff --git a/src/interfaces/ecpg/test/preproc/strings.pgc b/src/interfaces/ecpg/test/preproc/strings.pgc index f004ddf6dc1e1..25157f136c2ae 100644 --- a/src/interfaces/ecpg/test/preproc/strings.pgc +++ b/src/interfaces/ecpg/test/preproc/strings.pgc @@ -12,10 +12,10 @@ int main(void) exec sql set standard_conforming_strings to on; - exec sql select 'abcdef', - N'abcdef' AS foo, - E'abc\bdef' AS "foo", - U&'d\0061t\0061' AS U&"foo", + exec sql select 'abc''d\ef', + N'abc''d\ef' AS foo, + E'abc''d\\ef' AS "foo""bar", + U&'d\0061t\0061' AS U&"foo""bar", U&'d!+000061t!+000061' uescape '!', $foo$abc$def$foo$ into :s1, :s2, :s3, :s4, :s5, :s6; diff --git 
a/src/interfaces/ecpg/test/sql/dyntest.pgc b/src/interfaces/ecpg/test/sql/dyntest.pgc index 5f02fd5dd6951..0222c89851547 100644 --- a/src/interfaces/ecpg/test/sql/dyntest.pgc +++ b/src/interfaces/ecpg/test/sql/dyntest.pgc @@ -51,7 +51,7 @@ main () exec sql create table dyntest (name char (14), d float8, i int, bignumber int8, b boolean, comment text, day date); - exec sql insert into dyntest values ('first entry', 14.7, 14, 123045607890, true, 'The world''''s most advanced open source database.', '1987-07-14'); + exec sql insert into dyntest values ('first entry', 14.7, 14, 123045607890, true, 'The world''s most advanced open source database.', '1987-07-14'); exec sql insert into dyntest values ('second entry', 1407.87, 1407, 987065403210, false, 'The elephant never forgets.', '1999-11-5'); exec sql prepare MYQUERY from :QUERY; diff --git a/src/interfaces/ecpg/test/sql/execute.pgc b/src/interfaces/ecpg/test/sql/execute.pgc index cc9814e9bea71..43171bb77c997 100644 --- a/src/interfaces/ecpg/test/sql/execute.pgc +++ b/src/interfaces/ecpg/test/sql/execute.pgc @@ -25,8 +25,8 @@ exec sql end declare section; exec sql create table test (name char(8), amount int, letter char(1)); exec sql commit; - sprintf(command, "insert into test (name, amount, letter) values ('db: ''r1''', 1, 'f')"); - exec sql execute immediate :command; + /* test handling of embedded quotes in EXECUTE IMMEDIATE "literal" */ + exec sql execute immediate "insert into test (name, \042amount\042, letter) values ('db: ''r1''', 1, 'f')"; sprintf(command, "insert into test (name, amount, letter) values ('db: ''r1''', 2, 't')"); exec sql execute immediate :command; diff --git a/src/interfaces/ecpg/test/thread/thread.pgc b/src/interfaces/ecpg/test/thread/thread.pgc index e149b91d976b0..e7d8c00af6f4b 100644 --- a/src/interfaces/ecpg/test/thread/thread.pgc +++ b/src/interfaces/ecpg/test/thread/thread.pgc @@ -68,7 +68,7 @@ int main() #ifndef WIN32 pthread_create(&threads[n], NULL, test_thread, (void *) (n + 
1)); #else - threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)test_thread, (void *) (n + 1), 0, NULL); + threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) (void (*) (void)) test_thread, (void *) (n + 1), 0, NULL); #endif } diff --git a/src/interfaces/ecpg/test/thread/thread_implicit.pgc b/src/interfaces/ecpg/test/thread/thread_implicit.pgc index 3209da22bc54d..b4cae7e1aecf3 100644 --- a/src/interfaces/ecpg/test/thread/thread_implicit.pgc +++ b/src/interfaces/ecpg/test/thread/thread_implicit.pgc @@ -68,7 +68,7 @@ int main() #ifndef WIN32 pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1)); #else - threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) test_thread, (void *) (n+1), 0, NULL); + threads[n] = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) (void (*) (void)) test_thread, (void *) (n+1), 0, NULL); #endif } diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index d4919970f8886..4ac5f4b340f50 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -89,6 +89,8 @@ SHLIB_PREREQS = submake-libpgport SHLIB_EXPORTS = exports.txt +PKG_CONFIG_REQUIRES_PRIVATE = libssl libcrypto + all: all-lib # Shared library stuff diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index 724076a3103e5..b0ca37c2ed81b 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -28,6 +28,7 @@ #include "fe-auth.h" #include "libpq-fe.h" #include "libpq-int.h" +#include "lib/stringinfo.h" #include "mb/pg_wchar.h" #include "pg_config_paths.h" #include "port/pg_bswap.h" @@ -3870,23 +3871,30 @@ makeEmptyPGconn(void) #ifdef WIN32 /* - * Make sure socket support is up and running. + * Make sure socket support is up and running in this process. + * + * Note: the Windows documentation says that we should eventually do a + * matching WSACleanup() call, but experience suggests that that is at + * least as likely to cause problems as fix them. 
So we don't. */ - WSADATA wsaData; + static bool wsastartup_done = false; - if (WSAStartup(MAKEWORD(1, 1), &wsaData)) - return NULL; + if (!wsastartup_done) + { + WSADATA wsaData; + + if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0) + return NULL; + wsastartup_done = true; + } + + /* Forget any earlier error */ WSASetLastError(0); -#endif +#endif /* WIN32 */ conn = (PGconn *) malloc(sizeof(PGconn)); if (conn == NULL) - { -#ifdef WIN32 - WSACleanup(); -#endif return conn; - } /* Zero all pointers and booleans */ MemSet(conn, 0, sizeof(PGconn)); @@ -4079,10 +4087,6 @@ freePGconn(PGconn *conn) termPQExpBuffer(&conn->workBuffer); free(conn); - -#ifdef WIN32 - WSACleanup(); -#endif } /* @@ -5011,8 +5015,6 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options, #endif /* USE_LDAP */ -#define MAXBUFSIZE 256 - /* * parseServiceInfo: if a service name has been given, look it up and absorb * connection options from it into *options. @@ -5099,11 +5101,14 @@ parseServiceFile(const char *serviceFile, PQExpBuffer errorMessage, bool *group_found) { - int linenr = 0, + int result = 0, + linenr = 0, i; FILE *f; - char buf[MAXBUFSIZE], - *line; + char *line; + StringInfoData linebuf; + + *group_found = false; f = fopen(serviceFile, "r"); if (f == NULL) @@ -5113,26 +5118,18 @@ parseServiceFile(const char *serviceFile, return 1; } - while ((line = fgets(buf, sizeof(buf), f)) != NULL) - { - int len; + initStringInfo(&linebuf); + while (pg_get_line_buf(f, &linebuf)) + { linenr++; - if (strlen(line) >= sizeof(buf) - 1) - { - fclose(f); - printfPQExpBuffer(errorMessage, - libpq_gettext("line %d too long in service file \"%s\"\n"), - linenr, - serviceFile); - return 2; - } - /* ignore whitespace at end of line, especially the newline */ - len = strlen(line); - while (len > 0 && isspace((unsigned char) line[len - 1])) - line[--len] = '\0'; + while (linebuf.len > 0 && + isspace((unsigned char) linebuf.data[linebuf.len - 1])) + linebuf.data[--linebuf.len] = '\0'; + + line = 
linebuf.data; /* ignore leading whitespace too */ while (*line && isspace((unsigned char) line[0])) @@ -5147,9 +5144,8 @@ parseServiceFile(const char *serviceFile, { if (*group_found) { - /* group info already read */ - fclose(f); - return 0; + /* end of desired group reached; return success */ + goto exit; } if (strncmp(line + 1, service, strlen(service)) == 0 && @@ -5178,12 +5174,11 @@ parseServiceFile(const char *serviceFile, switch (rc) { case 0: - fclose(f); - return 0; + goto exit; case 1: case 3: - fclose(f); - return 3; + result = 3; + goto exit; case 2: continue; } @@ -5198,8 +5193,8 @@ parseServiceFile(const char *serviceFile, libpq_gettext("syntax error in service file \"%s\", line %d\n"), serviceFile, linenr); - fclose(f); - return 3; + result = 3; + goto exit; } *val++ = '\0'; @@ -5209,8 +5204,8 @@ parseServiceFile(const char *serviceFile, libpq_gettext("nested service specifications not supported in service file \"%s\", line %d\n"), serviceFile, linenr); - fclose(f); - return 3; + result = 3; + goto exit; } /* @@ -5228,8 +5223,8 @@ parseServiceFile(const char *serviceFile, { printfPQExpBuffer(errorMessage, libpq_gettext("out of memory\n")); - fclose(f); - return 3; + result = 3; + goto exit; } found_keyword = true; break; @@ -5242,16 +5237,18 @@ parseServiceFile(const char *serviceFile, libpq_gettext("syntax error in service file \"%s\", line %d\n"), serviceFile, linenr); - fclose(f); - return 3; + result = 3; + goto exit; } } } } +exit: fclose(f); + pfree(linebuf.data); - return 0; + return result; } diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c index ff840b7730d81..4ffc7f33fb5e7 100644 --- a/src/interfaces/libpq/fe-misc.c +++ b/src/interfaces/libpq/fe-misc.c @@ -668,24 +668,29 @@ pqReadData(PGconn *conn) conn->inBufSize - conn->inEnd); if (nread < 0) { - if (SOCK_ERRNO == EINTR) - goto retry3; - /* Some systems return EAGAIN/EWOULDBLOCK for no data */ + switch (SOCK_ERRNO) + { + case EINTR: + goto retry3; + + /* Some 
systems return EAGAIN/EWOULDBLOCK for no data */ #ifdef EAGAIN - if (SOCK_ERRNO == EAGAIN) - return someread; + case EAGAIN: + return someread; #endif #if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN)) - if (SOCK_ERRNO == EWOULDBLOCK) - return someread; + case EWOULDBLOCK: + return someread; #endif - /* We might get ECONNRESET here if using TCP and backend died */ -#ifdef ECONNRESET - if (SOCK_ERRNO == ECONNRESET) - goto definitelyFailed; -#endif - /* pqsecure_read set the error message for us */ - return -1; + + /* We might get ECONNRESET etc here if connection failed */ + case ALL_CONNECTION_FAILURE_ERRNOS: + goto definitelyFailed; + + default: + /* pqsecure_read set the error message for us */ + return -1; + } } if (nread > 0) { @@ -758,24 +763,29 @@ pqReadData(PGconn *conn) conn->inBufSize - conn->inEnd); if (nread < 0) { - if (SOCK_ERRNO == EINTR) - goto retry4; - /* Some systems return EAGAIN/EWOULDBLOCK for no data */ + switch (SOCK_ERRNO) + { + case EINTR: + goto retry4; + + /* Some systems return EAGAIN/EWOULDBLOCK for no data */ #ifdef EAGAIN - if (SOCK_ERRNO == EAGAIN) - return 0; + case EAGAIN: + return 0; #endif #if defined(EWOULDBLOCK) && (!defined(EAGAIN) || (EWOULDBLOCK != EAGAIN)) - if (SOCK_ERRNO == EWOULDBLOCK) - return 0; + case EWOULDBLOCK: + return 0; #endif - /* We might get ECONNRESET here if using TCP and backend died */ -#ifdef ECONNRESET - if (SOCK_ERRNO == ECONNRESET) - goto definitelyFailed; -#endif - /* pqsecure_read set the error message for us */ - return -1; + + /* We might get ECONNRESET etc here if connection failed */ + case ALL_CONNECTION_FAILURE_ERRNOS: + goto definitelyFailed; + + default: + /* pqsecure_read set the error message for us */ + return -1; + } } if (nread > 0) { diff --git a/src/interfaces/libpq/fe-secure-gssapi.c b/src/interfaces/libpq/fe-secure-gssapi.c index 1994e9f615068..bfc0f5521468b 100644 --- a/src/interfaces/libpq/fe-secure-gssapi.c +++ b/src/interfaces/libpq/fe-secure-gssapi.c @@ 
-226,7 +226,7 @@ pg_GSS_write(PGconn *conn, const void *ptr, size_t len) PqGSSSendConsumed += input.length; /* 4 network-order bytes of length, then payload */ - netlen = htonl(output.length); + netlen = pg_hton32(output.length); memcpy(PqGSSSendBuffer + PqGSSSendLength, &netlen, sizeof(uint32)); PqGSSSendLength += sizeof(uint32); @@ -346,7 +346,7 @@ pg_GSS_read(PGconn *conn, void *ptr, size_t len) } /* Decode the packet length and check for overlength packet */ - input.length = ntohl(*(uint32 *) PqGSSRecvBuffer); + input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer); if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) { @@ -589,7 +589,7 @@ pqsecure_open_gss(PGconn *conn) */ /* Get the length and check for over-length packet */ - input.length = ntohl(*(uint32 *) PqGSSRecvBuffer); + input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer); if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32)) { printfPQExpBuffer(&conn->errorMessage, @@ -688,7 +688,7 @@ pqsecure_open_gss(PGconn *conn) } /* Queue the token for writing */ - netlen = htonl(output.length); + netlen = pg_hton32(output.length); memcpy(PqGSSSendBuffer, (char *) &netlen, sizeof(uint32)); PqGSSSendLength += sizeof(uint32); diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c index 3311fd7a5bdae..97c3805303f54 100644 --- a/src/interfaces/libpq/fe-secure.c +++ b/src/interfaces/libpq/fe-secure.c @@ -261,14 +261,13 @@ pqsecure_raw_read(PGconn *conn, void *ptr, size_t len) /* no error message, caller is expected to retry */ break; -#ifdef ECONNRESET + case EPIPE: case ECONNRESET: printfPQExpBuffer(&conn->errorMessage, libpq_gettext("server closed the connection unexpectedly\n" "\tThis probably means the server terminated abnormally\n" "\tbefore or while processing the request.\n")); break; -#endif default: printfPQExpBuffer(&conn->errorMessage, @@ -374,11 +373,9 @@ pqsecure_raw_write(PGconn *conn, const void *ptr, size_t len) /* Set flag for EPIPE */ 
REMEMBER_EPIPE(spinfo, true); -#ifdef ECONNRESET /* FALL THRU */ case ECONNRESET: -#endif printfPQExpBuffer(&conn->errorMessage, libpq_gettext("server closed the connection unexpectedly\n" "\tThis probably means the server terminated abnormally\n" diff --git a/src/interfaces/libpq/win32.h b/src/interfaces/libpq/win32.h index c42d7abfe30a4..fcce1e0544a7c 100644 --- a/src/interfaces/libpq/win32.h +++ b/src/interfaces/libpq/win32.h @@ -14,17 +14,6 @@ #define write(a,b,c) _write(a,b,c) #undef EAGAIN /* doesn't apply on sockets */ -#undef EINTR -#define EINTR WSAEINTR -#ifndef EWOULDBLOCK -#define EWOULDBLOCK WSAEWOULDBLOCK -#endif -#ifndef ECONNRESET -#define ECONNRESET WSAECONNRESET -#endif -#ifndef EINPROGRESS -#define EINPROGRESS WSAEINPROGRESS -#endif /* * support for handling Windows Socket errors diff --git a/src/pl/plperl/expected/plperl_call.out b/src/pl/plperl/expected/plperl_call.out index c55c59cbceb3b..a08b9ff795c64 100644 --- a/src/pl/plperl/expected/plperl_call.out +++ b/src/pl/plperl/expected/plperl_call.out @@ -48,6 +48,24 @@ CALL test_proc6(2, 3, 4); 6 | 8 (1 row) +-- OUT parameters +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE plperl +AS $$ +my ($a, $b) = @_; +elog(NOTICE, "a: $a, b: $b"); +return { b => $a * 2 }; +$$; +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; +NOTICE: a: 10, b: +NOTICE: _a: 10, _b: 20 DROP PROCEDURE test_proc1; DROP PROCEDURE test_proc2; DROP PROCEDURE test_proc3; diff --git a/src/pl/plperl/plperl.h b/src/pl/plperl/plperl.h index a9449d9d8a4fe..619e7121a1bfe 100644 --- a/src/pl/plperl/plperl.h +++ b/src/pl/plperl/plperl.h @@ -92,6 +92,7 @@ #undef bind #undef connect #undef fopen +#undef fstat #undef kill #undef listen #undef lstat diff --git a/src/pl/plperl/plperl_opmask.pl b/src/pl/plperl/plperl_opmask.pl index 3b33112ff9456..ee18e915289bf 100644 --- a/src/pl/plperl/plperl_opmask.pl +++ b/src/pl/plperl/plperl_opmask.pl @@ 
-52,7 +52,7 @@ printf $fh qq{ opmask[OP_%-12s] = 0;\t/* %s */ \\\n}, uc($opname), opdesc($opname); } -printf $fh " /* end */ \n"; +printf $fh " /* end */\n"; close $fh or die "Error closing $plperl_opmask_tmp: $!"; diff --git a/src/pl/plperl/sql/plperl_call.sql b/src/pl/plperl/sql/plperl_call.sql index 2cf5461fefde6..bbea85fc9f501 100644 --- a/src/pl/plperl/sql/plperl_call.sql +++ b/src/pl/plperl/sql/plperl_call.sql @@ -51,6 +51,26 @@ $$; CALL test_proc6(2, 3, 4); +-- OUT parameters + +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE plperl +AS $$ +my ($a, $b) = @_; +elog(NOTICE, "a: $a, b: $b"); +return { b => $a * 2 }; +$$; + +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; + + DROP PROCEDURE test_proc1; DROP PROCEDURE test_proc2; DROP PROCEDURE test_proc3; diff --git a/src/pl/plpgsql/src/expected/plpgsql_call.out b/src/pl/plpgsql/src/expected/plpgsql_call.out index d9c88e85c8d87..973857161105a 100644 --- a/src/pl/plpgsql/src/expected/plpgsql_call.out +++ b/src/pl/plpgsql/src/expected/plpgsql_call.out @@ -264,6 +264,25 @@ END $$; ERROR: procedure parameter "c" is an output parameter but corresponding argument is not writable CONTEXT: PL/pgSQL function inline_code_block line 5 at CALL +-- OUT parameters +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %', a, b; + b := a * 2; +END; +$$; +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; +NOTICE: a: 10, b: +NOTICE: _a: 10, _b: 20 -- transition variable assignment TRUNCATE test1; CREATE FUNCTION triggerfunc1() RETURNS trigger diff --git a/src/pl/plpgsql/src/generate-plerrcodes.pl b/src/pl/plpgsql/src/generate-plerrcodes.pl index f74dd0ef03f8c..a50de66ef813f 100644 --- a/src/pl/plpgsql/src/generate-plerrcodes.pl +++ b/src/pl/plpgsql/src/generate-plerrcodes.pl @@ -3,8 +3,8 @@ # 
Generate the plerrcodes.h header from errcodes.txt # Copyright (c) 2000-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; print "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n"; @@ -34,7 +34,7 @@ # Skip lines without PL/pgSQL condition names next unless defined($condition_name); - print "{\n\t\"$condition_name\", $errcode_macro\n},\n\n"; + print "\n{\n\t\"$condition_name\", $errcode_macro\n},\n"; } close $errcodes; diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index e7f4a5f291d17..344627da956f8 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -458,6 +458,7 @@ do_compile(FunctionCallInfo fcinfo, /* Remember arguments in appropriate arrays */ if (argmode == PROARGMODE_IN || argmode == PROARGMODE_INOUT || + (argmode == PROARGMODE_OUT && function->fn_prokind == PROKIND_PROCEDURE) || argmode == PROARGMODE_VARIADIC) in_arg_varnos[num_in_args++] = argvariable->dno; if (argmode == PROARGMODE_OUT || diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index d4a3d58daa9e0..ccbc50fc457ce 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -2145,40 +2145,60 @@ exec_stmt_perform(PLpgSQL_execstate *estate, PLpgSQL_stmt_perform *stmt) /* * exec_stmt_call + * + * NOTE: this is used for both CALL and DO statements. */ static int exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) { PLpgSQL_expr *expr = stmt->expr; + SPIPlanPtr orig_plan = expr->plan; + bool local_plan; + PLpgSQL_variable *volatile cur_target = stmt->target; volatile LocalTransactionId before_lxid; LocalTransactionId after_lxid; volatile bool pushed_active_snap = false; volatile int rc; + /* + * If not in atomic context, we make a local plan that we'll just use for + * this invocation, and will free at the end. Otherwise, transaction ends + * would cause errors about plancache leaks. 
+ * + * XXX This would be fixable with some plancache/resowner surgery + * elsewhere, but for now we'll just work around this here. + */ + local_plan = !estate->atomic; + /* PG_TRY to ensure we clear the plan link, if needed, on failure */ PG_TRY(); { SPIPlanPtr plan = expr->plan; ParamListInfo paramLI; - if (plan == NULL) + /* + * Make a plan if we don't have one, or if we need a local one. Note + * that we'll overwrite expr->plan either way; the PG_TRY block will + * ensure we undo that on the way out, if the plan is local. + */ + if (plan == NULL || local_plan) { + /* Don't let SPI save the plan if it's going to be local */ + exec_prepare_plan(estate, expr, 0, !local_plan); + plan = expr->plan; /* - * Don't save the plan if not in atomic context. Otherwise, - * transaction ends would cause errors about plancache leaks. - * - * XXX This would be fixable with some plancache/resowner surgery - * elsewhere, but for now we'll just work around this here. + * A CALL or DO can never be a simple expression. (If it could + * be, we'd have to worry about saving/restoring the previous + * values of the related expr fields, not just expr->plan.) */ - exec_prepare_plan(estate, expr, 0, estate->atomic); + Assert(!expr->expr_simple_expr); /* * The procedure call could end transactions, which would upset * the snapshot management in SPI_execute*, so don't let it do it. * Instead, we set the snapshots ourselves below. */ - plan = expr->plan; plan->no_snapshots = true; /* @@ -2186,14 +2206,21 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) * case the procedure's argument list has changed. */ stmt->target = NULL; + cur_target = NULL; } /* * We construct a DTYPE_ROW datum representing the plpgsql variables * associated with the procedure's output arguments. Then we can use * exec_move_row() to do the assignments. 
+ * + * If we're using a local plan, also make a local target; otherwise, + * since the above code will force a new plan each time through, we'd + * repeatedly leak the memory for the target. (Note: we also leak the + * target when a plan change is forced, but that isn't so likely to + * cause excessive memory leaks.) */ - if (stmt->is_call && stmt->target == NULL) + if (stmt->is_call && cur_target == NULL) { Node *node; FuncExpr *funcexpr; @@ -2208,6 +2235,9 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) int i; ListCell *lc; + /* Use eval_mcontext for any cruft accumulated here */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + /* * Get the parsed CallStmt, and look up the called procedure */ @@ -2239,9 +2269,11 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) ReleaseSysCache(func_tuple); /* - * Begin constructing row Datum + * Begin constructing row Datum; keep it in fn_cxt if it's to be + * long-lived. */ - oldcontext = MemoryContextSwitchTo(estate->func->fn_cxt); + if (!local_plan) + MemoryContextSwitchTo(estate->func->fn_cxt); row = (PLpgSQL_row *) palloc0(sizeof(PLpgSQL_row)); row->dtype = PLPGSQL_DTYPE_ROW; @@ -2249,7 +2281,8 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) row->lineno = -1; row->varnos = (int *) palloc(sizeof(int) * list_length(funcargs)); - MemoryContextSwitchTo(oldcontext); + if (!local_plan) + MemoryContextSwitchTo(get_eval_mcontext(estate)); /* * Examine procedure's argument list. 
Each output arg position @@ -2293,7 +2326,13 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) row->nfields = nfields; - stmt->target = (PLpgSQL_variable *) row; + cur_target = (PLpgSQL_variable *) row; + + /* We can save and re-use the target datum, if it's not local */ + if (!local_plan) + stmt->target = cur_target; + + MemoryContextSwitchTo(oldcontext); } paramLI = setup_param_list(estate, expr); @@ -2316,17 +2355,27 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) PG_CATCH(); { /* - * If we aren't saving the plan, unset the pointer. Note that it - * could have been unset already, in case of a recursive call. + * If we are using a local plan, restore the old plan link. */ - if (expr->plan && !expr->plan->saved) - expr->plan = NULL; + if (local_plan) + expr->plan = orig_plan; PG_RE_THROW(); } PG_END_TRY(); - if (expr->plan && !expr->plan->saved) - expr->plan = NULL; + /* + * If we are using a local plan, restore the old plan link; then free the + * local plan to avoid memory leaks. (Note that the error exit path above + * just clears the link without risking calling SPI_freeplan; we expect + * that xact cleanup will take care of the mess in that case.) 
+ */ + if (local_plan) + { + SPIPlanPtr plan = expr->plan; + + expr->plan = orig_plan; + SPI_freeplan(plan); + } if (rc < 0) elog(ERROR, "SPI_execute_plan_with_paramlist failed executing query \"%s\": %s", @@ -2363,10 +2412,10 @@ exec_stmt_call(PLpgSQL_execstate *estate, PLpgSQL_stmt_call *stmt) { SPITupleTable *tuptab = SPI_tuptable; - if (!stmt->target) + if (!cur_target) elog(ERROR, "DO statement returned a row"); - exec_move_row(estate, stmt->target, tuptab->vals[0], tuptab->tupdesc); + exec_move_row(estate, cur_target, tuptab->vals[0], tuptab->tupdesc); } else if (SPI_processed > 1) elog(ERROR, "procedure call returned more than one row"); diff --git a/src/pl/plpgsql/src/sql/plpgsql_call.sql b/src/pl/plpgsql/src/sql/plpgsql_call.sql index 4702bd14d12e8..d506809ddbf63 100644 --- a/src/pl/plpgsql/src/sql/plpgsql_call.sql +++ b/src/pl/plpgsql/src/sql/plpgsql_call.sql @@ -237,6 +237,27 @@ END $$; +-- OUT parameters + +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'a: %, b: %', a, b; + b := a * 2; +END; +$$; + +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; + + -- transition variable assignment TRUNCATE test1; diff --git a/src/pl/plpython/expected/plpython_call.out b/src/pl/plpython/expected/plpython_call.out index 07ae04e98ba22..c3f3c8e95e573 100644 --- a/src/pl/plpython/expected/plpython_call.out +++ b/src/pl/plpython/expected/plpython_call.out @@ -52,6 +52,23 @@ CALL test_proc6(2, 3, 4); 6 | 8 (1 row) +-- OUT parameters +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE plpythonu +AS $$ +plpy.notice("a: %s, b: %s" % (a, b)) +return (a * 2,) +$$; +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; +NOTICE: a: 10, b: None +NOTICE: _a: 10, _b: 20 DROP PROCEDURE test_proc1; DROP PROCEDURE test_proc2; DROP PROCEDURE test_proc3; diff --git 
a/src/pl/plpython/generate-spiexceptions.pl b/src/pl/plpython/generate-spiexceptions.pl index d7afb8516dedb..14967ba3eef6a 100644 --- a/src/pl/plpython/generate-spiexceptions.pl +++ b/src/pl/plpython/generate-spiexceptions.pl @@ -3,8 +3,8 @@ # Generate the spiexceptions.h header from errcodes.txt # Copyright (c) 2000-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; print "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n"; @@ -37,8 +37,8 @@ # Change some_error_condition to SomeErrorCondition $condition_name =~ s/([a-z])([^_]*)(?:_|$)/\u$1$2/g; - print "{ \"spiexceptions.$condition_name\", " - . "\"$condition_name\", $errcode_macro },\n"; + print "\n{\n\t\"spiexceptions.$condition_name\", " + . "\"$condition_name\", $errcode_macro\n},\n"; } close $errcodes; diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c index ae0b97c85d3f2..224b8836fba90 100644 --- a/src/pl/plpython/plpy_elog.c +++ b/src/pl/plpython/plpy_elog.c @@ -216,7 +216,7 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb, else if (strcmp(e_module_s, "builtins") == 0 || strcmp(e_module_s, "__main__") == 0 || strcmp(e_module_s, "exceptions") == 0) - appendStringInfo(&xstr, "%s", e_type_s); + appendStringInfoString(&xstr, e_type_s); else appendStringInfo(&xstr, "%s.%s", e_module_s, e_type_s); appendStringInfo(&xstr, ": %s", vstr); diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c index 9e15839611159..ec47f52e61d64 100644 --- a/src/pl/plpython/plpy_procedure.c +++ b/src/pl/plpython/plpy_procedure.c @@ -273,7 +273,7 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) /* proc->nargs was initialized to 0 above */ for (i = 0; i < total; i++) { - if (modes[i] != PROARGMODE_OUT && + if ((modes[i] != PROARGMODE_OUT || proc->is_procedure) && modes[i] != PROARGMODE_TABLE) (proc->nargs)++; } @@ -289,7 +289,7 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) 
Form_pg_type argTypeStruct; if (modes && - (modes[i] == PROARGMODE_OUT || + ((modes[i] == PROARGMODE_OUT && !proc->is_procedure) || modes[i] == PROARGMODE_TABLE)) continue; /* skip OUT arguments */ diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c index 7c844c2b8a10c..b4aeb7fd59598 100644 --- a/src/pl/plpython/plpy_typeio.c +++ b/src/pl/plpython/plpy_typeio.c @@ -679,7 +679,7 @@ PLyList_FromArray(PLyDatumToOb *arg, Datum d) /* Array dimensions and left bounds */ ndim = ARR_NDIM(array); dims = ARR_DIMS(array); - Assert(ndim < MAXDIM); + Assert(ndim <= MAXDIM); /* * We iterate the SQL array in the physical order it's stored in the @@ -1173,18 +1173,25 @@ PLySequence_ToArray(PLyObToDatum *arg, PyObject *plrv, break; if (ndim == MAXDIM) - PLy_elog(ERROR, "number of array dimensions exceeds the maximum allowed (%d)", MAXDIM); + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("number of array dimensions exceeds the maximum allowed (%d)", + MAXDIM))); dims[ndim] = PySequence_Length(pyptr); if (dims[ndim] < 0) PLy_elog(ERROR, "could not determine sequence length for function return value"); if (dims[ndim] > MaxAllocSize) - PLy_elog(ERROR, "array size exceeds the maximum allowed"); + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed"))); len *= dims[ndim]; if (len > MaxAllocSize) - PLy_elog(ERROR, "array size exceeds the maximum allowed"); + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("array size exceeds the maximum allowed"))); if (dims[ndim] == 0) { @@ -1210,7 +1217,9 @@ PLySequence_ToArray(PLyObToDatum *arg, PyObject *plrv, if (ndim == 0) { if (!PySequence_Check(plrv)) - PLy_elog(ERROR, "return value of function with array return type is not a Python sequence"); + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("return value of function with array return type is not a Python sequence"))); ndim = 1; len = dims[0] = 
PySequence_Length(plrv); @@ -1256,7 +1265,8 @@ PLySequence_ToArray_recurse(PLyObToDatum *elm, PyObject *list, if (PySequence_Length(list) != dims[dim]) ereport(ERROR, - (errmsg("wrong length of inner sequence: has length %d, but %d was expected", + (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), + errmsg("wrong length of inner sequence: has length %d, but %d was expected", (int) PySequence_Length(list), dims[dim]), (errdetail("To construct a multidimensional array, the inner sequences must all have the same length.")))); diff --git a/src/pl/plpython/sql/plpython_call.sql b/src/pl/plpython/sql/plpython_call.sql index 2f792f92bd789..46e89b1a9e1c6 100644 --- a/src/pl/plpython/sql/plpython_call.sql +++ b/src/pl/plpython/sql/plpython_call.sql @@ -54,6 +54,25 @@ $$; CALL test_proc6(2, 3, 4); +-- OUT parameters + +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE plpythonu +AS $$ +plpy.notice("a: %s, b: %s" % (a, b)) +return (a * 2,) +$$; + +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; + + DROP PROCEDURE test_proc1; DROP PROCEDURE test_proc2; DROP PROCEDURE test_proc3; diff --git a/src/pl/tcl/expected/pltcl_call.out b/src/pl/tcl/expected/pltcl_call.out index d290c8fbd05c3..f0eb356cf23ac 100644 --- a/src/pl/tcl/expected/pltcl_call.out +++ b/src/pl/tcl/expected/pltcl_call.out @@ -49,6 +49,23 @@ CALL test_proc6(2, 3, 4); 6 | 8 (1 row) +-- OUT parameters +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE pltcl +AS $$ +elog NOTICE "a: $1, b: $2" +return [list b [expr {$1 * 2}]] +$$; +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; +NOTICE: a: 10, b: +NOTICE: _a: 10, _b: 20 DROP PROCEDURE test_proc1; DROP PROCEDURE test_proc2; DROP PROCEDURE test_proc3; diff --git a/src/pl/tcl/generate-pltclerrcodes.pl b/src/pl/tcl/generate-pltclerrcodes.pl index 59ea2baab7edf..bb9eb8a824dc0 100644 --- 
a/src/pl/tcl/generate-pltclerrcodes.pl +++ b/src/pl/tcl/generate-pltclerrcodes.pl @@ -3,8 +3,8 @@ # Generate the pltclerrcodes.h header from errcodes.txt # Copyright (c) 2000-2020, PostgreSQL Global Development Group -use warnings; use strict; +use warnings; print "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n"; @@ -34,7 +34,7 @@ # Skip lines without PL/pgSQL condition names next unless defined($condition_name); - print "{\n\t\"$condition_name\", $errcode_macro\n},\n\n"; + print "\n{\n\t\"$condition_name\", $errcode_macro\n},\n"; } close $errcodes; diff --git a/src/pl/tcl/sql/pltcl_call.sql b/src/pl/tcl/sql/pltcl_call.sql index 95791d08beea6..963277e1fb87b 100644 --- a/src/pl/tcl/sql/pltcl_call.sql +++ b/src/pl/tcl/sql/pltcl_call.sql @@ -52,6 +52,25 @@ $$; CALL test_proc6(2, 3, 4); +-- OUT parameters + +CREATE PROCEDURE test_proc9(IN a int, OUT b int) +LANGUAGE pltcl +AS $$ +elog NOTICE "a: $1, b: $2" +return [list b [expr {$1 * 2}]] +$$; + +DO $$ +DECLARE _a int; _b int; +BEGIN + _a := 10; _b := 30; + CALL test_proc9(_a, _b); + RAISE NOTICE '_a: %, _b: %', _a, _b; +END +$$; + + DROP PROCEDURE test_proc1; DROP PROCEDURE test_proc2; DROP PROCEDURE test_proc3; diff --git a/src/port/dirmod.c b/src/port/dirmod.c index e22a41c77e1ec..8979f100803bf 100644 --- a/src/port/dirmod.c +++ b/src/port/dirmod.c @@ -353,55 +353,3 @@ pgwin32_is_junction(const char *path) return ((attr & FILE_ATTRIBUTE_REPARSE_POINT) == FILE_ATTRIBUTE_REPARSE_POINT); } #endif /* defined(WIN32) && !defined(__CYGWIN__) */ - - -#if defined(WIN32) && !defined(__CYGWIN__) - -#undef stat - -/* - * The stat() function in win32 is not guaranteed to update the st_size - * field when run. So we define our own version that uses the Win32 API - * to update this field. 
- */ -int -pgwin32_safestat(const char *path, struct stat *buf) -{ - int r; - WIN32_FILE_ATTRIBUTE_DATA attr; - - r = stat(path, buf); - if (r < 0) - { - if (GetLastError() == ERROR_DELETE_PENDING) - { - /* - * File has been deleted, but is not gone from the filesystem yet. - * This can happen when some process with FILE_SHARE_DELETE has it - * open and it will be fully removed once that handle is closed. - * Meanwhile, we can't open it, so indicate that the file just - * doesn't exist. - */ - errno = ENOENT; - return -1; - } - - return r; - } - - if (!GetFileAttributesEx(path, GetFileExInfoStandard, &attr)) - { - _dosmaperr(GetLastError()); - return -1; - } - - /* - * XXX no support for large files here, but we don't do that in general on - * Win32 yet. - */ - buf->st_size = attr.nFileSizeLow; - - return 0; -} - -#endif diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c index 3b51eea4815e2..495ad343f392d 100644 --- a/src/port/getaddrinfo.c +++ b/src/port/getaddrinfo.c @@ -79,12 +79,12 @@ haveNativeWindowsIPv6routines(void) { /* We found a dll, so now get the addresses of the routines */ - getaddrinfo_ptr = (getaddrinfo_ptr_t) GetProcAddress(hLibrary, - "getaddrinfo"); - freeaddrinfo_ptr = (freeaddrinfo_ptr_t) GetProcAddress(hLibrary, - "freeaddrinfo"); - getnameinfo_ptr = (getnameinfo_ptr_t) GetProcAddress(hLibrary, - "getnameinfo"); + getaddrinfo_ptr = (getaddrinfo_ptr_t) (pg_funcptr_t) GetProcAddress(hLibrary, + "getaddrinfo"); + freeaddrinfo_ptr = (freeaddrinfo_ptr_t) (pg_funcptr_t) GetProcAddress(hLibrary, + "freeaddrinfo"); + getnameinfo_ptr = (getnameinfo_ptr_t) (pg_funcptr_t) GetProcAddress(hLibrary, + "getnameinfo"); /* * If any one of the routines is missing, let's play it safe and diff --git a/src/port/strerror.c b/src/port/strerror.c index 375edb0f5abde..127bc5ef1fee7 100644 --- a/src/port/strerror.c +++ b/src/port/strerror.c @@ -118,14 +118,10 @@ get_errno_symbol(int errnum) return "E2BIG"; case EACCES: return "EACCES"; -#ifdef EADDRINUSE 
case EADDRINUSE: return "EADDRINUSE"; -#endif -#ifdef EADDRNOTAVAIL case EADDRNOTAVAIL: return "EADDRNOTAVAIL"; -#endif case EAFNOSUPPORT: return "EAFNOSUPPORT"; #ifdef EAGAIN @@ -146,16 +142,12 @@ get_errno_symbol(int errnum) return "EBUSY"; case ECHILD: return "ECHILD"; -#ifdef ECONNABORTED case ECONNABORTED: return "ECONNABORTED"; -#endif case ECONNREFUSED: return "ECONNREFUSED"; -#ifdef ECONNRESET case ECONNRESET: return "ECONNRESET"; -#endif case EDEADLK: return "EDEADLK"; case EDOM: @@ -166,10 +158,10 @@ get_errno_symbol(int errnum) return "EFAULT"; case EFBIG: return "EFBIG"; -#ifdef EHOSTUNREACH + case EHOSTDOWN: + return "EHOSTDOWN"; case EHOSTUNREACH: return "EHOSTUNREACH"; -#endif case EIDRM: return "EIDRM"; case EINPROGRESS: @@ -180,10 +172,8 @@ get_errno_symbol(int errnum) return "EINVAL"; case EIO: return "EIO"; -#ifdef EISCONN case EISCONN: return "EISCONN"; -#endif case EISDIR: return "EISDIR"; #ifdef ELOOP @@ -198,6 +188,12 @@ get_errno_symbol(int errnum) return "EMSGSIZE"; case ENAMETOOLONG: return "ENAMETOOLONG"; + case ENETDOWN: + return "ENETDOWN"; + case ENETRESET: + return "ENETRESET"; + case ENETUNREACH: + return "ENETUNREACH"; case ENFILE: return "ENFILE"; case ENOBUFS: @@ -214,20 +210,16 @@ get_errno_symbol(int errnum) return "ENOSPC"; case ENOSYS: return "ENOSYS"; -#ifdef ENOTCONN case ENOTCONN: return "ENOTCONN"; -#endif case ENOTDIR: return "ENOTDIR"; #if defined(ENOTEMPTY) && (ENOTEMPTY != EEXIST) /* same code on AIX */ case ENOTEMPTY: return "ENOTEMPTY"; #endif -#ifdef ENOTSOCK case ENOTSOCK: return "ENOTSOCK"; -#endif #ifdef ENOTSUP case ENOTSUP: return "ENOTSUP"; diff --git a/src/port/thread.c b/src/port/thread.c index 171df0f9e381d..0941cb6a88f4f 100644 --- a/src/port/thread.c +++ b/src/port/thread.c @@ -47,9 +47,6 @@ * use non-*_r functions if they are thread-safe * * One thread-safe solution for gethostbyname() might be to use getaddrinfo(). 
- * - * Run src/test/thread to test if your operating system has thread-safe - * non-*_r functions. */ diff --git a/src/port/win32env.c b/src/port/win32env.c index 2021f3d5aa6e6..177488cc67ea9 100644 --- a/src/port/win32env.c +++ b/src/port/win32env.c @@ -95,7 +95,7 @@ pgwin32_putenv(const char *envval) { PUTENVPROC putenvFunc; - putenvFunc = (PUTENVPROC) GetProcAddress(hmodule, "_putenv"); + putenvFunc = (PUTENVPROC) (pg_funcptr_t) GetProcAddress(hmodule, "_putenv"); if (putenvFunc) putenvFunc(envval); FreeLibrary(hmodule); diff --git a/src/port/win32stat.c b/src/port/win32stat.c new file mode 100644 index 0000000000000..4351aa4d08f25 --- /dev/null +++ b/src/port/win32stat.c @@ -0,0 +1,307 @@ +/*------------------------------------------------------------------------- + * + * win32stat.c + * Replacements for functions using GetFileInformationByHandle + * + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * src/port/win32stat.c + * + *------------------------------------------------------------------------- + */ + +#ifdef WIN32 + +#include "c.h" +#include + +/* + * In order to support MinGW and MSVC2013 we use NtQueryInformationFile as an + * alternative for GetFileInformationByHandleEx. It is loaded from the ntdll + * library. 
+ */ +#if _WIN32_WINNT < 0x0600 +#include + +#if !defined(__MINGW32__) && !defined(__MINGW64__) +/* MinGW includes this in , but it is missing in MSVC */ +typedef struct _FILE_STANDARD_INFORMATION +{ + LARGE_INTEGER AllocationSize; + LARGE_INTEGER EndOfFile; + ULONG NumberOfLinks; + BOOLEAN DeletePending; + BOOLEAN Directory; +} FILE_STANDARD_INFORMATION; +#define FileStandardInformation 5 +#endif /* !defined(__MINGW32__) && + * !defined(__MINGW64__) */ + +typedef NTSTATUS (NTAPI * PFN_NTQUERYINFORMATIONFILE) + (IN HANDLE FileHandle, + OUT PIO_STATUS_BLOCK IoStatusBlock, + OUT PVOID FileInformation, + IN ULONG Length, + IN FILE_INFORMATION_CLASS FileInformationClass); + +static PFN_NTQUERYINFORMATIONFILE _NtQueryInformationFile = NULL; + +static HMODULE ntdll = NULL; + +/* + * Load DLL file just once regardless of how many functions we load/call in it. + */ +static void +LoadNtdll(void) +{ + if (ntdll != NULL) + return; + ntdll = LoadLibraryEx("ntdll.dll", NULL, 0); +} + +#endif /* _WIN32_WINNT < 0x0600 */ + + +/* + * Convert a FILETIME struct into a 64 bit time_t. + */ +static __time64_t +filetime_to_time(const FILETIME *ft) +{ + ULARGE_INTEGER unified_ft = {0}; + static const uint64 EpochShift = UINT64CONST(116444736000000000); + + unified_ft.LowPart = ft->dwLowDateTime; + unified_ft.HighPart = ft->dwHighDateTime; + + if (unified_ft.QuadPart < EpochShift) + return -1; + + unified_ft.QuadPart -= EpochShift; + unified_ft.QuadPart /= 10 * 1000 * 1000; + + return unified_ft.QuadPart; +} + +/* + * Convert WIN32 file attributes to a Unix-style mode. + * + * Only owner permissions are set. + */ +static unsigned short +fileattr_to_unixmode(int attr) +{ + unsigned short uxmode = 0; + + uxmode |= (unsigned short) ((attr & FILE_ATTRIBUTE_DIRECTORY) ? + (_S_IFDIR) : (_S_IFREG)); + + uxmode |= (unsigned short) ((attr & FILE_ATTRIBUTE_READONLY) ? 
+ (_S_IREAD) : (_S_IREAD | _S_IWRITE)); + + /* there is no need to simulate _S_IEXEC using CMD's PATHEXT extensions */ + uxmode |= _S_IEXEC; + + return uxmode; +} + +/* + * Convert WIN32 file information (from a HANDLE) to a struct stat. + */ +static int +fileinfo_to_stat(HANDLE hFile, struct stat *buf) +{ + BY_HANDLE_FILE_INFORMATION fiData; + + memset(buf, 0, sizeof(*buf)); + + /* + * GetFileInformationByHandle minimum supported version: Windows XP and + * Windows Server 2003, so it exists everywhere we care about. + */ + if (!GetFileInformationByHandle(hFile, &fiData)) + { + _dosmaperr(GetLastError()); + return -1; + } + + if (fiData.ftLastWriteTime.dwLowDateTime || + fiData.ftLastWriteTime.dwHighDateTime) + buf->st_mtime = filetime_to_time(&fiData.ftLastWriteTime); + + if (fiData.ftLastAccessTime.dwLowDateTime || + fiData.ftLastAccessTime.dwHighDateTime) + buf->st_atime = filetime_to_time(&fiData.ftLastAccessTime); + else + buf->st_atime = buf->st_mtime; + + if (fiData.ftCreationTime.dwLowDateTime || + fiData.ftCreationTime.dwHighDateTime) + buf->st_ctime = filetime_to_time(&fiData.ftCreationTime); + else + buf->st_ctime = buf->st_mtime; + + buf->st_mode = fileattr_to_unixmode(fiData.dwFileAttributes); + buf->st_nlink = fiData.nNumberOfLinks; + + buf->st_size = ((((uint64) fiData.nFileSizeHigh) << 32) | + fiData.nFileSizeLow); + + return 0; +} + +/* + * Windows implementation of stat(). + * + * This currently also implements lstat(), though perhaps that should change. + */ +int +_pgstat64(const char *name, struct stat *buf) +{ + /* + * We must use a handle so lstat() returns the information of the target + * file. To have a reliable test for ERROR_DELETE_PENDING, we use + * NtQueryInformationFile from Windows 2000 or + * GetFileInformationByHandleEx from Server 2008 / Vista. 
+ */ + SECURITY_ATTRIBUTES sa; + HANDLE hFile; + int ret; +#if _WIN32_WINNT < 0x0600 + IO_STATUS_BLOCK ioStatus; + FILE_STANDARD_INFORMATION standardInfo; +#else + FILE_STANDARD_INFO standardInfo; +#endif + + if (name == NULL || buf == NULL) + { + errno = EINVAL; + return -1; + } + + /* fast not-exists check */ + if (GetFileAttributes(name) == INVALID_FILE_ATTRIBUTES) + { + _dosmaperr(GetLastError()); + return -1; + } + + /* get a file handle as lightweight as we can */ + sa.nLength = sizeof(SECURITY_ATTRIBUTES); + sa.bInheritHandle = TRUE; + sa.lpSecurityDescriptor = NULL; + hFile = CreateFile(name, + GENERIC_READ, + (FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE), + &sa, + OPEN_EXISTING, + (FILE_FLAG_NO_BUFFERING | FILE_FLAG_BACKUP_SEMANTICS | + FILE_FLAG_OVERLAPPED), + NULL); + if (hFile == INVALID_HANDLE_VALUE) + { + DWORD err = GetLastError(); + + CloseHandle(hFile); + _dosmaperr(err); + return -1; + } + + memset(&standardInfo, 0, sizeof(standardInfo)); + +#if _WIN32_WINNT < 0x0600 + if (_NtQueryInformationFile == NULL) + { + /* First time through: load ntdll.dll and find NtQueryInformationFile */ + LoadNtdll(); + if (ntdll == NULL) + { + DWORD err = GetLastError(); + + CloseHandle(hFile); + _dosmaperr(err); + return -1; + } + + _NtQueryInformationFile = (PFN_NTQUERYINFORMATIONFILE) (pg_funcptr_t) + GetProcAddress(ntdll, "NtQueryInformationFile"); + if (_NtQueryInformationFile == NULL) + { + DWORD err = GetLastError(); + + CloseHandle(hFile); + _dosmaperr(err); + return -1; + } + } + + if (!NT_SUCCESS(_NtQueryInformationFile(hFile, &ioStatus, &standardInfo, + sizeof(standardInfo), + FileStandardInformation))) + { + DWORD err = GetLastError(); + + CloseHandle(hFile); + _dosmaperr(err); + return -1; + } +#else + if (!GetFileInformationByHandleEx(hFile, FileStandardInfo, &standardInfo, + sizeof(standardInfo))) + { + DWORD err = GetLastError(); + + CloseHandle(hFile); + _dosmaperr(err); + return -1; + } +#endif /* _WIN32_WINNT < 0x0600 */ + + if 
(standardInfo.DeletePending) + { + /* + * File has been deleted, but is not gone from the filesystem yet. + * This can happen when some process with FILE_SHARE_DELETE has it + * open, and it will be fully removed once that handle is closed. + * Meanwhile, we can't open it, so indicate that the file just doesn't + * exist. + */ + CloseHandle(hFile); + errno = ENOENT; + return -1; + } + + /* At last we can invoke fileinfo_to_stat */ + ret = fileinfo_to_stat(hFile, buf); + + CloseHandle(hFile); + return ret; +} + +/* + * Windows implementation of fstat(). + */ +int +_pgfstat64(int fileno, struct stat *buf) +{ + HANDLE hFile = (HANDLE) _get_osfhandle(fileno); + + if (hFile == INVALID_HANDLE_VALUE || buf == NULL) + { + errno = EINVAL; + return -1; + } + + /* + * Since we already have a file handle there is no need to check for + * ERROR_DELETE_PENDING. + */ + + return fileinfo_to_stat(hFile, buf); +} + +#endif /* WIN32 */ diff --git a/src/test/Makefile b/src/test/Makefile index efb206aa75014..9774f534d93f8 100644 --- a/src/test/Makefile +++ b/src/test/Makefile @@ -37,7 +37,7 @@ endif # clean" etc to recurse into them. (We must filter out those that we # have conditionally included into SUBDIRS above, else there will be # make confusion.) -ALWAYS_SUBDIRS = $(filter-out $(SUBDIRS),examples kerberos ldap locale thread ssl) +ALWAYS_SUBDIRS = $(filter-out $(SUBDIRS),examples kerberos ldap locale ssl) # We want to recurse to all subdirs for all standard targets, except that # installcheck and install should not recurse into the subdirectory "modules". diff --git a/src/test/README b/src/test/README index b5ccfc0cf6730..afdc767651907 100644 --- a/src/test/README +++ b/src/test/README @@ -9,7 +9,7 @@ Not all these tests get run by "make check". Check src/test/Makefile to see which tests get run automatically. 
authentication/ - Tests for authentication + Tests for authentication (but see also below) examples/ Demonstration programs for libpq that double as regression tests via @@ -18,6 +18,12 @@ examples/ isolation/ Tests for concurrent behavior at the SQL level +kerberos/ + Tests for Kerberos/GSSAPI authentication and encryption + +ldap/ + Tests for LDAP-based authentication + locale/ Sanity checks for locale data, encodings, etc @@ -42,6 +48,3 @@ ssl/ subscription/ Tests for logical replication - -thread/ - A thread-safety-testing utility used by configure diff --git a/src/test/isolation/Makefile b/src/test/isolation/Makefile index da5e088bdde73..d23e2cec64009 100644 --- a/src/test/isolation/Makefile +++ b/src/test/isolation/Makefile @@ -18,12 +18,16 @@ OBJS = \ all: isolationtester$(X) pg_isolation_regress$(X) -# Though we don't install these binaries, build them during installation -# (including temp-install). Otherwise, "make -j check-world" and "make -j -# installcheck-world" would spawn multiple, concurrent builds in this -# directory. Later builds would overwrite files while earlier builds are -# reading them, causing occasional failures. 
-install: | all +install: all installdirs + $(INSTALL_PROGRAM) pg_isolation_regress$(X) '$(DESTDIR)$(pgxsdir)/$(subdir)/pg_isolation_regress$(X)' + $(INSTALL_PROGRAM) isolationtester$(X) '$(DESTDIR)$(pgxsdir)/$(subdir)/isolationtester$(X)' + +installdirs: + $(MKDIR_P) '$(DESTDIR)$(pgxsdir)/$(subdir)' + +uninstall: + rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/pg_isolation_regress$(X)' + rm -f '$(DESTDIR)$(pgxsdir)/$(subdir)/isolationtester$(X)' submake-regress: $(MAKE) -C $(top_builddir)/src/test/regress pg_regress.o diff --git a/src/test/modules/test_extensions/Makefile b/src/test/modules/test_extensions/Makefile index d18108e4e5aba..77ee4d5d9ecd5 100644 --- a/src/test/modules/test_extensions/Makefile +++ b/src/test/modules/test_extensions/Makefile @@ -4,11 +4,13 @@ MODULE = test_extensions PGFILEDESC = "test_extensions - regression testing for EXTENSION support" EXTENSION = test_ext1 test_ext2 test_ext3 test_ext4 test_ext5 test_ext6 \ - test_ext7 test_ext8 test_ext_cyclic1 test_ext_cyclic2 + test_ext7 test_ext8 test_ext_cyclic1 test_ext_cyclic2 \ + test_ext_evttrig DATA = test_ext1--1.0.sql test_ext2--1.0.sql test_ext3--1.0.sql \ test_ext4--1.0.sql test_ext5--1.0.sql test_ext6--1.0.sql \ test_ext7--1.0.sql test_ext7--1.0--2.0.sql test_ext8--1.0.sql \ - test_ext_cyclic1--1.0.sql test_ext_cyclic2--1.0.sql + test_ext_cyclic1--1.0.sql test_ext_cyclic2--1.0.sql \ + test_ext_evttrig--1.0.sql test_ext_evttrig--1.0--2.0.sql REGRESS = test_extensions test_extdepend diff --git a/src/test/modules/test_extensions/expected/test_extensions.out b/src/test/modules/test_extensions/expected/test_extensions.out index b5cbdfcad4f33..30ae621d05ef6 100644 --- a/src/test/modules/test_extensions/expected/test_extensions.out +++ b/src/test/modules/test_extensions/expected/test_extensions.out @@ -154,3 +154,8 @@ DROP TABLE test_ext4_tab; DROP FUNCTION create_extension_with_temp_schema(); RESET client_min_messages; \unset SHOW_CONTEXT +-- Test case of an event trigger run in an extension upgrade 
script. +-- See: https://postgr.es/m/20200902193715.6e0269d4@firost +CREATE EXTENSION test_ext_evttrig; +ALTER EXTENSION test_ext_evttrig UPDATE TO '2.0'; +DROP EXTENSION test_ext_evttrig; diff --git a/src/test/modules/test_extensions/sql/test_extensions.sql b/src/test/modules/test_extensions/sql/test_extensions.sql index f505466ab4ebb..c16fd36da8967 100644 --- a/src/test/modules/test_extensions/sql/test_extensions.sql +++ b/src/test/modules/test_extensions/sql/test_extensions.sql @@ -93,3 +93,9 @@ DROP TABLE test_ext4_tab; DROP FUNCTION create_extension_with_temp_schema(); RESET client_min_messages; \unset SHOW_CONTEXT + +-- Test case of an event trigger run in an extension upgrade script. +-- See: https://postgr.es/m/20200902193715.6e0269d4@firost +CREATE EXTENSION test_ext_evttrig; +ALTER EXTENSION test_ext_evttrig UPDATE TO '2.0'; +DROP EXTENSION test_ext_evttrig; diff --git a/src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql b/src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql new file mode 100644 index 0000000000000..fdd2f3542e6c3 --- /dev/null +++ b/src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql @@ -0,0 +1,7 @@ +/* src/test/modules/test_extensions/test_ext_evttrig--1.0--2.0.sql */ +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION test_ext_evttrig UPDATE TO '2.0'" to load this file. \quit + +-- Test extension upgrade with event trigger.
+ALTER EVENT TRIGGER table_rewrite_trg DISABLE; +ALTER TABLE t DROP COLUMN id; diff --git a/src/test/modules/test_extensions/test_ext_evttrig--1.0.sql b/src/test/modules/test_extensions/test_ext_evttrig--1.0.sql new file mode 100644 index 0000000000000..0071712cb88bf --- /dev/null +++ b/src/test/modules/test_extensions/test_ext_evttrig--1.0.sql @@ -0,0 +1,16 @@ +/* src/test/modules/test_extensions/test_ext_evttrig--1.0.sql */ +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION test_ext_evttrig" to load this file. \quit + +-- Base table with event trigger, used in a regression test involving +-- extension upgrades. +CREATE TABLE t (id text); +CREATE OR REPLACE FUNCTION _evt_table_rewrite_fnct() +RETURNS EVENT_TRIGGER LANGUAGE plpgsql AS +$$ + BEGIN + END; +$$; +CREATE EVENT TRIGGER table_rewrite_trg + ON table_rewrite + EXECUTE PROCEDURE _evt_table_rewrite_fnct(); diff --git a/src/test/modules/test_extensions/test_ext_evttrig.control b/src/test/modules/test_extensions/test_ext_evttrig.control new file mode 100644 index 0000000000000..915fae61666a6 --- /dev/null +++ b/src/test/modules/test_extensions/test_ext_evttrig.control @@ -0,0 +1,3 @@ +comment = 'Test extension - event trigger' +default_version = '1.0' +relocatable = true diff --git a/src/test/modules/test_misc/t/001_constraint_validation.pl b/src/test/modules/test_misc/t/001_constraint_validation.pl index 22497f22b01c9..c9453f9063e69 100644 --- a/src/test/modules/test_misc/t/001_constraint_validation.pl +++ b/src/test/modules/test_misc/t/001_constraint_validation.pl @@ -56,7 +56,7 @@ sub is_table_verified $output = run_sql_command('alter table atacc1 alter test_a set not null;'); ok(!is_table_verified($output), 'with constraint will not scan table'); ok( $output =~ - m/existing constraints on column "atacc1"."test_a" are sufficient to prove that it does not contain nulls/, + m/existing constraints on column "atacc1.test_a" are sufficient to prove that it
does not contain nulls/, 'test_a proved by constraints'); run_sql_command('alter table atacc1 alter test_a drop not null;'); @@ -68,7 +68,7 @@ sub is_table_verified ok(is_table_verified($output), 'table was scanned'); # we may miss debug message for test_a constraint because we need verify table due test_b ok( !( $output =~ - m/existing constraints on column "atacc1"."test_b" are sufficient to prove that it does not contain nulls/ + m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/ ), 'test_b not proved by wrong constraints'); run_sql_command( @@ -84,10 +84,10 @@ sub is_table_verified ); ok(!is_table_verified($output), 'table was not scanned for both columns'); ok( $output =~ - m/existing constraints on column "atacc1"."test_a" are sufficient to prove that it does not contain nulls/, + m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/, 'test_a proved by constraints'); ok( $output =~ - m/existing constraints on column "atacc1"."test_b" are sufficient to prove that it does not contain nulls/, + m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/, 'test_b proved by constraints'); run_sql_command('drop table atacc1;'); diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl index 78aa07ce511a4..b3227b855c0a9 100644 --- a/src/test/modules/test_pg_dump/t/001_base.pl +++ b/src/test/modules/test_pg_dump/t/001_base.pl @@ -135,9 +135,17 @@ "$tempdir/defaults_tar_format.tar", ], }, + exclude_table => { + dump_cmd => [ + 'pg_dump', + '--exclude-table=regress_table_dumpable', + "--file=$tempdir/exclude_table.sql", + 'postgres', + ], + }, extension_schema => { dump_cmd => [ - 'pg_dump', '--schema=public', '--inserts', + 'pg_dump', '--schema=public', "--file=$tempdir/extension_schema.sql", 'postgres', ], }, @@ -225,6 +233,7 @@ clean_if_exists => 1, createdb => 1, defaults 
=> 1, + exclude_table => 1, no_privs => 1, no_owner => 1,); @@ -317,11 +326,28 @@ regexp => qr/^ \QCREATE TABLE public.regress_pg_dump_table (\E \n\s+\Qcol1 integer NOT NULL,\E - \n\s+\Qcol2 integer\E + \n\s+\Qcol2 integer,\E + \n\s+\QCONSTRAINT regress_pg_dump_table_col2_check CHECK ((col2 > 0))\E \n\);\n/xm, like => { binary_upgrade => 1, }, }, + 'COPY public.regress_table_dumpable (col1)' => { + regexp => qr/^ + \QCOPY public.regress_table_dumpable (col1) FROM stdin;\E + \n/xm, + like => { + %full_runs, + data_only => 1, + section_data => 1, + extension_schema => 1, + }, + unlike => { + binary_upgrade => 1, + exclude_table => 1, + }, + }, + 'CREATE ACCESS METHOD regress_test_am' => { regexp => qr/^ \QCREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler;\E @@ -443,7 +469,8 @@ regexp => qr/^ \QCREATE TABLE regress_pg_dump_schema.test_table (\E \n\s+\Qcol1 integer,\E - \n\s+\Qcol2 integer\E + \n\s+\Qcol2 integer,\E + \n\s+\QCONSTRAINT test_table_col2_check CHECK ((col2 > 0))\E \n\);\n/xm, like => { binary_upgrade => 1, }, }, @@ -578,17 +605,6 @@ schema_only => 1, section_pre_data => 1, }, - }, - - # Dumpable object inside specific schema - 'INSERT INTO public.regress_table_dumpable VALUES (1);' => { - create_sql => 'INSERT INTO public.regress_table_dumpable VALUES (1);', - regexp => qr/^ - \QINSERT INTO public.regress_table_dumpable VALUES (1);\E - \n/xm, - like => { - extension_schema => 1, - }, },); ######################################### diff --git a/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql b/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql index 90e461ed3573b..c7a35c3afa0f9 100644 --- a/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql +++ b/src/test/modules/test_pg_dump/test_pg_dump--1.0.sql @@ -5,7 +5,7 @@ CREATE TABLE regress_pg_dump_table ( col1 serial, - col2 int + col2 int check (col2 > 0) ); CREATE SEQUENCE regress_pg_dump_seq; @@ -14,7 +14,7 @@ CREATE SEQUENCE regress_seq_dumpable; SELECT 
pg_catalog.pg_extension_config_dump('regress_seq_dumpable', ''); CREATE TABLE regress_table_dumpable ( - col1 int + col1 int check (col1 > 0) ); SELECT pg_catalog.pg_extension_config_dump('regress_table_dumpable', ''); @@ -34,7 +34,7 @@ CREATE ACCESS METHOD regress_test_am TYPE INDEX HANDLER bthandler; -- this extension. CREATE TABLE regress_pg_dump_schema.test_table ( col1 int, - col2 int + col2 int check (col2 > 0) ); GRANT SELECT ON regress_pg_dump_schema.test_table TO regress_dump_test_role; diff --git a/src/test/modules/unsafe_tests/expected/rolenames.out b/src/test/modules/unsafe_tests/expected/rolenames.out index ff6aa69fc0978..eb608fdc2eaf7 100644 --- a/src/test/modules/unsafe_tests/expected/rolenames.out +++ b/src/test/modules/unsafe_tests/expected/rolenames.out @@ -1,19 +1,21 @@ -CREATE OR REPLACE FUNCTION chkrolattr() +CREATE FUNCTION chkrolattr() RETURNS TABLE ("role" name, rolekeyword text, canlogin bool, replication bool) AS $$ SELECT r.rolname, v.keyword, r.rolcanlogin, r.rolreplication FROM pg_roles r - JOIN (VALUES(CURRENT_USER, 'current_user'), + JOIN (VALUES(CURRENT_ROLE, 'current_role'), + (CURRENT_USER, 'current_user'), (SESSION_USER, 'session_user'), + ('current_role', '-'), ('current_user', '-'), ('session_user', '-'), ('Public', '-'), ('None', '-')) AS v(uname, keyword) ON (r.rolname = v.uname) - ORDER BY 1; + ORDER BY 1, 2; $$ LANGUAGE SQL; -CREATE OR REPLACE FUNCTION chksetconfig() +CREATE FUNCTION chksetconfig() RETURNS TABLE (db name, "role" name, rolkeyword text, setconfig text[]) AS $$ SELECT COALESCE(d.datname, 'ALL'), COALESCE(r.rolname, 'ALL'), @@ -21,21 +23,22 @@ SELECT COALESCE(d.datname, 'ALL'), COALESCE(r.rolname, 'ALL'), FROM pg_db_role_setting s LEFT JOIN pg_roles r ON (r.oid = s.setrole) LEFT JOIN pg_database d ON (d.oid = s.setdatabase) - LEFT JOIN (VALUES(CURRENT_USER, 'current_user'), - (SESSION_USER, 'session_user')) + LEFT JOIN (VALUES(CURRENT_ROLE, 'current_role'), + (CURRENT_USER, 'current_user'), + (SESSION_USER, 
'session_user')) AS v(uname, keyword) ON (r.rolname = v.uname) WHERE (r.rolname) IN ('Public', 'current_user', 'regress_testrol1', 'regress_testrol2') -ORDER BY 1, 2; +ORDER BY 1, 2, 3; $$ LANGUAGE SQL; -CREATE OR REPLACE FUNCTION chkumapping() +CREATE FUNCTION chkumapping() RETURNS TABLE (umname name, umserver name, umoptions text[]) AS $$ SELECT r.rolname, s.srvname, m.umoptions FROM pg_user_mapping m LEFT JOIN pg_roles r ON (r.oid = m.umuser) JOIN pg_foreign_server s ON (s.oid = m.umserver) - ORDER BY 2; + ORDER BY 2, 1; $$ LANGUAGE SQL; -- -- We test creation and use of these role names to ensure that the server @@ -46,6 +49,7 @@ $$ LANGUAGE SQL; SET client_min_messages = ERROR; CREATE ROLE "Public"; CREATE ROLE "None"; +CREATE ROLE "current_role"; CREATE ROLE "current_user"; CREATE ROLE "session_user"; CREATE ROLE "user"; @@ -55,7 +59,7 @@ ERROR: CURRENT_USER cannot be used as a role name here LINE 1: CREATE ROLE current_user; ^ CREATE ROLE current_role; -- error -ERROR: syntax error at or near "current_role" +ERROR: CURRENT_ROLE cannot be used as a role name here LINE 1: CREATE ROLE current_role; ^ CREATE ROLE session_user; -- error @@ -112,23 +116,56 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | f current_user | - | f | f regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | f regress_testrol2 | current_user | f | f session_user | - | f | f -(6 rows) +(8 rows) + +ALTER ROLE CURRENT_ROLE WITH REPLICATION; +SELECT * FROM chkrolattr(); + role | rolekeyword | canlogin | replication +------------------+--------------+----------+------------- + None | - | f | f + Public | - | f | f + current_role | - | f | f + current_user | - | f | f + regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t + regress_testrol2 | current_user | f | t + session_user | - | f | f +(8 rows) +ALTER ROLE "current_role" WITH 
REPLICATION; +SELECT * FROM chkrolattr(); + role | rolekeyword | canlogin | replication +------------------+--------------+----------+------------- + None | - | f | f + Public | - | f | f + current_role | - | f | t + current_user | - | f | f + regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t + regress_testrol2 | current_user | f | t + session_user | - | f | f +(8 rows) + +ALTER ROLE CURRENT_ROLE WITH NOREPLICATION; ALTER ROLE CURRENT_USER WITH REPLICATION; SELECT * FROM chkrolattr(); role | rolekeyword | canlogin | replication ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | f regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | f -(6 rows) +(8 rows) ALTER ROLE "current_user" WITH REPLICATION; SELECT * FROM chkrolattr(); @@ -136,11 +173,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | f -(6 rows) +(8 rows) ALTER ROLE SESSION_USER WITH REPLICATION; SELECT * FROM chkrolattr(); @@ -148,11 +187,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | t + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | f -(6 rows) +(8 rows) ALTER ROLE "session_user" WITH REPLICATION; SELECT * FROM chkrolattr(); @@ -160,11 +201,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t 
current_user | - | f | t regress_testrol1 | session_user | t | t + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | t -(6 rows) +(8 rows) ALTER USER "Public" WITH REPLICATION; ALTER USER "None" WITH REPLICATION; @@ -173,11 +216,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | t Public | - | f | t + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | t + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | t -(6 rows) +(8 rows) ALTER USER regress_testrol1 WITH NOREPLICATION; ALTER USER regress_testrol2 WITH NOREPLICATION; @@ -186,21 +231,19 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | t Public | - | f | t + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | f regress_testrol2 | current_user | f | f session_user | - | f | t -(6 rows) +(8 rows) ROLLBACK; ALTER ROLE USER WITH LOGIN; -- error ERROR: syntax error at or near "USER" LINE 1: ALTER ROLE USER WITH LOGIN; ^ -ALTER ROLE CURRENT_ROLE WITH LOGIN; --error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER ROLE CURRENT_ROLE WITH LOGIN; - ^ ALTER ROLE ALL WITH REPLICATION; -- error ERROR: syntax error at or near "WITH" LINE 1: ALTER ROLE ALL WITH REPLICATION; @@ -228,23 +271,56 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | f current_user | - | f | f regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | f regress_testrol2 | current_user | f | f session_user | - | f | f -(6 rows) +(8 rows) + +ALTER USER CURRENT_ROLE WITH REPLICATION; +SELECT * FROM chkrolattr(); + role | rolekeyword | canlogin | replication 
+------------------+--------------+----------+------------- + None | - | f | f + Public | - | f | f + current_role | - | f | f + current_user | - | f | f + regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t + regress_testrol2 | current_user | f | t + session_user | - | f | f +(8 rows) + +ALTER USER "current_role" WITH REPLICATION; +SELECT * FROM chkrolattr(); + role | rolekeyword | canlogin | replication +------------------+--------------+----------+------------- + None | - | f | f + Public | - | f | f + current_role | - | f | t + current_user | - | f | f + regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t + regress_testrol2 | current_user | f | t + session_user | - | f | f +(8 rows) +ALTER USER CURRENT_ROLE WITH NOREPLICATION; ALTER USER CURRENT_USER WITH REPLICATION; SELECT * FROM chkrolattr(); role | rolekeyword | canlogin | replication ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | f regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | f -(6 rows) +(8 rows) ALTER USER "current_user" WITH REPLICATION; SELECT * FROM chkrolattr(); @@ -252,11 +328,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | f -(6 rows) +(8 rows) ALTER USER SESSION_USER WITH REPLICATION; SELECT * FROM chkrolattr(); @@ -264,11 +342,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | t + regress_testrol2 | 
current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | f -(6 rows) +(8 rows) ALTER USER "session_user" WITH REPLICATION; SELECT * FROM chkrolattr(); @@ -276,11 +356,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | f Public | - | f | f + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | t + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | t -(6 rows) +(8 rows) ALTER USER "Public" WITH REPLICATION; ALTER USER "None" WITH REPLICATION; @@ -289,11 +371,13 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | t Public | - | f | t + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | t + regress_testrol2 | current_role | f | t regress_testrol2 | current_user | f | t session_user | - | f | t -(6 rows) +(8 rows) ALTER USER regress_testrol1 WITH NOREPLICATION; ALTER USER regress_testrol2 WITH NOREPLICATION; @@ -302,21 +386,19 @@ SELECT * FROM chkrolattr(); ------------------+--------------+----------+------------- None | - | f | t Public | - | f | t + current_role | - | f | t current_user | - | f | t regress_testrol1 | session_user | t | f + regress_testrol2 | current_role | f | f regress_testrol2 | current_user | f | f session_user | - | f | t -(6 rows) +(8 rows) ROLLBACK; ALTER USER USER WITH LOGIN; -- error ERROR: syntax error at or near "USER" LINE 1: ALTER USER USER WITH LOGIN; ^ -ALTER USER CURRENT_ROLE WITH LOGIN; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER USER CURRENT_ROLE WITH LOGIN; - ^ ALTER USER ALL WITH REPLICATION; -- error ERROR: syntax error at or near "WITH" LINE 1: ALTER USER ALL WITH REPLICATION; @@ -343,6 +425,7 @@ SELECT * FROM chksetconfig(); ----+------+------------+----------- (0 rows) +ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; ALTER ROLE 
CURRENT_USER SET application_name to 'FOO'; ALTER ROLE SESSION_USER SET application_name to 'BAR'; ALTER ROLE "current_user" SET application_name to 'FOOFOO'; @@ -354,8 +437,9 @@ SELECT * FROM chksetconfig(); ALL | Public | - | {application_name=BARBAR} ALL | current_user | - | {application_name=FOOFOO} ALL | regress_testrol1 | session_user | {application_name=BAR} + ALL | regress_testrol2 | current_role | {application_name=FOO} ALL | regress_testrol2 | current_user | {application_name=FOO} -(4 rows) +(5 rows) ALTER ROLE regress_testrol1 SET application_name to 'SLAM'; SELECT * FROM chksetconfig(); @@ -364,9 +448,11 @@ SELECT * FROM chksetconfig(); ALL | Public | - | {application_name=BARBAR} ALL | current_user | - | {application_name=FOOFOO} ALL | regress_testrol1 | session_user | {application_name=SLAM} + ALL | regress_testrol2 | current_role | {application_name=FOO} ALL | regress_testrol2 | current_user | {application_name=FOO} -(4 rows) +(5 rows) +ALTER ROLE CURRENT_ROLE RESET application_name; ALTER ROLE CURRENT_USER RESET application_name; ALTER ROLE SESSION_USER RESET application_name; ALTER ROLE "current_user" RESET application_name; @@ -377,10 +463,6 @@ SELECT * FROM chksetconfig(); ----+------+------------+----------- (0 rows) -ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; - ^ ALTER ROLE USER SET application_name to 'BOOM'; -- error ERROR: syntax error at or near "USER" LINE 1: ALTER ROLE USER SET application_name to 'BOOM'; @@ -395,6 +477,7 @@ SELECT * FROM chksetconfig(); ----+------+------------+----------- (0 rows) +ALTER USER CURRENT_ROLE SET application_name to 'BAZ'; ALTER USER CURRENT_USER SET application_name to 'FOO'; ALTER USER SESSION_USER SET application_name to 'BAR'; ALTER USER "current_user" SET application_name to 'FOOFOO'; @@ -406,8 +489,9 @@ SELECT * FROM chksetconfig(); ALL | Public | - | 
{application_name=BARBAR} ALL | current_user | - | {application_name=FOOFOO} ALL | regress_testrol1 | session_user | {application_name=BAR} + ALL | regress_testrol2 | current_role | {application_name=FOO} ALL | regress_testrol2 | current_user | {application_name=FOO} -(4 rows) +(5 rows) ALTER USER regress_testrol1 SET application_name to 'SLAM'; SELECT * FROM chksetconfig(); @@ -416,9 +500,11 @@ SELECT * FROM chksetconfig(); ALL | Public | - | {application_name=BARBAR} ALL | current_user | - | {application_name=FOOFOO} ALL | regress_testrol1 | session_user | {application_name=SLAM} + ALL | regress_testrol2 | current_role | {application_name=FOO} ALL | regress_testrol2 | current_user | {application_name=FOO} -(4 rows) +(5 rows) +ALTER USER CURRENT_ROLE RESET application_name; ALTER USER CURRENT_USER RESET application_name; ALTER USER SESSION_USER RESET application_name; ALTER USER "current_user" RESET application_name; @@ -429,10 +515,6 @@ SELECT * FROM chksetconfig(); ----+------+------------+----------- (0 rows) -ALTER USER CURRENT_ROLE SET application_name to 'BAZ'; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER USER CURRENT_ROLE SET application_name to 'BAZ'; - ^ ALTER USER USER SET application_name to 'BOOM'; -- error ERROR: syntax error at or near "USER" LINE 1: ALTER USER USER SET application_name to 'BOOM'; @@ -448,26 +530,23 @@ ERROR: role "nonexistent" does not exist -- CREATE SCHEMA CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER; CREATE SCHEMA newschema2 AUTHORIZATION "current_user"; -CREATE SCHEMA newschema3 AUTHORIZATION SESSION_USER; -CREATE SCHEMA newschema4 AUTHORIZATION regress_testrolx; -CREATE SCHEMA newschema5 AUTHORIZATION "Public"; -CREATE SCHEMA newschema6 AUTHORIZATION USER; -- error +CREATE SCHEMA newschema3 AUTHORIZATION CURRENT_ROLE; +CREATE SCHEMA newschema4 AUTHORIZATION SESSION_USER; +CREATE SCHEMA newschema5 AUTHORIZATION regress_testrolx; +CREATE SCHEMA newschema6 AUTHORIZATION "Public"; +CREATE SCHEMA 
newschemax AUTHORIZATION USER; -- error ERROR: syntax error at or near "USER" -LINE 1: CREATE SCHEMA newschema6 AUTHORIZATION USER; +LINE 1: CREATE SCHEMA newschemax AUTHORIZATION USER; ^ -CREATE SCHEMA newschema6 AUTHORIZATION CURRENT_ROLE; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: CREATE SCHEMA newschema6 AUTHORIZATION CURRENT_ROLE; - ^ -CREATE SCHEMA newschema6 AUTHORIZATION PUBLIC; -- error +CREATE SCHEMA newschemax AUTHORIZATION PUBLIC; -- error ERROR: role "public" does not exist -CREATE SCHEMA newschema6 AUTHORIZATION "public"; -- error +CREATE SCHEMA newschemax AUTHORIZATION "public"; -- error ERROR: role "public" does not exist -CREATE SCHEMA newschema6 AUTHORIZATION NONE; -- error +CREATE SCHEMA newschemax AUTHORIZATION NONE; -- error ERROR: role name "none" is reserved -LINE 1: CREATE SCHEMA newschema6 AUTHORIZATION NONE; +LINE 1: CREATE SCHEMA newschemax AUTHORIZATION NONE; ^ -CREATE SCHEMA newschema6 AUTHORIZATION nonexistent; -- error +CREATE SCHEMA newschemax AUTHORIZATION nonexistent; -- error ERROR: role "nonexistent" does not exist SELECT n.nspname, r.rolname FROM pg_namespace n JOIN pg_roles r ON (r.oid = n.nspowner) @@ -476,38 +555,37 @@ SELECT n.nspname, r.rolname FROM pg_namespace n ------------+------------------ newschema1 | regress_testrol2 newschema2 | current_user - newschema3 | regress_testrol1 - newschema4 | regress_testrolx - newschema5 | Public -(5 rows) + newschema3 | regress_testrol2 + newschema4 | regress_testrol1 + newschema5 | regress_testrolx + newschema6 | Public +(6 rows) CREATE SCHEMA IF NOT EXISTS newschema1 AUTHORIZATION CURRENT_USER; NOTICE: schema "newschema1" already exists, skipping CREATE SCHEMA IF NOT EXISTS newschema2 AUTHORIZATION "current_user"; NOTICE: schema "newschema2" already exists, skipping -CREATE SCHEMA IF NOT EXISTS newschema3 AUTHORIZATION SESSION_USER; +CREATE SCHEMA IF NOT EXISTS newschema3 AUTHORIZATION CURRENT_ROLE; NOTICE: schema "newschema3" already exists, skipping -CREATE 
SCHEMA IF NOT EXISTS newschema4 AUTHORIZATION regress_testrolx; +CREATE SCHEMA IF NOT EXISTS newschema4 AUTHORIZATION SESSION_USER; NOTICE: schema "newschema4" already exists, skipping -CREATE SCHEMA IF NOT EXISTS newschema5 AUTHORIZATION "Public"; +CREATE SCHEMA IF NOT EXISTS newschema5 AUTHORIZATION regress_testrolx; NOTICE: schema "newschema5" already exists, skipping -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION USER; -- error +CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION "Public"; +NOTICE: schema "newschema6" already exists, skipping +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION USER; -- error ERROR: syntax error at or near "USER" -LINE 1: CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION USER; - ^ -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION CURRENT_ROLE; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ...ATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION CURRENT_RO... +LINE 1: CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION USER; ^ -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION PUBLIC; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION PUBLIC; -- error ERROR: role "public" does not exist -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION "public"; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION "public"; -- error ERROR: role "public" does not exist -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION NONE; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION NONE; -- error ERROR: role name "none" is reserved -LINE 1: CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION NONE; +LINE 1: CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION NONE; ^ -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION nonexistent; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION nonexistent; -- error ERROR: role "nonexistent" does not exist SELECT n.nspname, r.rolname FROM pg_namespace n JOIN pg_roles r ON (r.oid = n.nspowner) @@ -516,10 +594,11 @@ SELECT 
n.nspname, r.rolname FROM pg_namespace n ------------+------------------ newschema1 | regress_testrol2 newschema2 | current_user - newschema3 | regress_testrol1 - newschema4 | regress_testrolx - newschema5 | Public -(5 rows) + newschema3 | regress_testrol2 + newschema4 | regress_testrol1 + newschema5 | regress_testrolx + newschema6 | Public +(6 rows) -- ALTER TABLE OWNER TO \c - @@ -530,27 +609,25 @@ CREATE TABLE testtab3 (a int); CREATE TABLE testtab4 (a int); CREATE TABLE testtab5 (a int); CREATE TABLE testtab6 (a int); +CREATE TABLE testtab7 (a int); \c - SET SESSION AUTHORIZATION regress_testrol1; SET ROLE regress_testrol2; ALTER TABLE testtab1 OWNER TO CURRENT_USER; ALTER TABLE testtab2 OWNER TO "current_user"; -ALTER TABLE testtab3 OWNER TO SESSION_USER; -ALTER TABLE testtab4 OWNER TO regress_testrolx; -ALTER TABLE testtab5 OWNER TO "Public"; -ALTER TABLE testtab6 OWNER TO CURRENT_ROLE; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER TABLE testtab6 OWNER TO CURRENT_ROLE; - ^ -ALTER TABLE testtab6 OWNER TO USER; --error +ALTER TABLE testtab3 OWNER TO CURRENT_ROLE; +ALTER TABLE testtab4 OWNER TO SESSION_USER; +ALTER TABLE testtab5 OWNER TO regress_testrolx; +ALTER TABLE testtab6 OWNER TO "Public"; +ALTER TABLE testtab7 OWNER TO USER; --error ERROR: syntax error at or near "USER" -LINE 1: ALTER TABLE testtab6 OWNER TO USER; +LINE 1: ALTER TABLE testtab7 OWNER TO USER; ^ -ALTER TABLE testtab6 OWNER TO PUBLIC; -- error +ALTER TABLE testtab7 OWNER TO PUBLIC; -- error ERROR: role "public" does not exist -ALTER TABLE testtab6 OWNER TO "public"; -- error +ALTER TABLE testtab7 OWNER TO "public"; -- error ERROR: role "public" does not exist -ALTER TABLE testtab6 OWNER TO nonexistent; -- error +ALTER TABLE testtab7 OWNER TO nonexistent; -- error ERROR: role "nonexistent" does not exist SELECT c.relname, r.rolname FROM pg_class c JOIN pg_roles r ON (r.oid = c.relowner) @@ -560,11 +637,12 @@ SELECT c.relname, r.rolname ----------+------------------ 
testtab1 | regress_testrol2 testtab2 | current_user - testtab3 | regress_testrol1 - testtab4 | regress_testrolx - testtab5 | Public - testtab6 | regress_testrol0 -(6 rows) + testtab3 | regress_testrol2 + testtab4 | regress_testrol1 + testtab5 | regress_testrolx + testtab6 | Public + testtab7 | regress_testrol0 +(7 rows) -- ALTER TABLE, VIEW, MATERIALIZED VIEW, FOREIGN TABLE, SEQUENCE are -- changed their owner in the same way. @@ -580,27 +658,25 @@ CREATE AGGREGATE testagg6(int2) (SFUNC = int2_sum, STYPE = int8); CREATE AGGREGATE testagg7(int2) (SFUNC = int2_sum, STYPE = int8); CREATE AGGREGATE testagg8(int2) (SFUNC = int2_sum, STYPE = int8); CREATE AGGREGATE testagg9(int2) (SFUNC = int2_sum, STYPE = int8); +CREATE AGGREGATE testagga(int2) (SFUNC = int2_sum, STYPE = int8); \c - SET SESSION AUTHORIZATION regress_testrol1; SET ROLE regress_testrol2; ALTER AGGREGATE testagg1(int2) OWNER TO CURRENT_USER; ALTER AGGREGATE testagg2(int2) OWNER TO "current_user"; -ALTER AGGREGATE testagg3(int2) OWNER TO SESSION_USER; -ALTER AGGREGATE testagg4(int2) OWNER TO regress_testrolx; -ALTER AGGREGATE testagg5(int2) OWNER TO "Public"; -ALTER AGGREGATE testagg5(int2) OWNER TO CURRENT_ROLE; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER AGGREGATE testagg5(int2) OWNER TO CURRENT_ROLE; - ^ -ALTER AGGREGATE testagg5(int2) OWNER TO USER; -- error +ALTER AGGREGATE testagg3(int2) OWNER TO CURRENT_ROLE; +ALTER AGGREGATE testagg4(int2) OWNER TO SESSION_USER; +ALTER AGGREGATE testagg5(int2) OWNER TO regress_testrolx; +ALTER AGGREGATE testagg6(int2) OWNER TO "Public"; +ALTER AGGREGATE testagg6(int2) OWNER TO USER; -- error ERROR: syntax error at or near "USER" -LINE 1: ALTER AGGREGATE testagg5(int2) OWNER TO USER; +LINE 1: ALTER AGGREGATE testagg6(int2) OWNER TO USER; ^ -ALTER AGGREGATE testagg5(int2) OWNER TO PUBLIC; -- error +ALTER AGGREGATE testagg6(int2) OWNER TO PUBLIC; -- error ERROR: role "public" does not exist -ALTER AGGREGATE testagg5(int2) OWNER TO "public"; 
-- error +ALTER AGGREGATE testagg6(int2) OWNER TO "public"; -- error ERROR: role "public" does not exist -ALTER AGGREGATE testagg5(int2) OWNER TO nonexistent; -- error +ALTER AGGREGATE testagg6(int2) OWNER TO nonexistent; -- error ERROR: role "nonexistent" does not exist SELECT p.proname, r.rolname FROM pg_proc p JOIN pg_roles r ON (r.oid = p.proowner) @@ -610,14 +686,15 @@ SELECT p.proname, r.rolname ----------+------------------ testagg1 | regress_testrol2 testagg2 | current_user - testagg3 | regress_testrol1 - testagg4 | regress_testrolx - testagg5 | Public - testagg6 | regress_testrol0 + testagg3 | regress_testrol2 + testagg4 | regress_testrol1 + testagg5 | regress_testrolx + testagg6 | Public testagg7 | regress_testrol0 testagg8 | regress_testrol0 testagg9 | regress_testrol0 -(9 rows) + testagga | regress_testrol0 +(10 rows) -- CREATE USER MAPPING CREATE FOREIGN DATA WRAPPER test_wrapper; @@ -630,58 +707,52 @@ CREATE SERVER sv6 FOREIGN DATA WRAPPER test_wrapper; CREATE SERVER sv7 FOREIGN DATA WRAPPER test_wrapper; CREATE SERVER sv8 FOREIGN DATA WRAPPER test_wrapper; CREATE SERVER sv9 FOREIGN DATA WRAPPER test_wrapper; +CREATE SERVER sv10 FOREIGN DATA WRAPPER test_wrapper; CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER'); CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"'); -CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER'); -CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"'); -CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER'); -CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC'); -CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"'); -CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx'); -CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv9 - OPTIONS (user 'CURRENT_ROLE'); -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv9 - ^ 
-CREATE USER MAPPING FOR nonexistent SERVER sv9 - OPTIONS (user 'nonexistent'); -- error; +CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv3 OPTIONS (user 'CURRENT_ROLE'); +CREATE USER MAPPING FOR USER SERVER sv4 OPTIONS (user 'USER'); +CREATE USER MAPPING FOR "user" SERVER sv5 OPTIONS (user '"USER"'); +CREATE USER MAPPING FOR SESSION_USER SERVER sv6 OPTIONS (user 'SESSION_USER'); +CREATE USER MAPPING FOR PUBLIC SERVER sv7 OPTIONS (user 'PUBLIC'); +CREATE USER MAPPING FOR "Public" SERVER sv8 OPTIONS (user '"Public"'); +CREATE USER MAPPING FOR regress_testrolx SERVER sv9 OPTIONS (user 'regress_testrolx'); +CREATE USER MAPPING FOR nonexistent SERVER sv10 OPTIONS (user 'nonexistent'); -- error; ERROR: role "nonexistent" does not exist SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+--------------------------- regress_testrol2 | sv1 | {user=CURRENT_USER} current_user | sv2 | {"user=\"current_user\""} - regress_testrol2 | sv3 | {user=USER} - user | sv4 | {"user=\"USER\""} - regress_testrol1 | sv5 | {user=SESSION_USER} - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} -(8 rows) + regress_testrol2 | sv3 | {user=CURRENT_ROLE} + regress_testrol2 | sv4 | {user=USER} + user | sv5 | {"user=\"USER\""} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} +(9 rows) -- ALTER USER MAPPING ALTER USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (SET user 'CURRENT_USER_alt'); ALTER USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (SET user '"current_user"_alt'); -ALTER USER MAPPING FOR USER SERVER sv3 +ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv3 + OPTIONS (SET user 'CURRENT_ROLE_alt'); +ALTER USER MAPPING FOR USER SERVER sv4 OPTIONS (SET user 'USER_alt'); -ALTER USER MAPPING FOR "user" SERVER sv4 +ALTER USER MAPPING FOR "user" SERVER sv5 OPTIONS (SET user '"user"_alt'); -ALTER 
USER MAPPING FOR SESSION_USER SERVER sv5 +ALTER USER MAPPING FOR SESSION_USER SERVER sv6 OPTIONS (SET user 'SESSION_USER_alt'); -ALTER USER MAPPING FOR PUBLIC SERVER sv6 +ALTER USER MAPPING FOR PUBLIC SERVER sv7 OPTIONS (SET user 'public_alt'); -ALTER USER MAPPING FOR "Public" SERVER sv7 +ALTER USER MAPPING FOR "Public" SERVER sv8 OPTIONS (SET user '"Public"_alt'); -ALTER USER MAPPING FOR regress_testrolx SERVER sv8 +ALTER USER MAPPING FOR regress_testrolx SERVER sv9 OPTIONS (SET user 'regress_testrolx_alt'); -ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv9 - OPTIONS (SET user 'CURRENT_ROLE_alt'); -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv9 - ^ -ALTER USER MAPPING FOR nonexistent SERVER sv9 +ALTER USER MAPPING FOR nonexistent SERVER sv10 OPTIONS (SET user 'nonexistent_alt'); -- error ERROR: role "nonexistent" does not exist SELECT * FROM chkumapping(); @@ -689,28 +760,26 @@ SELECT * FROM chkumapping(); ------------------+----------+------------------------------- regress_testrol2 | sv1 | {user=CURRENT_USER_alt} current_user | sv2 | {"user=\"current_user\"_alt"} - regress_testrol2 | sv3 | {user=USER_alt} - user | sv4 | {"user=\"user\"_alt"} - regress_testrol1 | sv5 | {user=SESSION_USER_alt} - | sv6 | {user=public_alt} - Public | sv7 | {"user=\"Public\"_alt"} - regress_testrolx | sv8 | {user=regress_testrolx_alt} -(8 rows) + regress_testrol2 | sv3 | {user=CURRENT_ROLE_alt} + regress_testrol2 | sv4 | {user=USER_alt} + user | sv5 | {"user=\"user\"_alt"} + regress_testrol1 | sv6 | {user=SESSION_USER_alt} + | sv7 | {user=public_alt} + Public | sv8 | {"user=\"Public\"_alt"} + regress_testrolx | sv9 | {user=regress_testrolx_alt} +(9 rows) -- DROP USER MAPPING DROP USER MAPPING FOR CURRENT_USER SERVER sv1; DROP USER MAPPING FOR "current_user" SERVER sv2; -DROP USER MAPPING FOR USER SERVER sv3; -DROP USER MAPPING FOR "user" SERVER sv4; -DROP USER MAPPING FOR SESSION_USER SERVER sv5; -DROP USER MAPPING FOR PUBLIC 
SERVER sv6; -DROP USER MAPPING FOR "Public" SERVER sv7; -DROP USER MAPPING FOR regress_testrolx SERVER sv8; -DROP USER MAPPING FOR CURRENT_ROLE SERVER sv9; -- error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: DROP USER MAPPING FOR CURRENT_ROLE SERVER sv9; - ^ -DROP USER MAPPING FOR nonexistent SERVER sv; -- error +DROP USER MAPPING FOR CURRENT_ROLE SERVER sv3; +DROP USER MAPPING FOR USER SERVER sv4; +DROP USER MAPPING FOR "user" SERVER sv5; +DROP USER MAPPING FOR SESSION_USER SERVER sv6; +DROP USER MAPPING FOR PUBLIC SERVER sv7; +DROP USER MAPPING FOR "Public" SERVER sv8; +DROP USER MAPPING FOR regress_testrolx SERVER sv9; +DROP USER MAPPING FOR nonexistent SERVER sv10; -- error ERROR: role "nonexistent" does not exist SELECT * FROM chkumapping(); umname | umserver | umoptions @@ -719,24 +788,26 @@ SELECT * FROM chkumapping(); CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER'); CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"'); -CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER'); -CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"'); -CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER'); -CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC'); -CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"'); -CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx'); +CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv3 OPTIONS (user 'CURRENT_ROLE'); +CREATE USER MAPPING FOR USER SERVER sv4 OPTIONS (user 'USER'); +CREATE USER MAPPING FOR "user" SERVER sv5 OPTIONS (user '"USER"'); +CREATE USER MAPPING FOR SESSION_USER SERVER sv6 OPTIONS (user 'SESSION_USER'); +CREATE USER MAPPING FOR PUBLIC SERVER sv7 OPTIONS (user 'PUBLIC'); +CREATE USER MAPPING FOR "Public" SERVER sv8 OPTIONS (user '"Public"'); +CREATE USER MAPPING FOR regress_testrolx SERVER sv9 OPTIONS (user 'regress_testrolx'); SELECT * FROM 
chkumapping(); umname | umserver | umoptions ------------------+----------+--------------------------- regress_testrol2 | sv1 | {user=CURRENT_USER} current_user | sv2 | {"user=\"current_user\""} - regress_testrol2 | sv3 | {user=USER} - user | sv4 | {"user=\"USER\""} - regress_testrol1 | sv5 | {user=SESSION_USER} - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} -(8 rows) + regress_testrol2 | sv3 | {user=CURRENT_ROLE} + regress_testrol2 | sv4 | {user=USER} + user | sv5 | {"user=\"USER\""} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} +(9 rows) -- DROP USER MAPPING IF EXISTS DROP USER MAPPING IF EXISTS FOR CURRENT_USER SERVER sv1; @@ -744,82 +815,92 @@ SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+--------------------------- current_user | sv2 | {"user=\"current_user\""} - regress_testrol2 | sv3 | {user=USER} - user | sv4 | {"user=\"USER\""} - regress_testrol1 | sv5 | {user=SESSION_USER} - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} -(7 rows) + regress_testrol2 | sv3 | {user=CURRENT_ROLE} + regress_testrol2 | sv4 | {user=USER} + user | sv5 | {"user=\"USER\""} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} +(8 rows) DROP USER MAPPING IF EXISTS FOR "current_user" SERVER sv2; SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+------------------------- - regress_testrol2 | sv3 | {user=USER} - user | sv4 | {"user=\"USER\""} - regress_testrol1 | sv5 | {user=SESSION_USER} - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} + regress_testrol2 | sv3 | {user=CURRENT_ROLE} + regress_testrol2 | 
sv4 | {user=USER} + user | sv5 | {"user=\"USER\""} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} +(7 rows) + +DROP USER MAPPING IF EXISTS FOR CURRENT_USER SERVER sv3; +SELECT * FROM chkumapping(); + umname | umserver | umoptions +------------------+----------+------------------------- + regress_testrol2 | sv4 | {user=USER} + user | sv5 | {"user=\"USER\""} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} (6 rows) -DROP USER MAPPING IF EXISTS FOR USER SERVER sv3; +DROP USER MAPPING IF EXISTS FOR USER SERVER sv4; SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+------------------------- - user | sv4 | {"user=\"USER\""} - regress_testrol1 | sv5 | {user=SESSION_USER} - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} + user | sv5 | {"user=\"USER\""} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} (5 rows) -DROP USER MAPPING IF EXISTS FOR "user" SERVER sv4; +DROP USER MAPPING IF EXISTS FOR "user" SERVER sv5; SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+------------------------- - regress_testrol1 | sv5 | {user=SESSION_USER} - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} + regress_testrol1 | sv6 | {user=SESSION_USER} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} (4 rows) -DROP USER MAPPING IF EXISTS FOR SESSION_USER SERVER sv5; +DROP USER MAPPING IF EXISTS FOR SESSION_USER SERVER sv6; SELECT * FROM chkumapping(); umname | umserver | umoptions 
------------------+----------+------------------------- - | sv6 | {user=PUBLIC} - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} + | sv7 | {user=PUBLIC} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} (3 rows) -DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER sv6; +DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER sv7; SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+------------------------- - Public | sv7 | {"user=\"Public\""} - regress_testrolx | sv8 | {user=regress_testrolx} + Public | sv8 | {"user=\"Public\""} + regress_testrolx | sv9 | {user=regress_testrolx} (2 rows) -DROP USER MAPPING IF EXISTS FOR "Public" SERVER sv7; +DROP USER MAPPING IF EXISTS FOR "Public" SERVER sv8; SELECT * FROM chkumapping(); umname | umserver | umoptions ------------------+----------+------------------------- - regress_testrolx | sv8 | {user=regress_testrolx} + regress_testrolx | sv9 | {user=regress_testrolx} (1 row) -DROP USER MAPPING IF EXISTS FOR regress_testrolx SERVER sv8; +DROP USER MAPPING IF EXISTS FOR regress_testrolx SERVER sv9; SELECT * FROM chkumapping(); umname | umserver | umoptions --------+----------+----------- (0 rows) -DROP USER MAPPING IF EXISTS FOR CURRENT_ROLE SERVER sv9; --error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: DROP USER MAPPING IF EXISTS FOR CURRENT_ROLE SERVER sv9; - ^ -DROP USER MAPPING IF EXISTS FOR nonexistent SERVER sv9; -- error +DROP USER MAPPING IF EXISTS FOR nonexistent SERVER sv10; -- error NOTICE: role "nonexistent" does not exist, skipping -- GRANT/REVOKE GRANT regress_testrol0 TO pg_signal_backend; -- success @@ -840,7 +921,8 @@ SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; testagg7 | testagg8 | testagg9 | -(9 rows) + testagga | +(10 rows) REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM PUBLIC; REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM PUBLIC; @@ -853,108 +935,106 @@ REVOKE 
ALL PRIVILEGES ON FUNCTION testagg8(int2) FROM PUBLIC; GRANT ALL PRIVILEGES ON FUNCTION testagg1(int2) TO PUBLIC; GRANT ALL PRIVILEGES ON FUNCTION testagg2(int2) TO CURRENT_USER; GRANT ALL PRIVILEGES ON FUNCTION testagg3(int2) TO "current_user"; -GRANT ALL PRIVILEGES ON FUNCTION testagg4(int2) TO SESSION_USER; -GRANT ALL PRIVILEGES ON FUNCTION testagg5(int2) TO "Public"; -GRANT ALL PRIVILEGES ON FUNCTION testagg6(int2) TO regress_testrolx; -GRANT ALL PRIVILEGES ON FUNCTION testagg7(int2) TO "public"; -GRANT ALL PRIVILEGES ON FUNCTION testagg8(int2) +GRANT ALL PRIVILEGES ON FUNCTION testagg4(int2) TO CURRENT_ROLE; +GRANT ALL PRIVILEGES ON FUNCTION testagg5(int2) TO SESSION_USER; +GRANT ALL PRIVILEGES ON FUNCTION testagg6(int2) TO "Public"; +GRANT ALL PRIVILEGES ON FUNCTION testagg7(int2) TO regress_testrolx; +GRANT ALL PRIVILEGES ON FUNCTION testagg8(int2) TO "public"; +GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO current_user, public, regress_testrolx; SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; proname | proacl ----------+----------------------------------------------------------------------------------------------------------------------------------- testagg1 | {regress_testrol2=X/regress_testrol2,=X/regress_testrol2} testagg2 | {current_user=X/current_user,regress_testrol2=X/current_user} - testagg3 | {regress_testrol1=X/regress_testrol1,current_user=X/regress_testrol1} - testagg4 | {regress_testrolx=X/regress_testrolx,regress_testrol1=X/regress_testrolx} - testagg5 | {Public=X/Public} - testagg6 | {regress_testrol0=X/regress_testrol0,regress_testrolx=X/regress_testrol0} - testagg7 | {regress_testrol0=X/regress_testrol0,=X/regress_testrol0} - testagg8 | {regress_testrol0=X/regress_testrol0,regress_testrol2=X/regress_testrol0,=X/regress_testrol0,regress_testrolx=X/regress_testrol0} - testagg9 | -(9 rows) + testagg3 | {regress_testrol2=X/regress_testrol2,current_user=X/regress_testrol2} + testagg4 | 
{regress_testrol1=X/regress_testrol1,regress_testrol2=X/regress_testrol1} + testagg5 | {regress_testrolx=X/regress_testrolx,regress_testrol1=X/regress_testrolx} + testagg6 | {Public=X/Public} + testagg7 | {regress_testrol0=X/regress_testrol0,regress_testrolx=X/regress_testrol0} + testagg8 | {regress_testrol0=X/regress_testrol0,=X/regress_testrol0} + testagg9 | {=X/regress_testrol0,regress_testrol0=X/regress_testrol0,regress_testrol2=X/regress_testrol0,regress_testrolx=X/regress_testrol0} + testagga | +(10 rows) -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO CURRENT_ROLE; --error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ...RANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO CURRENT_RO... - ^ -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO USER; --error +GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO USER; --error ERROR: syntax error at or near "USER" -LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO USER; +LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO USER; ^ -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO NONE; --error +GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO NONE; --error ERROR: role name "none" is reserved -LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO NONE; +LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO NONE; ^ -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO "none"; --error +GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO "none"; --error ERROR: role name "none" is reserved -LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO "none"; +LINE 1: GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO "none"; ^ SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; proname | proacl ----------+----------------------------------------------------------------------------------------------------------------------------------- testagg1 | {regress_testrol2=X/regress_testrol2,=X/regress_testrol2} testagg2 | 
{current_user=X/current_user,regress_testrol2=X/current_user} - testagg3 | {regress_testrol1=X/regress_testrol1,current_user=X/regress_testrol1} - testagg4 | {regress_testrolx=X/regress_testrolx,regress_testrol1=X/regress_testrolx} - testagg5 | {Public=X/Public} - testagg6 | {regress_testrol0=X/regress_testrol0,regress_testrolx=X/regress_testrol0} - testagg7 | {regress_testrol0=X/regress_testrol0,=X/regress_testrol0} - testagg8 | {regress_testrol0=X/regress_testrol0,regress_testrol2=X/regress_testrol0,=X/regress_testrol0,regress_testrolx=X/regress_testrol0} - testagg9 | -(9 rows) + testagg3 | {regress_testrol2=X/regress_testrol2,current_user=X/regress_testrol2} + testagg4 | {regress_testrol1=X/regress_testrol1,regress_testrol2=X/regress_testrol1} + testagg5 | {regress_testrolx=X/regress_testrolx,regress_testrol1=X/regress_testrolx} + testagg6 | {Public=X/Public} + testagg7 | {regress_testrol0=X/regress_testrol0,regress_testrolx=X/regress_testrol0} + testagg8 | {regress_testrol0=X/regress_testrol0,=X/regress_testrol0} + testagg9 | {=X/regress_testrol0,regress_testrol0=X/regress_testrol0,regress_testrol2=X/regress_testrol0,regress_testrolx=X/regress_testrol0} + testagga | +(10 rows) REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM PUBLIC; REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM CURRENT_USER; REVOKE ALL PRIVILEGES ON FUNCTION testagg3(int2) FROM "current_user"; -REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM SESSION_USER; -REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM "Public"; -REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM regress_testrolx; -REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM "public"; -REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) +REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM CURRENT_ROLE; +REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM SESSION_USER; +REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM "Public"; +REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM regress_testrolx; 
+REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) FROM "public"; +REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM current_user, public, regress_testrolx; SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; proname | proacl ----------+--------------------------------------- testagg1 | {regress_testrol2=X/regress_testrol2} testagg2 | {current_user=X/current_user} - testagg3 | {regress_testrol1=X/regress_testrol1} - testagg4 | {regress_testrolx=X/regress_testrolx} - testagg5 | {} - testagg6 | {regress_testrol0=X/regress_testrol0} + testagg3 | {regress_testrol2=X/regress_testrol2} + testagg4 | {regress_testrol1=X/regress_testrol1} + testagg5 | {regress_testrolx=X/regress_testrolx} + testagg6 | {} testagg7 | {regress_testrol0=X/regress_testrol0} testagg8 | {regress_testrol0=X/regress_testrol0} - testagg9 | -(9 rows) + testagg9 | {regress_testrol0=X/regress_testrol0} + testagga | +(10 rows) -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM CURRENT_ROLE; --error -ERROR: syntax error at or near "CURRENT_ROLE" -LINE 1: ...KE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM CURRENT_RO... 
- ^ -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM USER; --error +REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM USER; --error ERROR: syntax error at or near "USER" -LINE 1: REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM USER; +LINE 1: REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM USER; ^ -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM NONE; --error +REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM NONE; --error ERROR: role name "none" is reserved -LINE 1: REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM NONE; +LINE 1: REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM NONE; ^ -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none"; --error +REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM "none"; --error ERROR: role name "none" is reserved -LINE 1: ...EVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none"; +LINE 1: ...EVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM "none"; ^ SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; proname | proacl ----------+--------------------------------------- testagg1 | {regress_testrol2=X/regress_testrol2} testagg2 | {current_user=X/current_user} - testagg3 | {regress_testrol1=X/regress_testrol1} - testagg4 | {regress_testrolx=X/regress_testrolx} - testagg5 | {} - testagg6 | {regress_testrol0=X/regress_testrol0} + testagg3 | {regress_testrol2=X/regress_testrol2} + testagg4 | {regress_testrol1=X/regress_testrol1} + testagg5 | {regress_testrolx=X/regress_testrolx} + testagg6 | {} testagg7 | {regress_testrol0=X/regress_testrol0} testagg8 | {regress_testrol0=X/regress_testrol0} - testagg9 | -(9 rows) + testagg9 | {regress_testrol0=X/regress_testrol0} + testagga | +(10 rows) -- DEFAULT MONITORING ROLES CREATE ROLE regress_role_haspriv; @@ -1005,7 +1085,7 @@ REVOKE pg_read_all_settings FROM regress_role_haspriv; -- clean up \c DROP SCHEMA test_roles_schema; -DROP OWNED BY regress_testrol0, "Public", "current_user", regress_testrol1, regress_testrol2, 
regress_testrolx CASCADE; +DROP OWNED BY regress_testrol0, "Public", "current_role", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE; DROP ROLE regress_testrol0, regress_testrol1, regress_testrol2, regress_testrolx; -DROP ROLE "Public", "None", "current_user", "session_user", "user"; +DROP ROLE "Public", "None", "current_role", "current_user", "session_user", "user"; DROP ROLE regress_role_haspriv, regress_role_nopriv; diff --git a/src/test/modules/unsafe_tests/sql/rolenames.sql b/src/test/modules/unsafe_tests/sql/rolenames.sql index c3013c146498c..adac36536db4d 100644 --- a/src/test/modules/unsafe_tests/sql/rolenames.sql +++ b/src/test/modules/unsafe_tests/sql/rolenames.sql @@ -1,20 +1,22 @@ -CREATE OR REPLACE FUNCTION chkrolattr() +CREATE FUNCTION chkrolattr() RETURNS TABLE ("role" name, rolekeyword text, canlogin bool, replication bool) AS $$ SELECT r.rolname, v.keyword, r.rolcanlogin, r.rolreplication FROM pg_roles r - JOIN (VALUES(CURRENT_USER, 'current_user'), + JOIN (VALUES(CURRENT_ROLE, 'current_role'), + (CURRENT_USER, 'current_user'), (SESSION_USER, 'session_user'), + ('current_role', '-'), ('current_user', '-'), ('session_user', '-'), ('Public', '-'), ('None', '-')) AS v(uname, keyword) ON (r.rolname = v.uname) - ORDER BY 1; + ORDER BY 1, 2; $$ LANGUAGE SQL; -CREATE OR REPLACE FUNCTION chksetconfig() +CREATE FUNCTION chksetconfig() RETURNS TABLE (db name, "role" name, rolkeyword text, setconfig text[]) AS $$ SELECT COALESCE(d.datname, 'ALL'), COALESCE(r.rolname, 'ALL'), @@ -22,22 +24,23 @@ SELECT COALESCE(d.datname, 'ALL'), COALESCE(r.rolname, 'ALL'), FROM pg_db_role_setting s LEFT JOIN pg_roles r ON (r.oid = s.setrole) LEFT JOIN pg_database d ON (d.oid = s.setdatabase) - LEFT JOIN (VALUES(CURRENT_USER, 'current_user'), - (SESSION_USER, 'session_user')) + LEFT JOIN (VALUES(CURRENT_ROLE, 'current_role'), + (CURRENT_USER, 'current_user'), + (SESSION_USER, 'session_user')) AS v(uname, keyword) ON (r.rolname = v.uname) WHERE 
(r.rolname) IN ('Public', 'current_user', 'regress_testrol1', 'regress_testrol2') -ORDER BY 1, 2; +ORDER BY 1, 2, 3; $$ LANGUAGE SQL; -CREATE OR REPLACE FUNCTION chkumapping() +CREATE FUNCTION chkumapping() RETURNS TABLE (umname name, umserver name, umoptions text[]) AS $$ SELECT r.rolname, s.srvname, m.umoptions FROM pg_user_mapping m LEFT JOIN pg_roles r ON (r.oid = m.umuser) JOIN pg_foreign_server s ON (s.oid = m.umserver) - ORDER BY 2; + ORDER BY 2, 1; $$ LANGUAGE SQL; -- @@ -50,6 +53,7 @@ SET client_min_messages = ERROR; CREATE ROLE "Public"; CREATE ROLE "None"; +CREATE ROLE "current_role"; CREATE ROLE "current_user"; CREATE ROLE "session_user"; CREATE ROLE "user"; @@ -84,6 +88,11 @@ SET ROLE regress_testrol2; -- ALTER ROLE BEGIN; SELECT * FROM chkrolattr(); +ALTER ROLE CURRENT_ROLE WITH REPLICATION; +SELECT * FROM chkrolattr(); +ALTER ROLE "current_role" WITH REPLICATION; +SELECT * FROM chkrolattr(); +ALTER ROLE CURRENT_ROLE WITH NOREPLICATION; ALTER ROLE CURRENT_USER WITH REPLICATION; SELECT * FROM chkrolattr(); ALTER ROLE "current_user" WITH REPLICATION; @@ -101,7 +110,6 @@ SELECT * FROM chkrolattr(); ROLLBACK; ALTER ROLE USER WITH LOGIN; -- error -ALTER ROLE CURRENT_ROLE WITH LOGIN; --error ALTER ROLE ALL WITH REPLICATION; -- error ALTER ROLE SESSION_ROLE WITH NOREPLICATION; -- error ALTER ROLE PUBLIC WITH NOREPLICATION; -- error @@ -113,6 +121,11 @@ ALTER ROLE nonexistent WITH NOREPLICATION; -- error -- ALTER USER BEGIN; SELECT * FROM chkrolattr(); +ALTER USER CURRENT_ROLE WITH REPLICATION; +SELECT * FROM chkrolattr(); +ALTER USER "current_role" WITH REPLICATION; +SELECT * FROM chkrolattr(); +ALTER USER CURRENT_ROLE WITH NOREPLICATION; ALTER USER CURRENT_USER WITH REPLICATION; SELECT * FROM chkrolattr(); ALTER USER "current_user" WITH REPLICATION; @@ -130,7 +143,6 @@ SELECT * FROM chkrolattr(); ROLLBACK; ALTER USER USER WITH LOGIN; -- error -ALTER USER CURRENT_ROLE WITH LOGIN; -- error ALTER USER ALL WITH REPLICATION; -- error ALTER USER SESSION_ROLE WITH 
NOREPLICATION; -- error ALTER USER PUBLIC WITH NOREPLICATION; -- error @@ -141,6 +153,7 @@ ALTER USER nonexistent WITH NOREPLICATION; -- error -- ALTER ROLE SET/RESET SELECT * FROM chksetconfig(); +ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; ALTER ROLE CURRENT_USER SET application_name to 'FOO'; ALTER ROLE SESSION_USER SET application_name to 'BAR'; ALTER ROLE "current_user" SET application_name to 'FOOFOO'; @@ -149,6 +162,7 @@ ALTER ROLE ALL SET application_name to 'SLAP'; SELECT * FROM chksetconfig(); ALTER ROLE regress_testrol1 SET application_name to 'SLAM'; SELECT * FROM chksetconfig(); +ALTER ROLE CURRENT_ROLE RESET application_name; ALTER ROLE CURRENT_USER RESET application_name; ALTER ROLE SESSION_USER RESET application_name; ALTER ROLE "current_user" RESET application_name; @@ -157,13 +171,13 @@ ALTER ROLE ALL RESET application_name; SELECT * FROM chksetconfig(); -ALTER ROLE CURRENT_ROLE SET application_name to 'BAZ'; -- error ALTER ROLE USER SET application_name to 'BOOM'; -- error ALTER ROLE PUBLIC SET application_name to 'BOMB'; -- error ALTER ROLE nonexistent SET application_name to 'BOMB'; -- error -- ALTER USER SET/RESET SELECT * FROM chksetconfig(); +ALTER USER CURRENT_ROLE SET application_name to 'BAZ'; ALTER USER CURRENT_USER SET application_name to 'FOO'; ALTER USER SESSION_USER SET application_name to 'BAR'; ALTER USER "current_user" SET application_name to 'FOOFOO'; @@ -172,6 +186,7 @@ ALTER USER ALL SET application_name to 'SLAP'; SELECT * FROM chksetconfig(); ALTER USER regress_testrol1 SET application_name to 'SLAM'; SELECT * FROM chksetconfig(); +ALTER USER CURRENT_ROLE RESET application_name; ALTER USER CURRENT_USER RESET application_name; ALTER USER SESSION_USER RESET application_name; ALTER USER "current_user" RESET application_name; @@ -180,7 +195,6 @@ ALTER USER ALL RESET application_name; SELECT * FROM chksetconfig(); -ALTER USER CURRENT_ROLE SET application_name to 'BAZ'; -- error ALTER USER USER SET application_name to 
'BOOM'; -- error ALTER USER PUBLIC SET application_name to 'BOMB'; -- error ALTER USER NONE SET application_name to 'BOMB'; -- error @@ -189,16 +203,16 @@ ALTER USER nonexistent SET application_name to 'BOMB'; -- error -- CREATE SCHEMA CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER; CREATE SCHEMA newschema2 AUTHORIZATION "current_user"; -CREATE SCHEMA newschema3 AUTHORIZATION SESSION_USER; -CREATE SCHEMA newschema4 AUTHORIZATION regress_testrolx; -CREATE SCHEMA newschema5 AUTHORIZATION "Public"; +CREATE SCHEMA newschema3 AUTHORIZATION CURRENT_ROLE; +CREATE SCHEMA newschema4 AUTHORIZATION SESSION_USER; +CREATE SCHEMA newschema5 AUTHORIZATION regress_testrolx; +CREATE SCHEMA newschema6 AUTHORIZATION "Public"; -CREATE SCHEMA newschema6 AUTHORIZATION USER; -- error -CREATE SCHEMA newschema6 AUTHORIZATION CURRENT_ROLE; -- error -CREATE SCHEMA newschema6 AUTHORIZATION PUBLIC; -- error -CREATE SCHEMA newschema6 AUTHORIZATION "public"; -- error -CREATE SCHEMA newschema6 AUTHORIZATION NONE; -- error -CREATE SCHEMA newschema6 AUTHORIZATION nonexistent; -- error +CREATE SCHEMA newschemax AUTHORIZATION USER; -- error +CREATE SCHEMA newschemax AUTHORIZATION PUBLIC; -- error +CREATE SCHEMA newschemax AUTHORIZATION "public"; -- error +CREATE SCHEMA newschemax AUTHORIZATION NONE; -- error +CREATE SCHEMA newschemax AUTHORIZATION nonexistent; -- error SELECT n.nspname, r.rolname FROM pg_namespace n JOIN pg_roles r ON (r.oid = n.nspowner) @@ -206,16 +220,16 @@ SELECT n.nspname, r.rolname FROM pg_namespace n CREATE SCHEMA IF NOT EXISTS newschema1 AUTHORIZATION CURRENT_USER; CREATE SCHEMA IF NOT EXISTS newschema2 AUTHORIZATION "current_user"; -CREATE SCHEMA IF NOT EXISTS newschema3 AUTHORIZATION SESSION_USER; -CREATE SCHEMA IF NOT EXISTS newschema4 AUTHORIZATION regress_testrolx; -CREATE SCHEMA IF NOT EXISTS newschema5 AUTHORIZATION "Public"; +CREATE SCHEMA IF NOT EXISTS newschema3 AUTHORIZATION CURRENT_ROLE; +CREATE SCHEMA IF NOT EXISTS newschema4 AUTHORIZATION SESSION_USER; 
+CREATE SCHEMA IF NOT EXISTS newschema5 AUTHORIZATION regress_testrolx; +CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION "Public"; -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION USER; -- error -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION CURRENT_ROLE; -- error -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION PUBLIC; -- error -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION "public"; -- error -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION NONE; -- error -CREATE SCHEMA IF NOT EXISTS newschema6 AUTHORIZATION nonexistent; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION USER; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION PUBLIC; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION "public"; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION NONE; -- error +CREATE SCHEMA IF NOT EXISTS newschemax AUTHORIZATION nonexistent; -- error SELECT n.nspname, r.rolname FROM pg_namespace n JOIN pg_roles r ON (r.oid = n.nspowner) @@ -230,6 +244,7 @@ CREATE TABLE testtab3 (a int); CREATE TABLE testtab4 (a int); CREATE TABLE testtab5 (a int); CREATE TABLE testtab6 (a int); +CREATE TABLE testtab7 (a int); \c - SET SESSION AUTHORIZATION regress_testrol1; @@ -237,15 +252,15 @@ SET ROLE regress_testrol2; ALTER TABLE testtab1 OWNER TO CURRENT_USER; ALTER TABLE testtab2 OWNER TO "current_user"; -ALTER TABLE testtab3 OWNER TO SESSION_USER; -ALTER TABLE testtab4 OWNER TO regress_testrolx; -ALTER TABLE testtab5 OWNER TO "Public"; +ALTER TABLE testtab3 OWNER TO CURRENT_ROLE; +ALTER TABLE testtab4 OWNER TO SESSION_USER; +ALTER TABLE testtab5 OWNER TO regress_testrolx; +ALTER TABLE testtab6 OWNER TO "Public"; -ALTER TABLE testtab6 OWNER TO CURRENT_ROLE; -- error -ALTER TABLE testtab6 OWNER TO USER; --error -ALTER TABLE testtab6 OWNER TO PUBLIC; -- error -ALTER TABLE testtab6 OWNER TO "public"; -- error -ALTER TABLE testtab6 OWNER TO nonexistent; -- error +ALTER TABLE testtab7 OWNER TO USER; --error +ALTER 
TABLE testtab7 OWNER TO PUBLIC; -- error +ALTER TABLE testtab7 OWNER TO "public"; -- error +ALTER TABLE testtab7 OWNER TO nonexistent; -- error SELECT c.relname, r.rolname FROM pg_class c JOIN pg_roles r ON (r.oid = c.relowner) @@ -267,6 +282,7 @@ CREATE AGGREGATE testagg6(int2) (SFUNC = int2_sum, STYPE = int8); CREATE AGGREGATE testagg7(int2) (SFUNC = int2_sum, STYPE = int8); CREATE AGGREGATE testagg8(int2) (SFUNC = int2_sum, STYPE = int8); CREATE AGGREGATE testagg9(int2) (SFUNC = int2_sum, STYPE = int8); +CREATE AGGREGATE testagga(int2) (SFUNC = int2_sum, STYPE = int8); \c - SET SESSION AUTHORIZATION regress_testrol1; @@ -274,15 +290,15 @@ SET ROLE regress_testrol2; ALTER AGGREGATE testagg1(int2) OWNER TO CURRENT_USER; ALTER AGGREGATE testagg2(int2) OWNER TO "current_user"; -ALTER AGGREGATE testagg3(int2) OWNER TO SESSION_USER; -ALTER AGGREGATE testagg4(int2) OWNER TO regress_testrolx; -ALTER AGGREGATE testagg5(int2) OWNER TO "Public"; +ALTER AGGREGATE testagg3(int2) OWNER TO CURRENT_ROLE; +ALTER AGGREGATE testagg4(int2) OWNER TO SESSION_USER; +ALTER AGGREGATE testagg5(int2) OWNER TO regress_testrolx; +ALTER AGGREGATE testagg6(int2) OWNER TO "Public"; -ALTER AGGREGATE testagg5(int2) OWNER TO CURRENT_ROLE; -- error -ALTER AGGREGATE testagg5(int2) OWNER TO USER; -- error -ALTER AGGREGATE testagg5(int2) OWNER TO PUBLIC; -- error -ALTER AGGREGATE testagg5(int2) OWNER TO "public"; -- error -ALTER AGGREGATE testagg5(int2) OWNER TO nonexistent; -- error +ALTER AGGREGATE testagg6(int2) OWNER TO USER; -- error +ALTER AGGREGATE testagg6(int2) OWNER TO PUBLIC; -- error +ALTER AGGREGATE testagg6(int2) OWNER TO "public"; -- error +ALTER AGGREGATE testagg6(int2) OWNER TO nonexistent; -- error SELECT p.proname, r.rolname FROM pg_proc p JOIN pg_roles r ON (r.oid = p.proowner) @@ -300,20 +316,19 @@ CREATE SERVER sv6 FOREIGN DATA WRAPPER test_wrapper; CREATE SERVER sv7 FOREIGN DATA WRAPPER test_wrapper; CREATE SERVER sv8 FOREIGN DATA WRAPPER test_wrapper; CREATE SERVER sv9 FOREIGN 
DATA WRAPPER test_wrapper; +CREATE SERVER sv10 FOREIGN DATA WRAPPER test_wrapper; CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER'); CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"'); -CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER'); -CREATE USER MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"'); -CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER'); -CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC'); -CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"'); -CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx'); - -CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv9 - OPTIONS (user 'CURRENT_ROLE'); -- error -CREATE USER MAPPING FOR nonexistent SERVER sv9 - OPTIONS (user 'nonexistent'); -- error; +CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv3 OPTIONS (user 'CURRENT_ROLE'); +CREATE USER MAPPING FOR USER SERVER sv4 OPTIONS (user 'USER'); +CREATE USER MAPPING FOR "user" SERVER sv5 OPTIONS (user '"USER"'); +CREATE USER MAPPING FOR SESSION_USER SERVER sv6 OPTIONS (user 'SESSION_USER'); +CREATE USER MAPPING FOR PUBLIC SERVER sv7 OPTIONS (user 'PUBLIC'); +CREATE USER MAPPING FOR "Public" SERVER sv8 OPTIONS (user '"Public"'); +CREATE USER MAPPING FOR regress_testrolx SERVER sv9 OPTIONS (user 'regress_testrolx'); + +CREATE USER MAPPING FOR nonexistent SERVER sv10 OPTIONS (user 'nonexistent'); -- error; SELECT * FROM chkumapping(); @@ -322,22 +337,22 @@ ALTER USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (SET user 'CURRENT_USER_alt'); ALTER USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (SET user '"current_user"_alt'); -ALTER USER MAPPING FOR USER SERVER sv3 +ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv3 + OPTIONS (SET user 'CURRENT_ROLE_alt'); +ALTER USER MAPPING FOR USER SERVER sv4 OPTIONS (SET user 'USER_alt'); -ALTER USER MAPPING FOR "user" SERVER sv4 +ALTER USER MAPPING FOR "user" SERVER sv5 OPTIONS 
(SET user '"user"_alt'); -ALTER USER MAPPING FOR SESSION_USER SERVER sv5 +ALTER USER MAPPING FOR SESSION_USER SERVER sv6 OPTIONS (SET user 'SESSION_USER_alt'); -ALTER USER MAPPING FOR PUBLIC SERVER sv6 +ALTER USER MAPPING FOR PUBLIC SERVER sv7 OPTIONS (SET user 'public_alt'); -ALTER USER MAPPING FOR "Public" SERVER sv7 +ALTER USER MAPPING FOR "Public" SERVER sv8 OPTIONS (SET user '"Public"_alt'); -ALTER USER MAPPING FOR regress_testrolx SERVER sv8 +ALTER USER MAPPING FOR regress_testrolx SERVER sv9 OPTIONS (SET user 'regress_testrolx_alt'); -ALTER USER MAPPING FOR CURRENT_ROLE SERVER sv9 - OPTIONS (SET user 'CURRENT_ROLE_alt'); -ALTER USER MAPPING FOR nonexistent SERVER sv9 +ALTER USER MAPPING FOR nonexistent SERVER sv10 OPTIONS (SET user 'nonexistent_alt'); -- error SELECT * FROM chkumapping(); @@ -345,25 +360,26 @@ SELECT * FROM chkumapping(); -- DROP USER MAPPING DROP USER MAPPING FOR CURRENT_USER SERVER sv1; DROP USER MAPPING FOR "current_user" SERVER sv2; -DROP USER MAPPING FOR USER SERVER sv3; -DROP USER MAPPING FOR "user" SERVER sv4; -DROP USER MAPPING FOR SESSION_USER SERVER sv5; -DROP USER MAPPING FOR PUBLIC SERVER sv6; -DROP USER MAPPING FOR "Public" SERVER sv7; -DROP USER MAPPING FOR regress_testrolx SERVER sv8; - -DROP USER MAPPING FOR CURRENT_ROLE SERVER sv9; -- error -DROP USER MAPPING FOR nonexistent SERVER sv; -- error +DROP USER MAPPING FOR CURRENT_ROLE SERVER sv3; +DROP USER MAPPING FOR USER SERVER sv4; +DROP USER MAPPING FOR "user" SERVER sv5; +DROP USER MAPPING FOR SESSION_USER SERVER sv6; +DROP USER MAPPING FOR PUBLIC SERVER sv7; +DROP USER MAPPING FOR "Public" SERVER sv8; +DROP USER MAPPING FOR regress_testrolx SERVER sv9; + +DROP USER MAPPING FOR nonexistent SERVER sv10; -- error SELECT * FROM chkumapping(); CREATE USER MAPPING FOR CURRENT_USER SERVER sv1 OPTIONS (user 'CURRENT_USER'); CREATE USER MAPPING FOR "current_user" SERVER sv2 OPTIONS (user '"current_user"'); -CREATE USER MAPPING FOR USER SERVER sv3 OPTIONS (user 'USER'); -CREATE USER 
MAPPING FOR "user" SERVER sv4 OPTIONS (user '"USER"'); -CREATE USER MAPPING FOR SESSION_USER SERVER sv5 OPTIONS (user 'SESSION_USER'); -CREATE USER MAPPING FOR PUBLIC SERVER sv6 OPTIONS (user 'PUBLIC'); -CREATE USER MAPPING FOR "Public" SERVER sv7 OPTIONS (user '"Public"'); -CREATE USER MAPPING FOR regress_testrolx SERVER sv8 OPTIONS (user 'regress_testrolx'); +CREATE USER MAPPING FOR CURRENT_ROLE SERVER sv3 OPTIONS (user 'CURRENT_ROLE'); +CREATE USER MAPPING FOR USER SERVER sv4 OPTIONS (user 'USER'); +CREATE USER MAPPING FOR "user" SERVER sv5 OPTIONS (user '"USER"'); +CREATE USER MAPPING FOR SESSION_USER SERVER sv6 OPTIONS (user 'SESSION_USER'); +CREATE USER MAPPING FOR PUBLIC SERVER sv7 OPTIONS (user 'PUBLIC'); +CREATE USER MAPPING FOR "Public" SERVER sv8 OPTIONS (user '"Public"'); +CREATE USER MAPPING FOR regress_testrolx SERVER sv9 OPTIONS (user 'regress_testrolx'); SELECT * FROM chkumapping(); -- DROP USER MAPPING IF EXISTS @@ -371,21 +387,22 @@ DROP USER MAPPING IF EXISTS FOR CURRENT_USER SERVER sv1; SELECT * FROM chkumapping(); DROP USER MAPPING IF EXISTS FOR "current_user" SERVER sv2; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR USER SERVER sv3; +DROP USER MAPPING IF EXISTS FOR CURRENT_USER SERVER sv3; +SELECT * FROM chkumapping(); +DROP USER MAPPING IF EXISTS FOR USER SERVER sv4; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR "user" SERVER sv4; +DROP USER MAPPING IF EXISTS FOR "user" SERVER sv5; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR SESSION_USER SERVER sv5; +DROP USER MAPPING IF EXISTS FOR SESSION_USER SERVER sv6; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER sv6; +DROP USER MAPPING IF EXISTS FOR PUBLIC SERVER sv7; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR "Public" SERVER sv7; +DROP USER MAPPING IF EXISTS FOR "Public" SERVER sv8; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR regress_testrolx SERVER sv8; +DROP USER MAPPING IF EXISTS FOR 
regress_testrolx SERVER sv9; SELECT * FROM chkumapping(); -DROP USER MAPPING IF EXISTS FOR CURRENT_ROLE SERVER sv9; --error -DROP USER MAPPING IF EXISTS FOR nonexistent SERVER sv9; -- error +DROP USER MAPPING IF EXISTS FOR nonexistent SERVER sv10; -- error -- GRANT/REVOKE GRANT regress_testrol0 TO pg_signal_backend; -- success @@ -410,38 +427,38 @@ REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) FROM PUBLIC; GRANT ALL PRIVILEGES ON FUNCTION testagg1(int2) TO PUBLIC; GRANT ALL PRIVILEGES ON FUNCTION testagg2(int2) TO CURRENT_USER; GRANT ALL PRIVILEGES ON FUNCTION testagg3(int2) TO "current_user"; -GRANT ALL PRIVILEGES ON FUNCTION testagg4(int2) TO SESSION_USER; -GRANT ALL PRIVILEGES ON FUNCTION testagg5(int2) TO "Public"; -GRANT ALL PRIVILEGES ON FUNCTION testagg6(int2) TO regress_testrolx; -GRANT ALL PRIVILEGES ON FUNCTION testagg7(int2) TO "public"; -GRANT ALL PRIVILEGES ON FUNCTION testagg8(int2) +GRANT ALL PRIVILEGES ON FUNCTION testagg4(int2) TO CURRENT_ROLE; +GRANT ALL PRIVILEGES ON FUNCTION testagg5(int2) TO SESSION_USER; +GRANT ALL PRIVILEGES ON FUNCTION testagg6(int2) TO "Public"; +GRANT ALL PRIVILEGES ON FUNCTION testagg7(int2) TO regress_testrolx; +GRANT ALL PRIVILEGES ON FUNCTION testagg8(int2) TO "public"; +GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO current_user, public, regress_testrolx; SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO CURRENT_ROLE; --error -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO USER; --error -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO NONE; --error -GRANT ALL PRIVILEGES ON FUNCTION testagg9(int2) TO "none"; --error +GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO USER; --error +GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO NONE; --error +GRANT ALL PRIVILEGES ON FUNCTION testagga(int2) TO "none"; --error SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; REVOKE ALL PRIVILEGES ON FUNCTION testagg1(int2) FROM 
PUBLIC; REVOKE ALL PRIVILEGES ON FUNCTION testagg2(int2) FROM CURRENT_USER; REVOKE ALL PRIVILEGES ON FUNCTION testagg3(int2) FROM "current_user"; -REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM SESSION_USER; -REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM "Public"; -REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM regress_testrolx; -REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM "public"; -REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) +REVOKE ALL PRIVILEGES ON FUNCTION testagg4(int2) FROM CURRENT_ROLE; +REVOKE ALL PRIVILEGES ON FUNCTION testagg5(int2) FROM SESSION_USER; +REVOKE ALL PRIVILEGES ON FUNCTION testagg6(int2) FROM "Public"; +REVOKE ALL PRIVILEGES ON FUNCTION testagg7(int2) FROM regress_testrolx; +REVOKE ALL PRIVILEGES ON FUNCTION testagg8(int2) FROM "public"; +REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM current_user, public, regress_testrolx; SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM CURRENT_ROLE; --error -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM USER; --error -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM NONE; --error -REVOKE ALL PRIVILEGES ON FUNCTION testagg9(int2) FROM "none"; --error +REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM USER; --error +REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM NONE; --error +REVOKE ALL PRIVILEGES ON FUNCTION testagga(int2) FROM "none"; --error SELECT proname, proacl FROM pg_proc WHERE proname LIKE 'testagg_'; @@ -481,7 +498,7 @@ REVOKE pg_read_all_settings FROM regress_role_haspriv; \c DROP SCHEMA test_roles_schema; -DROP OWNED BY regress_testrol0, "Public", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE; +DROP OWNED BY regress_testrol0, "Public", "current_role", "current_user", regress_testrol1, regress_testrol2, regress_testrolx CASCADE; DROP ROLE regress_testrol0, regress_testrol1, regress_testrol2, regress_testrolx; -DROP ROLE 
"Public", "None", "current_user", "session_user", "user"; +DROP ROLE "Public", "None", "current_role", "current_user", "session_user", "user"; DROP ROLE regress_role_haspriv, regress_role_nopriv; diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm index 1488bffa2ba34..ebcaeb44fec50 100644 --- a/src/test/perl/PostgresNode.pm +++ b/src/test/perl/PostgresNode.pm @@ -469,13 +469,15 @@ sub init { print $conf "wal_level = replica\n"; } - print $conf "max_wal_senders = 5\n"; - print $conf "max_replication_slots = 5\n"; - print $conf "max_wal_size = 128MB\n"; - print $conf "shared_buffers = 1MB\n"; + print $conf "max_wal_senders = 10\n"; + print $conf "max_replication_slots = 10\n"; print $conf "wal_log_hints = on\n"; print $conf "hot_standby = on\n"; + # conservative settings to ensure we can run multiple postmasters: + print $conf "shared_buffers = 1MB\n"; print $conf "max_connections = 10\n"; + # limit disk space consumption, too: + print $conf "max_wal_size = 128MB\n"; } else { @@ -551,8 +553,10 @@ sub backup my $name = $self->name; print "# Taking pg_basebackup $backup_name from node \"$name\"\n"; - TestLib::system_or_bail('pg_basebackup', '-D', $backup_path, '-h', - $self->host, '-p', $self->port, '--no-sync'); + TestLib::system_or_bail( + 'pg_basebackup', '-D', $backup_path, '-h', + $self->host, '-p', $self->port, '--checkpoint', + 'fast', '--no-sync'); print "# Backup finished\n"; return; } diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index cbe87f8684318..1baf6bd00173a 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -43,6 +43,7 @@ package TestLib; use strict; use warnings; +use Carp; use Config; use Cwd; use Exporter 'import'; @@ -421,7 +422,7 @@ sub slurp_dir { my ($dir) = @_; opendir(my $dh, $dir) - or die "could not opendir \"$dir\": $!"; + or croak "could not opendir \"$dir\": $!"; my @direntries = readdir $dh; closedir $dh; return @direntries; @@ -443,19 +444,19 @@ sub slurp_file if 
($Config{osname} ne 'MSWin32') { open(my $in, '<', $filename) - or die "could not read \"$filename\": $!"; + or croak "could not read \"$filename\": $!"; $contents = <$in>; close $in; } else { my $fHandle = createFile($filename, "r", "rwd") - or die "could not open \"$filename\": $^E"; + or croak "could not open \"$filename\": $^E"; OsFHandleOpen(my $fh = IO::Handle->new(), $fHandle, 'r') - or die "could not read \"$filename\": $^E\n"; + or croak "could not read \"$filename\": $^E\n"; $contents = <$fh>; CloseHandle($fHandle) - or die "could not close \"$filename\": $^E\n"; + or croak "could not close \"$filename\": $^E\n"; } $contents =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; return $contents; @@ -474,7 +475,7 @@ sub append_to_file { my ($filename, $str) = @_; open my $fh, ">>", $filename - or die "could not write \"$filename\": $!"; + or croak "could not write \"$filename\": $!"; print $fh $str; close $fh; return; diff --git a/src/test/recovery/t/020_archive_status.pl b/src/test/recovery/t/020_archive_status.pl index c726453417b11..eb5c04c411584 100644 --- a/src/test/recovery/t/020_archive_status.pl +++ b/src/test/recovery/t/020_archive_status.pl @@ -64,7 +64,7 @@ FROM pg_stat_archiver }), "0|$segment_name_1", - 'pg_stat_archiver failed to archive $segment_name_1'); + "pg_stat_archiver failed to archive $segment_name_1"); # Crash the cluster for the next test in charge of checking that non-archived # WAL segments are not removed. diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl new file mode 100644 index 0000000000000..8a466e56e0fdc --- /dev/null +++ b/src/test/recovery/t/021_row_visibility.pl @@ -0,0 +1,196 @@ +# Checks that snapshots on standbys behave in a minimally reasonable +# way. 
+use strict; +use warnings; + +use PostgresNode; +use TestLib; +use Test::More tests => 10; +use Config; + +# Initialize primary node +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); +$node_primary->append_conf('postgresql.conf', 'max_prepared_transactions=10'); +$node_primary->start; + +# Initialize with empty test table +$node_primary->safe_psql('postgres', + 'CREATE TABLE public.test_visibility (data text not null)'); + +# Take backup +my $backup_name = 'my_backup'; +$node_primary->backup($backup_name); + +# Create streaming standby from backup +my $node_standby = get_new_node('standby'); +$node_standby->init_from_backup($node_primary, $backup_name, + has_streaming => 1); +$node_standby->append_conf('postgresql.conf', 'max_prepared_transactions=10'); +$node_standby->start; + +# To avoid hanging while expecting some specific input from a psql +# instance being driven by us, add a timeout high enough that it +# should never trigger even on very slow machines, unless something +# is really wrong. +my $psql_timeout = IPC::Run::timer(30); + +# One psql to primary and standby each, for all queries. That allows +# to check uncommitted changes being replicated and such. +my %psql_primary = (stdin => '', stdout => '', stderr => ''); +$psql_primary{run} = + IPC::Run::start( + ['psql', '-XA', '-f', '-', '-d', $node_primary->connstr('postgres')], + '<', \$psql_primary{stdin}, + '>', \$psql_primary{stdout}, + '2>', \$psql_primary{stderr}, + $psql_timeout); + +my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => ''); +$psql_standby{run} = + IPC::Run::start( + ['psql', '-XA', '-f', '-', '-d', $node_standby->connstr('postgres')], + '<', \$psql_standby{stdin}, + '>', \$psql_standby{stdout}, + '2>', \$psql_standby{stderr}, + $psql_timeout); + +# +# 1. Check initial data is the same +# +ok(send_query_and_wait(\%psql_standby, + q/SELECT * FROM test_visibility ORDER BY data;/, + qr/^\(0 rows\)$/m), + 'data not visible'); + +# +# 2. 
Check if an INSERT is replayed and visible +# +$node_primary->psql('postgres', "INSERT INTO test_visibility VALUES ('first insert')"); +$node_primary->wait_for_catchup($node_standby, 'replay', + $node_primary->lsn('insert')); + +ok(send_query_and_wait(\%psql_standby, + q[SELECT * FROM test_visibility ORDER BY data;], + qr/first insert.*\n\(1 row\)/m), + 'insert visible'); + +# +# 3. Verify that uncommitted changes aren't visible. +# +ok(send_query_and_wait(\%psql_primary, + q[ +BEGIN; +UPDATE test_visibility SET data = 'first update' RETURNING data; + ], + qr/^UPDATE 1$/m), + 'UPDATE'); + +$node_primary->psql('postgres', "SELECT txid_current();"); # ensure WAL flush +$node_primary->wait_for_catchup($node_standby, 'replay', + $node_primary->lsn('insert')); + +ok(send_query_and_wait(\%psql_standby, + q[SELECT * FROM test_visibility ORDER BY data;], + qr/first insert.*\n\(1 row\)/m), + 'uncommitted update invisible'); + +# +# 4. That a commit turns 3. visible +# +ok(send_query_and_wait(\%psql_primary, + q[COMMIT;], + qr/^COMMIT$/m), + 'COMMIT'); + +$node_primary->wait_for_catchup($node_standby, 'replay', + $node_primary->lsn('insert')); + +ok(send_query_and_wait(\%psql_standby, + q[SELECT * FROM test_visibility ORDER BY data;], + qr/first update\n\(1 row\)$/m), + 'committed update visible'); + +# +# 5. 
Check that changes in prepared xacts is invisible +# +ok(send_query_and_wait(\%psql_primary, q[ +DELETE from test_visibility; -- delete old data, so we start with clean slate +BEGIN; +INSERT INTO test_visibility VALUES('inserted in prepared will_commit'); +PREPARE TRANSACTION 'will_commit';], + qr/^PREPARE TRANSACTION$/m), + 'prepared will_commit'); + +ok(send_query_and_wait(\%psql_primary, q[ +BEGIN; +INSERT INTO test_visibility VALUES('inserted in prepared will_abort'); +PREPARE TRANSACTION 'will_abort'; + ], + qr/^PREPARE TRANSACTION$/m), + 'prepared will_abort'); + +$node_primary->wait_for_catchup($node_standby, 'replay', + $node_primary->lsn('insert')); + +ok(send_query_and_wait(\%psql_standby, + q[SELECT * FROM test_visibility ORDER BY data;], + qr/^\(0 rows\)$/m), + 'uncommitted prepared invisible'); + +# For some variation, finish prepared xacts via separate connections +$node_primary->safe_psql('postgres', + "COMMIT PREPARED 'will_commit';"); +$node_primary->safe_psql('postgres', + "ROLLBACK PREPARED 'will_abort';"); +$node_primary->wait_for_catchup($node_standby, 'replay', + $node_primary->lsn('insert')); + +ok(send_query_and_wait(\%psql_standby, + q[SELECT * FROM test_visibility ORDER BY data;], + qr/will_commit.*\n\(1 row\)$/m), + 'finished prepared visible'); + +$node_primary->stop; +$node_standby->stop; + +# Send query, wait until string matches +sub send_query_and_wait +{ + my ($psql, $query, $untl) = @_; + my $ret; + + # send query + $$psql{stdin} .= $query; + $$psql{stdin} .= "\n"; + + # wait for query results + $$psql{run}->pump_nb(); + while (1) + { + # See PostgresNode.pm's psql() + $$psql{stdout} =~ s/\r\n/\n/g if $Config{osname} eq 'msys'; + + last if $$psql{stdout} =~ /$untl/; + + if ($psql_timeout->is_expired) + { + BAIL_OUT("aborting wait: program timed out\n". + "stream contents: >>$$psql{stdout}<<\n". + "pattern searched for: $untl\n"); + return 0; + } + if (not $$psql{run}->pumpable()) + { + BAIL_OUT("aborting wait: program died\n". 
+ "stream contents: >>$$psql{stdout}<<\n". + "pattern searched for: $untl\n"); + return 0; + } + $$psql{run}->pump(); + } + + $$psql{stdout} = ''; + + return 1; +} diff --git a/src/test/regress/expected/alter_generic.out b/src/test/regress/expected/alter_generic.out index 1e50b69ea50f3..505eb7ede53c8 100644 --- a/src/test/regress/expected/alter_generic.out +++ b/src/test/regress/expected/alter_generic.out @@ -505,10 +505,10 @@ CREATE OPERATOR FAMILY alt_opf19 USING btree; ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 test_opclass_options_func(internal, text[], bool); ERROR: function test_opclass_options_func(internal, text[], boolean) does not exist ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) btint42cmp(int4, int2); -ERROR: invalid opclass options parsing function -HINT: Valid signature of opclass options parsing function is '(internal) RETURNS void'. +ERROR: invalid operator class options parsing function +HINT: Valid signature of operator class options parsing function is (internal) RETURNS void. 
ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4, int2) btint42cmp(int4, int2); -ERROR: left and right associated data types for opclass options parsing functions must match +ERROR: left and right associated data types for operator class options parsing functions must match ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) test_opclass_options_func(internal); -- Ok ALTER OPERATOR FAMILY alt_opf19 USING btree DROP FUNCTION 5 (int4, int4); DROP OPERATOR FAMILY alt_opf19 USING btree; diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index f56615393ec32..0ce6ee4622d4b 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -3868,6 +3868,8 @@ SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::reg CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS); ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); ERROR: partition "fail_part" would overlap partition "part_1" +LINE 1: ...LE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); + ^ DROP TABLE fail_part; -- check that an existing table can be attached as a default partition CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS); @@ -3877,6 +3879,8 @@ ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT; CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS); ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; ERROR: partition "fail_def_part" conflicts with existing default partition "def_part" +LINE 1: ...ER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; + ^ -- check validation when attaching list partitions CREATE TABLE list_parted2 ( a int, @@ -3946,6 +3950,8 @@ CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT; CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS); ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; ERROR: partition "partr_def2" conflicts with existing 
default partition "partr_def1" +LINE 1: ...LTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; + ^ -- Overlapping partitions cannot be attached, hence, following should give error INSERT INTO partr_def1 VALUES (2, 10); CREATE TABLE part3 (LIKE range_parted); @@ -4066,8 +4072,12 @@ CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAIN CREATE TABLE fail_part (LIKE hpart_1); ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4); ERROR: partition "fail_part" would overlap partition "hpart_1" +LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU... + ^ ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0); ERROR: partition "fail_part" would overlap partition "hpart_1" +LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU... + ^ DROP TABLE fail_part; -- check validation when attaching hash partitions -- check that violating rows are correctly reported diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out index e40287d25a413..c64f0719e7b6c 100644 --- a/src/test/regress/expected/copy2.out +++ b/src/test/regress/expected/copy2.out @@ -28,6 +28,53 @@ COPY x (a, b, c, d, e) from stdin; -- non-existent column in column list: should fail COPY x (xyz) from stdin; ERROR: column "xyz" of relation "x" does not exist +-- redundant options +COPY x from stdin (format CSV, FORMAT CSV); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (format CSV, FORMAT CSV); + ^ +COPY x from stdin (freeze off, freeze on); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (freeze off, freeze on); + ^ +COPY x from stdin (delimiter ',', delimiter ','); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (delimiter ',', delimiter ','); + ^ +COPY x from stdin (null ' ', null ' '); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (null ' ', null ' '); + ^ 
+COPY x from stdin (header off, header on); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (header off, header on); + ^ +COPY x from stdin (quote ':', quote ':'); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (quote ':', quote ':'); + ^ +COPY x from stdin (escape ':', escape ':'); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (escape ':', escape ':'); + ^ +COPY x from stdin (force_quote (a), force_quote *); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (force_quote (a), force_quote *); + ^ +COPY x from stdin (force_not_null (a), force_not_null (b)); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (force_not_null (a), force_not_null (b)); + ^ +COPY x from stdin (force_null (a), force_null (b)); +ERROR: conflicting or redundant options +COPY x from stdin (convert_selectively (a), convert_selectively (b)); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (convert_selectively (a), convert_selectiv... + ^ +COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii'); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii... 
+ ^ -- too many columns in column list: should fail COPY x (a, b, c, d, e, d, c) from stdin; ERROR: column "d" specified more than once diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out index 64c0c668593e8..6ace7662ee1f7 100644 --- a/src/test/regress/expected/create_index.out +++ b/src/test/regress/expected/create_index.out @@ -523,8 +523,8 @@ SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; f1 ------------------- - (0,0) (1e-300,-1e-300) + (0,0) (-3,4) (-10,0) (10,10) @@ -561,8 +561,8 @@ SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; f1 ------------------- - (0,0) (1e-300,-1e-300) + (0,0) (-3,4) (-10,0) (10,10) @@ -584,8 +584,8 @@ SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0 SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; f1 ------------------ - (0,0) (1e-300,-1e-300) + (0,0) (-3,4) (-10,0) (10,10) diff --git a/src/test/regress/expected/create_operator.out b/src/test/regress/expected/create_operator.out index 54e8b791595dc..530327759140c 100644 --- a/src/test/regress/expected/create_operator.out +++ b/src/test/regress/expected/create_operator.out @@ -15,17 +15,15 @@ CREATE OPERATOR <% ( negator = >=% ); CREATE OPERATOR @#@ ( - rightarg = int8, -- left unary - procedure = numeric_fac -); -CREATE OPERATOR #@# ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, -- prefix + procedure = factorial ); CREATE OPERATOR #%# ( - leftarg = int8, -- right unary - procedure = numeric_fac + leftarg = int8, -- fail, postfix is no longer supported + procedure = factorial ); +ERROR: operator right argument type must be specified +DETAIL: Postfix operators are not supported. 
-- Test operator created above SELECT point '(1,2)' <% widget '(0,0,3)' AS t, point '(1,2)' <% widget '(0,0,1)' AS f; @@ -35,12 +33,23 @@ SELECT point '(1,2)' <% widget '(0,0,3)' AS t, (1 row) -- Test comments -COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad right unary'; -ERROR: operator does not exist: integer ###### --- => is disallowed now +COMMENT ON OPERATOR ###### (NONE, int4) IS 'bad prefix'; +ERROR: operator does not exist: ###### integer +COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad postfix'; +ERROR: postfix operators are not supported +COMMENT ON OPERATOR ###### (int4, int8) IS 'bad infix'; +ERROR: operator does not exist: integer ###### bigint +-- Check that DROP on a nonexistent op behaves sanely, too +DROP OPERATOR ###### (NONE, int4); +ERROR: operator does not exist: ###### integer +DROP OPERATOR ###### (int4, NONE); +ERROR: postfix operators are not supported +DROP OPERATOR ###### (int4, int8); +ERROR: operator does not exist: integer ###### bigint +-- => is disallowed as an operator name now CREATE OPERATOR => ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, + procedure = factorial ); ERROR: syntax error at or near "=>" LINE 1: CREATE OPERATOR => ( @@ -49,15 +58,20 @@ LINE 1: CREATE OPERATOR => ( -- (=> is tested elsewhere) -- this is legal because ! is not allowed in sql ops CREATE OPERATOR !=- ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, + procedure = factorial ); -SELECT 2 !=-; +SELECT !=- 10; ?column? ---------- - 2 + 3628800 (1 row) +-- postfix operators don't work anymore +SELECT 10 !=-; +ERROR: syntax error at or near ";" +LINE 1: SELECT 10 !=-; + ^ -- make sure lexer returns != as <> even in edge cases SELECT 2 !=/**/ 1, 2 !=/**/ 2; ?column? | ?column? 
@@ -127,8 +141,8 @@ GRANT USAGE ON SCHEMA schema_op1 TO PUBLIC; REVOKE USAGE ON SCHEMA schema_op1 FROM regress_rol_op1; SET ROLE regress_rol_op1; CREATE OPERATOR schema_op1.#*# ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, + procedure = factorial ); ERROR: permission denied for schema schema_op1 ROLLBACK; @@ -136,7 +150,7 @@ ROLLBACK; BEGIN TRANSACTION; CREATE OPERATOR #*# ( leftarg = SETOF int8, - procedure = numeric_fac + procedure = factorial ); ERROR: SETOF type not allowed for operator argument ROLLBACK; @@ -144,7 +158,7 @@ ROLLBACK; BEGIN TRANSACTION; CREATE OPERATOR #*# ( rightarg = SETOF int8, - procedure = numeric_fac + procedure = factorial ); ERROR: SETOF type not allowed for operator argument ROLLBACK; @@ -167,19 +181,19 @@ CREATE OPERATOR === ( ROLLBACK; -- Should fail. Invalid attribute CREATE OPERATOR #@%# ( - leftarg = int8, -- right unary - procedure = numeric_fac, + rightarg = int8, + procedure = factorial, invalid_att = int8 ); WARNING: operator attribute "invalid_att" not recognized --- Should fail. At least leftarg or rightarg should be mandatorily specified +-- Should fail. At least rightarg should be mandatorily specified CREATE OPERATOR #@%# ( - procedure = numeric_fac + procedure = factorial ); -ERROR: at least one of leftarg or rightarg must be specified +ERROR: operator argument types must be specified -- Should fail. Procedure should be mandatorily specified CREATE OPERATOR #@%# ( - leftarg = int8 + rightarg = int8 ); ERROR: operator function must be specified -- Should fail. 
CREATE OPERATOR requires USAGE on TYPE diff --git a/src/test/regress/expected/create_procedure.out b/src/test/regress/expected/create_procedure.out index 211a42cefa039..3838fa2324da4 100644 --- a/src/test/regress/expected/create_procedure.out +++ b/src/test/regress/expected/create_procedure.out @@ -146,6 +146,19 @@ AS $$ SELECT a = b; $$; CALL ptest7(least('a', 'b'), 'a'); +-- OUT parameters +CREATE PROCEDURE ptest9(OUT a int) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES (1, 'a'); +SELECT 1; +$$; +CALL ptest9(NULL); + a +--- + 1 +(1 row) + -- various error cases CALL version(); -- error: not a procedure ERROR: version() is not a procedure @@ -165,9 +178,6 @@ CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT INTO cp_test VALUES ( ERROR: invalid attribute in procedure definition LINE 1: CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT I... ^ -CREATE PROCEDURE ptestx(OUT a int) LANGUAGE SQL AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; -ERROR: procedures cannot have OUT arguments -HINT: INOUT arguments are permitted. ALTER PROCEDURE ptest1(text) STRICT; ERROR: invalid attribute in procedure definition LINE 1: ALTER PROCEDURE ptest1(text) STRICT; diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out index 1c72f23bc93b8..1fc266dd65cbe 100644 --- a/src/test/regress/expected/create_table.out +++ b/src/test/regress/expected/create_table.out @@ -652,8 +652,6 @@ CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (genera ERROR: set-returning functions are not allowed in partition bound LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (generate_s... 
^ -CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ('1' collate "POSIX"); -ERROR: collations are not supported by type integer CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "POSIX"); ERROR: collations are not supported by type integer LINE 1: ...ail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "P... @@ -677,6 +675,8 @@ LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODU... CREATE TABLE part_default PARTITION OF list_parted DEFAULT; CREATE TABLE fail_default_part PARTITION OF list_parted DEFAULT; ERROR: partition "fail_default_part" conflicts with existing default partition "part_default" +LINE 1: ...TE TABLE fail_default_part PARTITION OF list_parted DEFAULT; + ^ -- specified literal can't be cast to the partition column data type CREATE TABLE bools ( a bool @@ -702,6 +702,8 @@ CREATE TABLE bigintp_10 PARTITION OF bigintp FOR VALUES IN (10); -- fails due to overlap: CREATE TABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10'); ERROR: partition "bigintp_10_2" would overlap partition "bigintp_10" +LINE 1: ...ABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10'); + ^ DROP TABLE bigintp; CREATE TABLE range_parted ( a date @@ -823,8 +825,12 @@ CREATE TABLE part_ab PARTITION OF list_parted2 FOR VALUES IN ('a', 'b'); CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN (null); ERROR: partition "fail_part" would overlap partition "part_null_z" +LINE 1: ...LE fail_part PARTITION OF list_parted2 FOR VALUES IN (null); + ^ CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c'); ERROR: partition "fail_part" would overlap partition "part_ab" +LINE 1: ...ail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c'); + ^ -- check default partition overlap INSERT INTO list_parted2 VALUES('X'); CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('W', 'X', 'Y'); @@ -835,28 
+841,42 @@ CREATE TABLE range_parted2 ( -- trying to create range partition with empty range CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0); ERROR: empty range bound specified for partition "fail_part" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0); + ^ DETAIL: Specified lower bound (1) is greater than or equal to upper bound (0). -- note that the range '[1, 1)' has no elements CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1); ERROR: empty range bound specified for partition "fail_part" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1); + ^ DETAIL: Specified lower bound (1) is greater than or equal to upper bound (1). CREATE TABLE part0 PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (1); CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (2); ERROR: partition "fail_part" would overlap partition "part0" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) ... + ^ CREATE TABLE part1 PARTITION OF range_parted2 FOR VALUES FROM (1) TO (10); CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (maxvalue); ERROR: partition "fail_part" would overlap partition "part1" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (max... 
+ ^ CREATE TABLE part2 PARTITION OF range_parted2 FOR VALUES FROM (20) TO (30); CREATE TABLE part3 PARTITION OF range_parted2 FOR VALUES FROM (30) TO (40); CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30); ERROR: partition "fail_part" would overlap partition "part2" +LINE 1: ...art PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30); + ^ CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50); ERROR: partition "fail_part" would overlap partition "part2" +LINE 1: ...art PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50); + ^ -- Create a default partition for range partitioned table CREATE TABLE range2_default PARTITION OF range_parted2 DEFAULT; -- More than one default partition is not allowed, so this should give error CREATE TABLE fail_default_part PARTITION OF range_parted2 DEFAULT; ERROR: partition "fail_default_part" conflicts with existing default partition "range2_default" +LINE 1: ... TABLE fail_default_part PARTITION OF range_parted2 DEFAULT; + ^ -- Check if the range for default partitions overlap INSERT INTO range_parted2 VALUES (85); CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (80) TO (90); @@ -870,17 +890,23 @@ CREATE TABLE range_parted3 ( CREATE TABLE part00 PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, maxvalue); CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, 1); ERROR: partition "fail_part" would overlap partition "part00" +LINE 1: ..._part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalu... 
+ ^ CREATE TABLE part10 PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, 1); CREATE TABLE part11 PARTITION OF range_parted3 FOR VALUES FROM (1, 1) TO (1, 10); CREATE TABLE part12 PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, maxvalue); CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, 20); ERROR: partition "fail_part" would overlap partition "part12" +LINE 1: ...rt PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1,... + ^ CREATE TABLE range3_default PARTITION OF range_parted3 DEFAULT; -- cannot create a partition that says column b is allowed to range -- from -infinity to +infinity, while there exist partitions that have -- more specific ranges CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, maxvalue); ERROR: partition "fail_part" would overlap partition "part10" +LINE 1: ..._part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalu... + ^ -- check for partition bound overlap and other invalid specifications for the hash partition CREATE TABLE hash_parted2 ( a varchar @@ -892,6 +918,8 @@ CREATE TABLE h2part_4 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMA -- overlap with part_4 CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); ERROR: partition "fail_part" would overlap partition "h2part_4" +LINE 1: ...LE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODU... + ^ -- modulus must be greater than zero CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 0, REMAINDER 1); ERROR: modulus for hash partition must be a positive integer @@ -986,6 +1014,13 @@ DETAIL: Failing row contains (1, null). 
Partition of: parted_notnull_inh_test FOR VALUES IN (1) drop table parted_notnull_inh_test; +-- check that collations are assigned in partition bound expressions +create table parted_boolean_col (a bool, b text) partition by list(a); +create table parted_boolean_less partition of parted_boolean_col + for values in ('foo' < 'bar'); +create table parted_boolean_greater partition of parted_boolean_col + for values in ('foo' > 'bar'); +drop table parted_boolean_col; -- check for a conflicting COLLATE clause create table parted_collate_must_match (a text collate "C", b text collate "C") partition by range (a); @@ -996,26 +1031,15 @@ create table parted_collate_must_match1 partition of parted_collate_must_match create table parted_collate_must_match2 partition of parted_collate_must_match (b collate "POSIX") for values from ('m') to ('z'); drop table parted_collate_must_match; --- check that specifying incompatible collations for partition bound --- expressions fails promptly +-- check that non-matching collations for partition bound +-- expressions are coerced to the right collation create table test_part_coll_posix (a text) partition by range (a collate "POSIX"); --- fail +-- ok, collation is implicitly coerced create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "C") to ('g'); -ERROR: collation of partition bound value for column "a" does not match partition key collation "POSIX" -LINE 1: ...artition of test_part_coll_posix for values from ('a' collat... 
- ^ --- ok -create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "POSIX") to ('g'); -- ok create table test_part_coll2 partition of test_part_coll_posix for values from ('g') to ('m'); --- using a cast expression uses the target type's default collation --- fail +-- ok, collation is implicitly coerced create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "C") to ('s'); -ERROR: collation of partition bound value for column "a" does not match partition key collation "POSIX" -LINE 1: ...ion of test_part_coll_posix for values from (name 'm' collat... - ^ --- ok -create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "POSIX") to ('s'); -- ok; partition collation silently overrides the default collation of type 'name' create table test_part_coll_cast2 partition of test_part_coll_posix for values from (name 's') to ('z'); drop table test_part_coll_posix; diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out index e3edbd8b511cd..912c73d351ef9 100644 --- a/src/test/regress/expected/create_table_like.out +++ b/src/test/regress/expected/create_table_like.out @@ -421,6 +421,24 @@ CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); NOTICE: merging column "a" with inherited definition ERROR: column "a" has a storage parameter conflict DETAIL: MAIN versus EXTENDED +-- Check that LIKE isn't confused by a system catalog of the same name +CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); +\d+ public.pg_attrdef + Table "public.pg_attrdef" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | A + b | text | | | | extended | | B +Indexes: + "pg_attrdef_pkey" PRIMARY KEY, btree (a) + "pg_attrdef_b_idx" btree (b) + 
"pg_attrdef_expr_idx" btree ((a || b)) +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) +Statistics objects: + "public"."pg_attrdef_a_b_stat" (ndistinct, dependencies, mcv) ON a, b FROM public.pg_attrdef + +DROP TABLE public.pg_attrdef; DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE; NOTICE: drop cascades to table inhe -- LIKE must respect NO INHERIT property of constraints diff --git a/src/test/regress/expected/date.out b/src/test/regress/expected/date.out index 4cdf1635f2a0a..1b921ce215b58 100644 --- a/src/test/regress/expected/date.out +++ b/src/test/regress/expected/date.out @@ -21,9 +21,10 @@ INSERT INTO DATE_TBL VALUES ('2000-04-03'); INSERT INTO DATE_TBL VALUES ('2038-04-08'); INSERT INTO DATE_TBL VALUES ('2039-04-09'); INSERT INTO DATE_TBL VALUES ('2040-04-10'); -SELECT f1 AS "Fifteen" FROM DATE_TBL; - Fifteen ------------- +INSERT INTO DATE_TBL VALUES ('2040-04-10 BC'); +SELECT f1 FROM DATE_TBL; + f1 +--------------- 04-09-1957 06-13-1957 02-28-1996 @@ -39,11 +40,12 @@ SELECT f1 AS "Fifteen" FROM DATE_TBL; 04-08-2038 04-09-2039 04-10-2040 -(15 rows) + 04-10-2040 BC +(16 rows) -SELECT f1 AS "Nine" FROM DATE_TBL WHERE f1 < '2000-01-01'; - Nine ------------- +SELECT f1 FROM DATE_TBL WHERE f1 < '2000-01-01'; + f1 +--------------- 04-09-1957 06-13-1957 02-28-1996 @@ -53,11 +55,12 @@ SELECT f1 AS "Nine" FROM DATE_TBL WHERE f1 < '2000-01-01'; 02-28-1997 03-01-1997 03-02-1997 -(9 rows) + 04-10-2040 BC +(10 rows) -SELECT f1 AS "Three" FROM DATE_TBL +SELECT f1 FROM DATE_TBL WHERE f1 BETWEEN '2000-01-01' AND '2001-01-01'; - Three + f1 ------------ 04-01-2000 04-02-2000 @@ -860,7 +863,8 @@ SELECT f1 - date '2000-01-01' AS "Days From 2K" FROM DATE_TBL; 13977 14343 14710 -(15 rows) + -1475115 +(16 rows) SELECT f1 - date 'epoch' AS "Days From Epoch" FROM DATE_TBL; Days From Epoch @@ -880,7 +884,8 @@ SELECT f1 - date 'epoch' AS "Days From Epoch" FROM DATE_TBL; 24934 25300 
25667 -(15 rows) + -1464158 +(16 rows) SELECT date 'yesterday' - date 'today' AS "One day"; One day @@ -920,6 +925,43 @@ SELECT date 'tomorrow' - date 'yesterday' AS "Two days"; -- -- test extract! +-- +SELECT f1 as "date", + date_part('year', f1) AS year, + date_part('month', f1) AS month, + date_part('day', f1) AS day, + date_part('quarter', f1) AS quarter, + date_part('decade', f1) AS decade, + date_part('century', f1) AS century, + date_part('millennium', f1) AS millennium, + date_part('isoyear', f1) AS isoyear, + date_part('week', f1) AS week, + date_part('dow', f1) AS dow, + date_part('isodow', f1) AS isodow, + date_part('doy', f1) AS doy, + date_part('julian', f1) AS julian, + date_part('epoch', f1) AS epoch + FROM date_tbl; + date | year | month | day | quarter | decade | century | millennium | isoyear | week | dow | isodow | doy | julian | epoch +---------------+-------+-------+-----+---------+--------+---------+------------+---------+------+-----+--------+-----+---------+--------------- + 04-09-1957 | 1957 | 4 | 9 | 2 | 195 | 20 | 2 | 1957 | 15 | 2 | 2 | 99 | 2435938 | -401760000 + 06-13-1957 | 1957 | 6 | 13 | 2 | 195 | 20 | 2 | 1957 | 24 | 4 | 4 | 164 | 2436003 | -396144000 + 02-28-1996 | 1996 | 2 | 28 | 1 | 199 | 20 | 2 | 1996 | 9 | 3 | 3 | 59 | 2450142 | 825465600 + 02-29-1996 | 1996 | 2 | 29 | 1 | 199 | 20 | 2 | 1996 | 9 | 4 | 4 | 60 | 2450143 | 825552000 + 03-01-1996 | 1996 | 3 | 1 | 1 | 199 | 20 | 2 | 1996 | 9 | 5 | 5 | 61 | 2450144 | 825638400 + 03-02-1996 | 1996 | 3 | 2 | 1 | 199 | 20 | 2 | 1996 | 9 | 6 | 6 | 62 | 2450145 | 825724800 + 02-28-1997 | 1997 | 2 | 28 | 1 | 199 | 20 | 2 | 1997 | 9 | 5 | 5 | 59 | 2450508 | 857088000 + 03-01-1997 | 1997 | 3 | 1 | 1 | 199 | 20 | 2 | 1997 | 9 | 6 | 6 | 60 | 2450509 | 857174400 + 03-02-1997 | 1997 | 3 | 2 | 1 | 199 | 20 | 2 | 1997 | 9 | 0 | 7 | 61 | 2450510 | 857260800 + 04-01-2000 | 2000 | 4 | 1 | 2 | 200 | 20 | 2 | 2000 | 13 | 6 | 6 | 92 | 2451636 | 954547200 + 04-02-2000 | 2000 | 4 | 2 | 2 | 200 | 20 | 2 
| 2000 | 13 | 0 | 7 | 93 | 2451637 | 954633600 + 04-03-2000 | 2000 | 4 | 3 | 2 | 200 | 20 | 2 | 2000 | 14 | 1 | 1 | 94 | 2451638 | 954720000 + 04-08-2038 | 2038 | 4 | 8 | 2 | 203 | 21 | 3 | 2038 | 14 | 4 | 4 | 98 | 2465522 | 2154297600 + 04-09-2039 | 2039 | 4 | 9 | 2 | 203 | 21 | 3 | 2039 | 14 | 6 | 6 | 99 | 2465888 | 2185920000 + 04-10-2040 | 2040 | 4 | 10 | 2 | 204 | 21 | 3 | 2040 | 15 | 2 | 2 | 101 | 2466255 | 2217628800 + 04-10-2040 BC | -2040 | 4 | 10 | 2 | -204 | -21 | -3 | -2040 | 15 | 1 | 1 | 100 | 976430 | -126503251200 +(16 rows) + -- -- epoch -- @@ -1111,6 +1153,132 @@ SELECT EXTRACT(CENTURY FROM TIMESTAMP '1970-03-20 04:30:00.00000'); -- 20 20 (1 row) +-- +-- all possible fields +-- +SELECT EXTRACT(MICROSECONDS FROM DATE '2020-08-11'); + date_part +----------- + 0 +(1 row) + +SELECT EXTRACT(MILLISECONDS FROM DATE '2020-08-11'); + date_part +----------- + 0 +(1 row) + +SELECT EXTRACT(SECOND FROM DATE '2020-08-11'); + date_part +----------- + 0 +(1 row) + +SELECT EXTRACT(MINUTE FROM DATE '2020-08-11'); + date_part +----------- + 0 +(1 row) + +SELECT EXTRACT(HOUR FROM DATE '2020-08-11'); + date_part +----------- + 0 +(1 row) + +SELECT EXTRACT(DAY FROM DATE '2020-08-11'); + date_part +----------- + 11 +(1 row) + +SELECT EXTRACT(MONTH FROM DATE '2020-08-11'); + date_part +----------- + 8 +(1 row) + +SELECT EXTRACT(YEAR FROM DATE '2020-08-11'); + date_part +----------- + 2020 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '2020-08-11'); + date_part +----------- + 202 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '2020-08-11'); + date_part +----------- + 21 +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE '2020-08-11'); + date_part +----------- + 3 +(1 row) + +SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11'); + date_part +----------- + 2020 +(1 row) + +SELECT EXTRACT(QUARTER FROM DATE '2020-08-11'); + date_part +----------- + 3 +(1 row) + +SELECT EXTRACT(WEEK FROM DATE '2020-08-11'); + date_part +----------- + 33 +(1 row) + +SELECT EXTRACT(DOW FROM DATE 
'2020-08-11'); + date_part +----------- + 2 +(1 row) + +SELECT EXTRACT(ISODOW FROM DATE '2020-08-11'); + date_part +----------- + 2 +(1 row) + +SELECT EXTRACT(DOY FROM DATE '2020-08-11'); + date_part +----------- + 224 +(1 row) + +SELECT EXTRACT(TIMEZONE FROM DATE '2020-08-11'); +ERROR: timestamp units "timezone" not supported +CONTEXT: SQL function "date_part" statement 1 +SELECT EXTRACT(TIMEZONE_M FROM DATE '2020-08-11'); +ERROR: timestamp units "timezone_m" not supported +CONTEXT: SQL function "date_part" statement 1 +SELECT EXTRACT(TIMEZONE_H FROM DATE '2020-08-11'); +ERROR: timestamp units "timezone_h" not supported +CONTEXT: SQL function "date_part" statement 1 +SELECT EXTRACT(EPOCH FROM DATE '2020-08-11'); + date_part +------------ + 1597104000 +(1 row) + +SELECT EXTRACT(JULIAN FROM DATE '2020-08-11'); + date_part +----------- + 2459073 +(1 row) + -- -- test trunc function! -- @@ -1439,6 +1607,8 @@ select make_time(8, 20, 0.0); (1 row) -- should fail +select make_date(0, 7, 15); +ERROR: date field value out of range: 0-07-15 select make_date(2013, 2, 30); ERROR: date field value out of range: 2013-02-30 select make_date(2013, 13, 1); diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out index a525aa2f93778..1e7b5a7046197 100644 --- a/src/test/regress/expected/errors.out +++ b/src/test/regress/expected/errors.out @@ -440,13 +440,3 @@ NULL); ERROR: syntax error at or near "NUL" LINE 16: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... ^ --- Check that stack depth detection mechanism works and --- max_stack_depth is not set too high. The full error report is not --- very stable, so show only SQLSTATE and primary error message. 
-create function infinite_recurse() returns int as -'select infinite_recurse()' language sql; -\set VERBOSITY sqlstate -select infinite_recurse(); -ERROR: 54001 -\echo :LAST_ERROR_MESSAGE -stack depth limit exceeded diff --git a/src/test/regress/expected/expressions.out b/src/test/regress/expected/expressions.out index 4f4deaec2231c..05a6eb07b2e74 100644 --- a/src/test/regress/expected/expressions.out +++ b/src/test/regress/expected/expressions.out @@ -121,7 +121,7 @@ select count(*) from date_tbl where f1 not between '1997-01-01' and '1998-01-01'; count ------- - 12 + 13 (1 row) explain (costs off) @@ -155,6 +155,6 @@ select count(*) from date_tbl where f1 not between symmetric '1997-01-01' and '1998-01-01'; count ------- - 12 + 13 (1 row) diff --git a/src/test/regress/expected/hash_func.out b/src/test/regress/expected/hash_func.out index da0948e95a935..e6e3410aaa183 100644 --- a/src/test/regress/expected/hash_func.out +++ b/src/test/regress/expected/hash_func.out @@ -16,8 +16,8 @@ WHERE hashint2(v)::bit(32) != hashint2extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashint4(v)::bit(32) as standard, - hashint4extended(v, 0)::bit(32) as extended0, - hashint4extended(v, 1)::bit(32) as extended1 + hashint4extended(v, 0)::bit(32) as extended0, + hashint4extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashint4(v)::bit(32) != hashint4extended(v, 0)::bit(32) OR hashint4(v)::bit(32) = hashint4extended(v, 1)::bit(32); @@ -26,8 +26,8 @@ WHERE hashint4(v)::bit(32) != hashint4extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashint8(v)::bit(32) as standard, - hashint8extended(v, 0)::bit(32) as extended0, - hashint8extended(v, 1)::bit(32) as extended1 + hashint8extended(v, 0)::bit(32) as extended0, + hashint8extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashint8(v)::bit(32) != hashint8extended(v, 0)::bit(32) OR hashint8(v)::bit(32) = hashint8extended(v, 
1)::bit(32); @@ -36,8 +36,8 @@ WHERE hashint8(v)::bit(32) != hashint8extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashfloat4(v)::bit(32) as standard, - hashfloat4extended(v, 0)::bit(32) as extended0, - hashfloat4extended(v, 1)::bit(32) as extended1 + hashfloat4extended(v, 0)::bit(32) as extended0, + hashfloat4extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashfloat4(v)::bit(32) != hashfloat4extended(v, 0)::bit(32) OR hashfloat4(v)::bit(32) = hashfloat4extended(v, 1)::bit(32); @@ -46,8 +46,8 @@ WHERE hashfloat4(v)::bit(32) != hashfloat4extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashfloat8(v)::bit(32) as standard, - hashfloat8extended(v, 0)::bit(32) as extended0, - hashfloat8extended(v, 1)::bit(32) as extended1 + hashfloat8extended(v, 0)::bit(32) as extended0, + hashfloat8extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashfloat8(v)::bit(32) != hashfloat8extended(v, 0)::bit(32) OR hashfloat8(v)::bit(32) = hashfloat8extended(v, 1)::bit(32); @@ -56,8 +56,8 @@ WHERE hashfloat8(v)::bit(32) != hashfloat8extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashoid(v)::bit(32) as standard, - hashoidextended(v, 0)::bit(32) as extended0, - hashoidextended(v, 1)::bit(32) as extended1 + hashoidextended(v, 0)::bit(32) as extended0, + hashoidextended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashoid(v)::bit(32) != hashoidextended(v, 0)::bit(32) OR hashoid(v)::bit(32) = hashoidextended(v, 1)::bit(32); @@ -66,8 +66,8 @@ WHERE hashoid(v)::bit(32) != hashoidextended(v, 0)::bit(32) (0 rows) SELECT v as value, hashchar(v)::bit(32) as standard, - hashcharextended(v, 0)::bit(32) as extended0, - hashcharextended(v, 1)::bit(32) as extended1 + hashcharextended(v, 0)::bit(32) as extended0, + hashcharextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::"char"), ('1'), ('x'), ('X'), ('p'), ('N')) x(v) 
WHERE hashchar(v)::bit(32) != hashcharextended(v, 0)::bit(32) OR hashchar(v)::bit(32) = hashcharextended(v, 1)::bit(32); @@ -76,10 +76,10 @@ WHERE hashchar(v)::bit(32) != hashcharextended(v, 0)::bit(32) (0 rows) SELECT v as value, hashname(v)::bit(32) as standard, - hashnameextended(v, 0)::bit(32) as extended0, - hashnameextended(v, 1)::bit(32) as extended1 + hashnameextended(v, 0)::bit(32) as extended0, + hashnameextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), - ('muop28x03'), ('yi3nm0d73')) x(v) + ('muop28x03'), ('yi3nm0d73')) x(v) WHERE hashname(v)::bit(32) != hashnameextended(v, 0)::bit(32) OR hashname(v)::bit(32) = hashnameextended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -87,10 +87,10 @@ WHERE hashname(v)::bit(32) != hashnameextended(v, 0)::bit(32) (0 rows) SELECT v as value, hashtext(v)::bit(32) as standard, - hashtextextended(v, 0)::bit(32) as extended0, - hashtextextended(v, 1)::bit(32) as extended1 + hashtextextended(v, 0)::bit(32) as extended0, + hashtextextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), - ('muop28x03'), ('yi3nm0d73')) x(v) + ('muop28x03'), ('yi3nm0d73')) x(v) WHERE hashtext(v)::bit(32) != hashtextextended(v, 0)::bit(32) OR hashtext(v)::bit(32) = hashtextextended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -98,8 +98,8 @@ WHERE hashtext(v)::bit(32) != hashtextextended(v, 0)::bit(32) (0 rows) SELECT v as value, hashoidvector(v)::bit(32) as standard, - hashoidvectorextended(v, 0)::bit(32) as extended0, - hashoidvectorextended(v, 1)::bit(32) as extended1 + hashoidvectorextended(v, 0)::bit(32) as extended0, + hashoidvectorextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::oidvector), ('0 1 2 3 4'), ('17 18 19 20'), ('42 43 42 45'), ('550273 550273 570274'), ('207112489 207112499 21512 2155 372325 1363252')) x(v) @@ -110,8 +110,8 @@ WHERE hashoidvector(v)::bit(32) != hashoidvectorextended(v, 
0)::bit(32) (0 rows) SELECT v as value, hash_aclitem(v)::bit(32) as standard, - hash_aclitem_extended(v, 0)::bit(32) as extended0, - hash_aclitem_extended(v, 1)::bit(32) as extended1 + hash_aclitem_extended(v, 0)::bit(32) as extended0, + hash_aclitem_extended(v, 1)::bit(32) as extended1 FROM (SELECT DISTINCT(relacl[1]) FROM pg_class LIMIT 10) x(v) WHERE hash_aclitem(v)::bit(32) != hash_aclitem_extended(v, 0)::bit(32) OR hash_aclitem(v)::bit(32) = hash_aclitem_extended(v, 1)::bit(32); @@ -120,10 +120,10 @@ WHERE hash_aclitem(v)::bit(32) != hash_aclitem_extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashmacaddr(v)::bit(32) as standard, - hashmacaddrextended(v, 0)::bit(32) as extended0, - hashmacaddrextended(v, 1)::bit(32) as extended1 + hashmacaddrextended(v, 0)::bit(32) as extended0, + hashmacaddrextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::macaddr), ('08:00:2b:01:02:04'), ('08:00:2b:01:02:04'), - ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), + ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), ('ea:29:b1:5e:1f:a5')) x(v) WHERE hashmacaddr(v)::bit(32) != hashmacaddrextended(v, 0)::bit(32) OR hashmacaddr(v)::bit(32) = hashmacaddrextended(v, 1)::bit(32); @@ -132,10 +132,10 @@ WHERE hashmacaddr(v)::bit(32) != hashmacaddrextended(v, 0)::bit(32) (0 rows) SELECT v as value, hashinet(v)::bit(32) as standard, - hashinetextended(v, 0)::bit(32) as extended0, - hashinetextended(v, 1)::bit(32) as extended1 + hashinetextended(v, 0)::bit(32) as extended0, + hashinetextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::inet), ('192.168.100.128/25'), ('192.168.100.0/8'), - ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) + ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) WHERE hashinet(v)::bit(32) != hashinetextended(v, 0)::bit(32) OR hashinet(v)::bit(32) = hashinetextended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -143,8 +143,8 @@ WHERE hashinet(v)::bit(32) != hashinetextended(v, 0)::bit(32) (0 rows) 
SELECT v as value, hash_numeric(v)::bit(32) as standard, - hash_numeric_extended(v, 0)::bit(32) as extended0, - hash_numeric_extended(v, 1)::bit(32) as extended1 + hash_numeric_extended(v, 0)::bit(32) as extended0, + hash_numeric_extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1.149484958), (17.149484958), (42.149484958), (149484958.550273), (2071124898672)) x(v) WHERE hash_numeric(v)::bit(32) != hash_numeric_extended(v, 0)::bit(32) @@ -154,8 +154,8 @@ WHERE hash_numeric(v)::bit(32) != hash_numeric_extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashmacaddr8(v)::bit(32) as standard, - hashmacaddr8extended(v, 0)::bit(32) as extended0, - hashmacaddr8extended(v, 1)::bit(32) as extended1 + hashmacaddr8extended(v, 0)::bit(32) as extended0, + hashmacaddr8extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::macaddr8), ('08:00:2b:01:02:04:36:49'), ('08:00:2b:01:02:04:f0:e8'), ('e2:7f:51:3e:70:49:16:29'), ('d6:a9:4a:78:1c:d5:47:32'), ('ea:29:b1:5e:1f:a5')) x(v) @@ -166,8 +166,8 @@ WHERE hashmacaddr8(v)::bit(32) != hashmacaddr8extended(v, 0)::bit(32) (0 rows) SELECT v as value, hash_array(v)::bit(32) as standard, - hash_array_extended(v, 0)::bit(32) as extended0, - hash_array_extended(v, 1)::bit(32) as extended1 + hash_array_extended(v, 0)::bit(32) as extended0, + hash_array_extended(v, 1)::bit(32) as extended1 FROM (VALUES ('{0}'::int4[]), ('{0,1,2,3,4}'), ('{17,18,19,20}'), ('{42,34,65,98}'), ('{550273,590027, 870273}'), ('{207112489, 807112489}')) x(v) @@ -178,10 +178,10 @@ WHERE hash_array(v)::bit(32) != hash_array_extended(v, 0)::bit(32) (0 rows) SELECT v as value, hashbpchar(v)::bit(32) as standard, - hashbpcharextended(v, 0)::bit(32) as extended0, - hashbpcharextended(v, 1)::bit(32) as extended1 + hashbpcharextended(v, 0)::bit(32) as extended0, + hashbpcharextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), - ('muop28x03'), ('yi3nm0d73')) x(v) + ('muop28x03'), ('yi3nm0d73')) x(v) WHERE 
hashbpchar(v)::bit(32) != hashbpcharextended(v, 0)::bit(32) OR hashbpchar(v)::bit(32) = hashbpcharextended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -189,8 +189,8 @@ WHERE hashbpchar(v)::bit(32) != hashbpcharextended(v, 0)::bit(32) (0 rows) SELECT v as value, time_hash(v)::bit(32) as standard, - time_hash_extended(v, 0)::bit(32) as extended0, - time_hash_extended(v, 1)::bit(32) as extended1 + time_hash_extended(v, 0)::bit(32) as extended0, + time_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::time), ('11:09:59'), ('1:09:59'), ('11:59:59'), ('7:9:59'), ('5:15:59')) x(v) WHERE time_hash(v)::bit(32) != time_hash_extended(v, 0)::bit(32) @@ -200,10 +200,10 @@ WHERE time_hash(v)::bit(32) != time_hash_extended(v, 0)::bit(32) (0 rows) SELECT v as value, timetz_hash(v)::bit(32) as standard, - timetz_hash_extended(v, 0)::bit(32) as extended0, - timetz_hash_extended(v, 1)::bit(32) as extended1 + timetz_hash_extended(v, 0)::bit(32) as extended0, + timetz_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::timetz), ('00:11:52.518762-07'), ('00:11:52.51762-08'), - ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) + ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) WHERE timetz_hash(v)::bit(32) != timetz_hash_extended(v, 0)::bit(32) OR timetz_hash(v)::bit(32) = timetz_hash_extended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -211,12 +211,12 @@ WHERE timetz_hash(v)::bit(32) != timetz_hash_extended(v, 0)::bit(32) (0 rows) SELECT v as value, interval_hash(v)::bit(32) as standard, - interval_hash_extended(v, 0)::bit(32) as extended0, - interval_hash_extended(v, 1)::bit(32) as extended1 + interval_hash_extended(v, 0)::bit(32) as extended0, + interval_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::interval), ('5 month 7 day 46 minutes'), ('1 year 7 day 46 minutes'), - ('1 year 7 month 20 day 46 minutes'), ('5 month'), - ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) + ('1 
year 7 month 20 day 46 minutes'), ('5 month'), + ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) WHERE interval_hash(v)::bit(32) != interval_hash_extended(v, 0)::bit(32) OR interval_hash(v)::bit(32) = interval_hash_extended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -224,11 +224,11 @@ WHERE interval_hash(v)::bit(32) != interval_hash_extended(v, 0)::bit(32) (0 rows) SELECT v as value, timestamp_hash(v)::bit(32) as standard, - timestamp_hash_extended(v, 0)::bit(32) as extended0, - timestamp_hash_extended(v, 1)::bit(32) as extended1 + timestamp_hash_extended(v, 0)::bit(32) as extended0, + timestamp_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::timestamp), ('2017-08-22 00:09:59.518762'), ('2015-08-20 00:11:52.51762-08'), - ('2017-05-22 00:11:52.62-01'), + ('2017-05-22 00:11:52.62-01'), ('2013-08-22 00:11:52.62+01'), ('2013-08-22 11:59:59+04')) x(v) WHERE timestamp_hash(v)::bit(32) != timestamp_hash_extended(v, 0)::bit(32) OR timestamp_hash(v)::bit(32) = timestamp_hash_extended(v, 1)::bit(32); @@ -237,12 +237,12 @@ WHERE timestamp_hash(v)::bit(32) != timestamp_hash_extended(v, 0)::bit(32) (0 rows) SELECT v as value, uuid_hash(v)::bit(32) as standard, - uuid_hash_extended(v, 0)::bit(32) as extended0, - uuid_hash_extended(v, 1)::bit(32) as extended1 + uuid_hash_extended(v, 0)::bit(32) as extended0, + uuid_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::uuid), ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'), - ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), + ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), ('99c6705c-d939-461c-a3c9-1690ad64ed7b'), - ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), + ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), ('9ad46d4f-6f2a-4edd-aadb-745993928e1e')) x(v) WHERE uuid_hash(v)::bit(32) != uuid_hash_extended(v, 0)::bit(32) OR uuid_hash(v)::bit(32) = uuid_hash_extended(v, 1)::bit(32); @@ -251,10 +251,10 @@ WHERE uuid_hash(v)::bit(32) != uuid_hash_extended(v, 0)::bit(32) (0 rows) SELECT v as value, 
pg_lsn_hash(v)::bit(32) as standard, - pg_lsn_hash_extended(v, 0)::bit(32) as extended0, - pg_lsn_hash_extended(v, 1)::bit(32) as extended1 + pg_lsn_hash_extended(v, 0)::bit(32) as extended0, + pg_lsn_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::pg_lsn), ('16/B374D84'), ('30/B374D84'), - ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) + ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) WHERE pg_lsn_hash(v)::bit(32) != pg_lsn_hash_extended(v, 0)::bit(32) OR pg_lsn_hash(v)::bit(32) = pg_lsn_hash_extended(v, 1)::bit(32); value | standard | extended0 | extended1 @@ -263,8 +263,8 @@ WHERE pg_lsn_hash(v)::bit(32) != pg_lsn_hash_extended(v, 0)::bit(32) CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'); SELECT v as value, hashenum(v)::bit(32) as standard, - hashenumextended(v, 0)::bit(32) as extended0, - hashenumextended(v, 1)::bit(32) as extended1 + hashenumextended(v, 0)::bit(32) as extended0, + hashenumextended(v, 1)::bit(32) as extended1 FROM (VALUES ('sad'::mood), ('ok'), ('happy')) x(v) WHERE hashenum(v)::bit(32) != hashenumextended(v, 0)::bit(32) OR hashenum(v)::bit(32) = hashenumextended(v, 1)::bit(32); @@ -274,12 +274,12 @@ WHERE hashenum(v)::bit(32) != hashenumextended(v, 0)::bit(32) DROP TYPE mood; SELECT v as value, jsonb_hash(v)::bit(32) as standard, - jsonb_hash_extended(v, 0)::bit(32) as extended0, - jsonb_hash_extended(v, 1)::bit(32) as extended1 + jsonb_hash_extended(v, 0)::bit(32) as extended0, + jsonb_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::jsonb), - ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), - ('{"foo": [true, "bar"], "tags": {"e": 1, "f": null}}'), - ('{"g": {"h": "value"}}')) x(v) + ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), + ('{"foo": [true, "bar"], "tags": {"e": 1, "f": null}}'), + ('{"g": {"h": "value"}}')) x(v) WHERE jsonb_hash(v)::bit(32) != jsonb_hash_extended(v, 0)::bit(32) OR jsonb_hash(v)::bit(32) = jsonb_hash_extended(v, 
1)::bit(32); value | standard | extended0 | extended1 @@ -287,11 +287,11 @@ WHERE jsonb_hash(v)::bit(32) != jsonb_hash_extended(v, 0)::bit(32) (0 rows) SELECT v as value, hash_range(v)::bit(32) as standard, - hash_range_extended(v, 0)::bit(32) as extended0, - hash_range_extended(v, 1)::bit(32) as extended1 + hash_range_extended(v, 0)::bit(32) as extended0, + hash_range_extended(v, 1)::bit(32) as extended1 FROM (VALUES (int4range(10, 20)), (int4range(23, 43)), - (int4range(5675, 550273)), - (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) + (int4range(5675, 550273)), + (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) WHERE hash_range(v)::bit(32) != hash_range_extended(v, 0)::bit(32) OR hash_range(v)::bit(32) = hash_range_extended(v, 1)::bit(32); value | standard | extended0 | extended1 diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out index c8c33a0fc067d..d56decd994000 100644 --- a/src/test/regress/expected/horology.out +++ b/src/test/regress/expected/horology.out @@ -2,6 +2,12 @@ -- HOROLOGY -- SET DateStyle = 'Postgres, MDY'; +SHOW TimeZone; -- Many of these tests depend on the prevailing setting + TimeZone +---------- + PST8PDT +(1 row) + -- -- Test various input formats -- @@ -2076,6 +2082,72 @@ SELECT '' AS "16", f1 AS "timestamp", date(f1) AS date DROP TABLE TEMP_TIMESTAMP; -- +-- Comparisons between datetime types, especially overflow cases +--- +SELECT '2202020-10-05'::date::timestamp; -- fail +ERROR: date out of range for timestamp +SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f; + f +--- + f +(1 row) + +SELECT '2202020-10-05'::date::timestamptz; -- fail +ERROR: date out of range for timestamp +SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f; + f +--- + f +(1 row) + +-- This 
conversion may work depending on timezone +SELECT '4714-11-24 BC'::date::timestamptz; + timestamptz +--------------------------------- + Mon Nov 24 00:00:00 4714 PST BC +(1 row) + +SET TimeZone = 'UTC-2'; +SELECT '4714-11-24 BC'::date::timestamptz; -- fail +ERROR: date out of range for timestamp +SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t; + t +--- + t +(1 row) + +SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t; + t +--- + t +(1 row) + +RESET TimeZone; +-- -- Formats -- SET DateStyle TO 'US,Postgres'; @@ -2916,6 +2988,45 @@ SELECT to_date('2458872', 'J'); 01-23-2020 (1 row) +-- +-- Check handling of BC dates +-- +SELECT to_date('44-02-01 BC','YYYY-MM-DD BC'); + to_date +--------------- + 02-01-0044 BC +(1 row) + +SELECT to_date('-44-02-01','YYYY-MM-DD'); + to_date +--------------- + 02-01-0044 BC +(1 row) + +SELECT to_date('-44-02-01 BC','YYYY-MM-DD BC'); + to_date +------------ + 02-01-0044 +(1 row) + +SELECT to_timestamp('44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC'); + to_timestamp +--------------------------------- + Fri Feb 01 11:12:13 0044 PST BC +(1 row) + +SELECT to_timestamp('-44-02-01 11:12:13','YYYY-MM-DD HH24:MI:SS'); + to_timestamp +--------------------------------- + Fri Feb 01 11:12:13 0044 PST BC +(1 row) + +SELECT to_timestamp('-44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC'); + to_timestamp +------------------------------ + Mon Feb 01 11:12:13 0044 PST +(1 row) + -- -- Check handling of multiple spaces in format and/or input -- @@ -3183,6 +3294,12 @@ SELECT to_date('2016 366', 'YYYY DDD'); -- ok SELECT to_date('2016 367', 'YYYY DDD'); ERROR: date/time field value out of range: "2016 367" +SELECT to_date('0000-02-01','YYYY-MM-DD'); -- allowed, though it shouldn't be + to_date +--------------- + 02-01-0001 BC +(1 row) + -- -- 
Check behavior with SQL-style fixed-GMT-offset time zone (cf bug #8572) -- diff --git a/src/test/regress/expected/identity.out b/src/test/regress/expected/identity.out index 7ac9df767f553..2238f896f9a48 100644 --- a/src/test/regress/expected/identity.out +++ b/src/test/regress/expected/identity.out @@ -335,7 +335,7 @@ SELECT * FROM itest6; 102 | (3 rows) -SELECT table_name, column_name, is_identity, identity_generation FROM information_schema.columns WHERE table_name = 'itest6'; +SELECT table_name, column_name, is_identity, identity_generation FROM information_schema.columns WHERE table_name = 'itest6' ORDER BY 1, 2; table_name | column_name | is_identity | identity_generation ------------+-------------+-------------+--------------------- itest6 | a | YES | BY DEFAULT diff --git a/src/test/regress/expected/indexing.out b/src/test/regress/expected/indexing.out index 7e78a07af8b8b..c93f4470c92ca 100644 --- a/src/test/regress/expected/indexing.out +++ b/src/test/regress/expected/indexing.out @@ -907,16 +907,16 @@ Indexes: drop table idxpart; -- Failing to use the full partition key is not allowed create table idxpart (a int unique, b int) partition by range (a, b); -ERROR: insufficient columns in UNIQUE constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. create table idxpart (a int, b int unique) partition by range (a, b); -ERROR: insufficient columns in UNIQUE constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: UNIQUE constraint on table "idxpart" lacks column "a" which is part of the partition key. 
create table idxpart (a int primary key, b int) partition by range (b, a); -ERROR: insufficient columns in PRIMARY KEY constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. create table idxpart (a int, b int primary key) partition by range (b, a); -ERROR: insufficient columns in PRIMARY KEY constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "a" which is part of the partition key. -- OK if you use them in some other order create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a); @@ -936,7 +936,7 @@ DETAIL: UNIQUE constraints cannot be used when partition keys include expressio -- use ALTER TABLE to add a primary key create table idxpart (a int, b int, c text) partition by range (a, b); alter table idxpart add primary key (a); -- not an incomplete one though -ERROR: insufficient columns in PRIMARY KEY constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. alter table idxpart add primary key (a, b); -- this works \d idxpart @@ -967,7 +967,7 @@ drop table idxpart; -- use ALTER TABLE to add a unique constraint create table idxpart (a int, b int) partition by range (a, b); alter table idxpart add unique (a); -- not an incomplete one though -ERROR: insufficient columns in UNIQUE constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. 
alter table idxpart add unique (b, a); -- this works \d idxpart @@ -1017,7 +1017,7 @@ drop table idxpart; create table idxpart (a int, b int, primary key (a)) partition by range (a); create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b); -- fail -ERROR: insufficient columns in PRIMARY KEY constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: PRIMARY KEY constraint on table "idxpart2" lacks column "b" which is part of the partition key. drop table idxpart; -- Ditto for the ATTACH PARTITION case @@ -1025,7 +1025,7 @@ create table idxpart (a int unique, b int) partition by range (a); create table idxpart1 (a int not null, b int, unique (a, b)) partition by range (a, b); alter table idxpart attach partition idxpart1 for values from (1) to (1000); -ERROR: insufficient columns in UNIQUE constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: UNIQUE constraint on table "idxpart1" lacks column "b" which is part of the partition key. DROP TABLE idxpart, idxpart1; -- Multi-layer partitioning works correctly in this case: @@ -1278,7 +1278,7 @@ insert into covidxpart values (4, 1); ERROR: duplicate key value violates unique constraint "covidxpart4_a_b_idx" DETAIL: Key (a)=(4) already exists. create unique index on covidxpart (b) include (a); -- should fail -ERROR: insufficient columns in UNIQUE constraint definition +ERROR: unique constraint on partitioned table must include all partitioning columns DETAIL: UNIQUE constraint on table "covidxpart" lacks column "a" which is part of the partition key. 
-- check that detaching a partition also detaches the primary key constraint create table parted_pk_detach_test (a int primary key) partition by list (a); diff --git a/src/test/regress/expected/infinite_recurse.out b/src/test/regress/expected/infinite_recurse.out new file mode 100644 index 0000000000000..aa102fadd839a --- /dev/null +++ b/src/test/regress/expected/infinite_recurse.out @@ -0,0 +1,24 @@ +-- Check that stack depth detection mechanism works and +-- max_stack_depth is not set too high. +create function infinite_recurse() returns int as +'select infinite_recurse()' language sql; +-- Unfortunately, up till mid 2020 the Linux kernel had a bug in PPC64 +-- signal handling that would cause this test to crash if it happened +-- to receive an sinval catchup interrupt while the stack is deep: +-- https://bugzilla.kernel.org/show_bug.cgi?id=205183 +-- It is likely to be many years before that bug disappears from all +-- production kernels, so disable this test on such platforms. +-- (We still create the function, so as not to have a cross-platform +-- difference in the end state of the regression database.) +SELECT version() ~ 'powerpc64[^,]*-linux-gnu' + AS skip_test \gset +\if :skip_test +\quit +\endif +-- The full error report is not very stable, so we show only SQLSTATE +-- and primary error message. +\set VERBOSITY sqlstate +select infinite_recurse(); +ERROR: 54001 +\echo :LAST_ERROR_MESSAGE +stack depth limit exceeded diff --git a/src/test/regress/expected/infinite_recurse_1.out b/src/test/regress/expected/infinite_recurse_1.out new file mode 100644 index 0000000000000..b2c99a0d0d41f --- /dev/null +++ b/src/test/regress/expected/infinite_recurse_1.out @@ -0,0 +1,16 @@ +-- Check that stack depth detection mechanism works and +-- max_stack_depth is not set too high. 
+create function infinite_recurse() returns int as +'select infinite_recurse()' language sql; +-- Unfortunately, up till mid 2020 the Linux kernel had a bug in PPC64 +-- signal handling that would cause this test to crash if it happened +-- to receive an sinval catchup interrupt while the stack is deep: +-- https://bugzilla.kernel.org/show_bug.cgi?id=205183 +-- It is likely to be many years before that bug disappears from all +-- production kernels, so disable this test on such platforms. +-- (We still create the function, so as not to have a cross-platform +-- difference in the end state of the regression database.) +SELECT version() ~ 'powerpc64[^,]*-linux-gnu' + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/insert.out b/src/test/regress/expected/insert.out index eb9d45be5e5f4..da50ee3b670aa 100644 --- a/src/test/regress/expected/insert.out +++ b/src/test/regress/expected/insert.out @@ -818,9 +818,7 @@ drop role regress_coldesc_role; drop table inserttest3; drop table brtrigpartcon; drop function brtrigpartcon1trigf(); --- check that "do nothing" BR triggers work with tuple-routing (this checks --- that estate->es_result_relation_info is appropriately set/reset for each --- routed tuple) +-- check that "do nothing" BR triggers work with tuple-routing create table donothingbrtrig_test (a int, b text) partition by list (a); create table donothingbrtrig_test1 (b text, a int); create table donothingbrtrig_test2 (c text, b text, a int); diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out index 1338b2b23e173..ff157ceb1c19f 100644 --- a/src/test/regress/expected/insert_conflict.out +++ b/src/test/regress/expected/insert_conflict.out @@ -50,14 +50,12 @@ explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on con Insert on insertconflicttest Conflict Resolution: UPDATE Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key - Conflict Filter: 
(alternatives: SubPlan 1 or hashed SubPlan 2) + Conflict Filter: (SubPlan 1) -> Result SubPlan 1 -> Index Only Scan using both_index_expr_key on insertconflicttest ii Index Cond: (key = excluded.key) - SubPlan 2 - -> Seq Scan on insertconflicttest ii_1 -(10 rows) +(8 rows) -- Neither collation nor operator class specifications are required -- -- supplying them merely *limits* matches to indexes with matching opclasses diff --git a/src/test/regress/expected/jsonb_jsonpath.out b/src/test/regress/expected/jsonb_jsonpath.out index 57332111b427b..508ddd797ed59 100644 --- a/src/test/regress/expected/jsonb_jsonpath.out +++ b/src/test/regress/expected/jsonb_jsonpath.out @@ -1722,6 +1722,16 @@ select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").t "time with time zone" (1 row) +select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); + jsonb_path_query +----------------------- + "2017-03-10T12:34:56" +(1 row) + +select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); +ERROR: unmatched format character "T" +select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); +ERROR: unmatched format character "T" set time zone '+00'; select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); jsonb_path_query @@ -1877,30 +1887,39 @@ select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()'); "2017-03-10T12:34:56" (1 row) -select jsonb_path_query('"2017-03-10 12:34:56 +3"', '$.datetime().type()'); +select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()'); jsonb_path_query ---------------------------- "timestamp with time zone" (1 row) -select jsonb_path_query('"2017-03-10 12:34:56 +3"', '$.datetime()'); +select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()'); jsonb_path_query ----------------------------- "2017-03-10T12:34:56+03:00" (1 row) -select jsonb_path_query('"2017-03-10 12:34:56 +3:10"', 
'$.datetime().type()'); +select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()'); jsonb_path_query ---------------------------- "timestamp with time zone" (1 row) -select jsonb_path_query('"2017-03-10 12:34:56 +3:10"', '$.datetime()'); +select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()'); jsonb_path_query ----------------------------- "2017-03-10T12:34:56+03:10" (1 row) +select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:56+03:10" +(1 row) + +select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()'); +ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10" +HINT: Use a datetime template argument to specify the input data format. select jsonb_path_query('"12:34:56"', '$.datetime().type()'); jsonb_path_query -------------------------- @@ -1913,25 +1932,25 @@ select jsonb_path_query('"12:34:56"', '$.datetime()'); "12:34:56" (1 row) -select jsonb_path_query('"12:34:56 +3"', '$.datetime().type()'); +select jsonb_path_query('"12:34:56+3"', '$.datetime().type()'); jsonb_path_query ----------------------- "time with time zone" (1 row) -select jsonb_path_query('"12:34:56 +3"', '$.datetime()'); +select jsonb_path_query('"12:34:56+3"', '$.datetime()'); jsonb_path_query ------------------ "12:34:56+03:00" (1 row) -select jsonb_path_query('"12:34:56 +3:10"', '$.datetime().type()'); +select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()'); jsonb_path_query ----------------------- "time with time zone" (1 row) -select jsonb_path_query('"12:34:56 +3:10"', '$.datetime()'); +select jsonb_path_query('"12:34:56+3:10"', '$.datetime()'); jsonb_path_query ------------------ "12:34:56+03:10" @@ -1940,22 +1959,22 @@ select jsonb_path_query('"12:34:56 +3:10"', '$.datetime()'); set time zone '+00'; -- date comparison select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 
00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from date to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from date to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); -ERROR: cannot convert value from date to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from date to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. 
select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); jsonb_path_query_tz ----------------------------- @@ -1965,7 +1984,7 @@ select jsonb_path_query_tz( (3 rows) select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); jsonb_path_query_tz ----------------------------- @@ -1977,7 +1996,7 @@ select jsonb_path_query_tz( (5 rows) select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? 
(@ < "10.03.2017".datetime("dd.mm.yyyy"))'); jsonb_path_query_tz ----------------------------- @@ -1987,22 +2006,22 @@ select jsonb_path_query_tz( -- time comparison select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); -ERROR: cannot convert value from time to timetz without timezone usage -HINT: Use *_tz() function for timezone support. 
+ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); jsonb_path_query_tz --------------------- @@ -2011,7 +2030,7 @@ select jsonb_path_query_tz( (2 rows) select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); jsonb_path_query_tz --------------------- @@ -2021,7 +2040,7 @@ select jsonb_path_query_tz( (3 rows) select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? 
(@ < "12:35".datetime("HH24:MI"))'); jsonb_path_query_tz --------------------- @@ -2032,22 +2051,22 @@ select jsonb_path_query_tz( -- timetz comparison select jsonb_path_query( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? 
(@ < "12:35 +1".datetime("HH24:MI TZH"))'); -ERROR: cannot convert value from time to timetz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query_tz( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); jsonb_path_query_tz --------------------- @@ -2055,7 +2074,7 @@ select jsonb_path_query_tz( (1 row) select jsonb_path_query_tz( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); jsonb_path_query_tz --------------------- @@ -2067,7 +2086,7 @@ select jsonb_path_query_tz( (5 rows) select jsonb_path_query_tz( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? 
(@ < "12:35 +1".datetime("HH24:MI TZH"))'); jsonb_path_query_tz --------------------- @@ -2078,22 +2097,22 @@ select jsonb_path_query_tz( -- timestamp comparison select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. 
select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -ERROR: cannot convert value from timestamp to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); jsonb_path_query_tz ----------------------------- @@ -2102,7 +2121,7 @@ select jsonb_path_query_tz( (2 rows) select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? 
(@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); jsonb_path_query_tz ----------------------------- @@ -2114,7 +2133,7 @@ select jsonb_path_query_tz( (5 rows) select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); jsonb_path_query_tz ----------------------------- @@ -2125,22 +2144,22 @@ select jsonb_path_query_tz( -- timestamptz comparison select jsonb_path_query( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. 
select jsonb_path_query( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. select jsonb_path_query( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -ERROR: cannot convert value from timestamp to timestamptz without timezone usage -HINT: Use *_tz() function for timezone support. +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. 
select jsonb_path_query_tz( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); jsonb_path_query_tz ----------------------------- @@ -2149,7 +2168,7 @@ select jsonb_path_query_tz( (2 rows) select jsonb_path_query_tz( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? 
(@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); jsonb_path_query_tz ----------------------------- @@ -2162,7 +2181,7 @@ select jsonb_path_query_tz( (6 rows) select jsonb_path_query_tz( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); jsonb_path_query_tz ----------------------------- diff --git a/src/test/regress/expected/limit.out b/src/test/regress/expected/limit.out index e6f6809fbee8a..b75afcc01a399 100644 --- a/src/test/regress/expected/limit.out +++ b/src/test/regress/expected/limit.out @@ -623,7 +623,7 @@ SELECT thousand SELECT ''::text AS two, unique1, unique2, stringu1 FROM onek WHERE unique1 > 50 FETCH FIRST 2 ROW WITH TIES; -ERROR: WITH TIES options can not be specified without ORDER BY clause +ERROR: WITH TIES cannot be specified without ORDER BY clause -- test ruleutils CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10; @@ -657,7 +657,7 @@ View definition: CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails -ERROR: row count cannot be NULL in FETCH FIRST ... WITH TIES clause +ERROR: row count cannot be null in FETCH FIRST ... 
WITH TIES clause CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES; \d+ limit_thousand_v_3 diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out index 86940ec683861..cb782d0e2a8f2 100644 --- a/src/test/regress/expected/numeric.out +++ b/src/test/regress/expected/numeric.out @@ -1316,10 +1316,8 @@ SELECT width_bucket('NaN', 3.0, 4.0, 888); ERROR: operand, lower bound, and upper bound cannot be NaN SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888); ERROR: operand, lower bound, and upper bound cannot be NaN -SELECT width_bucket('inf', 3.0, 4.0, 888); -ERROR: operand, lower bound, and upper bound cannot be infinity SELECT width_bucket(2.0, 3.0, '-inf', 888); -ERROR: operand, lower bound, and upper bound cannot be infinity +ERROR: lower and upper bounds must be finite SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); ERROR: lower and upper bounds must be finite -- normal operation @@ -1362,8 +1360,19 @@ SELECT 10.0000000000001 | 6 | 6 | 0 | 0 | 5 | 5 | 21 | 21 | 8 | 8 (19 rows) --- for float8 only, check positive and negative infinity: we require +-- Check positive and negative infinity: we require -- finite bucket bounds, but allow an infinite operand +SELECT width_bucket(0.0::numeric, 'Infinity'::numeric, 5, 10); -- error +ERROR: lower and upper bounds must be finite +SELECT width_bucket(0.0::numeric, 5, '-Infinity'::numeric, 20); -- error +ERROR: lower and upper bounds must be finite +SELECT width_bucket('Infinity'::numeric, 1, 10, 10), + width_bucket('-Infinity'::numeric, 1, 10, 10); + width_bucket | width_bucket +--------------+-------------- + 11 | 0 +(1 row) + SELECT width_bucket(0.0::float8, 'Infinity'::float8, 5, 10); -- error ERROR: lower and upper bounds must be finite SELECT width_bucket(0.0::float8, 5, '-Infinity'::float8, 20); -- error @@ -1376,6 +1385,46 @@ SELECT width_bucket('Infinity'::float8, 1, 10, 10), (1 row) DROP 
TABLE width_bucket_test; +-- Simple test for roundoff error when results should be exact +SELECT x, width_bucket(x::float8, 10, 100, 9) as flt, + width_bucket(x::numeric, 10, 100, 9) as num +FROM generate_series(0, 110, 10) x; + x | flt | num +-----+-----+----- + 0 | 0 | 0 + 10 | 1 | 1 + 20 | 2 | 2 + 30 | 3 | 3 + 40 | 4 | 4 + 50 | 5 | 5 + 60 | 6 | 6 + 70 | 7 | 7 + 80 | 8 | 8 + 90 | 9 | 9 + 100 | 10 | 10 + 110 | 10 | 10 +(12 rows) + +SELECT x, width_bucket(x::float8, 100, 10, 9) as flt, + width_bucket(x::numeric, 100, 10, 9) as num +FROM generate_series(0, 110, 10) x; + x | flt | num +-----+-----+----- + 0 | 10 | 10 + 10 | 10 | 10 + 20 | 9 | 9 + 30 | 8 | 8 + 40 | 7 | 7 + 50 | 6 | 6 + 60 | 5 | 5 + 70 | 4 | 4 + 80 | 3 | 3 + 90 | 2 | 2 + 100 | 1 | 1 + 110 | 0 | 0 +(12 rows) + +-- -- TO_CHAR() -- SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999') @@ -2972,16 +3021,10 @@ ERROR: value overflows numeric format -- -- Tests for factorial -- -SELECT 4!; - ?column? ----------- - 24 -(1 row) - -SELECT !!3; - ?column? ----------- - 6 +SELECT factorial(4); + factorial +----------- + 24 (1 row) SELECT factorial(15); @@ -2990,16 +3033,14 @@ SELECT factorial(15); 1307674368000 (1 row) -SELECT 100000!; +SELECT factorial(100000); ERROR: value overflows numeric format -SELECT 0!; - ?column? ----------- - 1 +SELECT factorial(0); + factorial +----------- + 1 (1 row) -SELECT -4!; -ERROR: factorial of a negative number is undefined SELECT factorial(-4); ERROR: factorial of a negative number is undefined -- diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out index 1b3c146e4cc95..7825a765cd7bf 100644 --- a/src/test/regress/expected/opr_sanity.out +++ b/src/test/regress/expected/opr_sanity.out @@ -1066,7 +1066,7 @@ WHERE condefault AND -- Look for illegal values in pg_operator fields. 
SELECT p1.oid, p1.oprname FROM pg_operator as p1 -WHERE (p1.oprkind != 'b' AND p1.oprkind != 'l' AND p1.oprkind != 'r') OR +WHERE (p1.oprkind != 'b' AND p1.oprkind != 'l') OR p1.oprresult = 0 OR p1.oprcode = 0; oid | oprname -----+--------- @@ -1077,8 +1077,7 @@ SELECT p1.oid, p1.oprname FROM pg_operator as p1 WHERE (p1.oprleft = 0 and p1.oprkind != 'l') OR (p1.oprleft != 0 and p1.oprkind = 'l') OR - (p1.oprright = 0 and p1.oprkind != 'r') OR - (p1.oprright != 0 and p1.oprkind = 'r'); + p1.oprright = 0; oid | oprname -----+--------- (0 rows) @@ -1285,18 +1284,6 @@ WHERE p1.oprcode = p2.oid AND -----+---------+-----+--------- (0 rows) -SELECT p1.oid, p1.oprname, p2.oid, p2.proname -FROM pg_operator AS p1, pg_proc AS p2 -WHERE p1.oprcode = p2.oid AND - p1.oprkind = 'r' AND - (p2.pronargs != 1 - OR NOT binary_coercible(p2.prorettype, p1.oprresult) - OR NOT binary_coercible(p1.oprleft, p2.proargtypes[0]) - OR p1.oprright != 0); - oid | oprname | oid | proname ------+---------+-----+--------- -(0 rows) - -- If the operator is mergejoinable or hashjoinable, its underlying function -- should not be volatile. 
SELECT p1.oid, p1.oprname, p2.oid, p2.proname diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out index 585e7243752ce..0057f41caaf14 100644 --- a/src/test/regress/expected/partition_join.out +++ b/src/test/regress/expected/partition_join.out @@ -4567,7 +4567,7 @@ ANALYZE plt3_adv; -- This tests that when merging partitions from plt1_adv and plt2_adv in -- merge_list_bounds(), process_outer_partition() returns an already-assigned -- merged partition when re-called with plt1_adv_p1 for the second list value --- '0001' of that partitin +-- '0001' of that partition EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; QUERY PLAN diff --git a/src/test/regress/expected/rangefuncs.out b/src/test/regress/expected/rangefuncs.out index 7eced28452037..06bd129fd2281 100644 --- a/src/test/regress/expected/rangefuncs.out +++ b/src/test/regress/expected/rangefuncs.out @@ -2109,6 +2109,63 @@ select * from testrngfunc(); 7.136178 | 7.14 (1 row) +create or replace function testrngfunc() returns setof rngfunc_type as $$ + select 1, 2 union select 3, 4 order by 1; +$$ language sql immutable; +explain (verbose, costs off) +select testrngfunc(); + QUERY PLAN +------------------------- + ProjectSet + Output: testrngfunc() + -> Result +(3 rows) + +select testrngfunc(); + testrngfunc +----------------- + (1.000000,2.00) + (3.000000,4.00) +(2 rows) + +explain (verbose, costs off) +select * from testrngfunc(); + QUERY PLAN +---------------------------------------------------------- + Subquery Scan on "*SELECT*" + Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1" + -> Unique + Output: (1), (2) + -> Sort + Output: (1), (2) + Sort Key: (1), (2) + -> Append + -> Result + Output: 1, 2 + -> Result + Output: 3, 4 +(12 rows) + +select * from 
testrngfunc(); + f1 | f2 +----------+------ + 1.000000 | 2.00 + 3.000000 | 4.00 +(2 rows) + +-- Check a couple of error cases while we're here +select * from testrngfunc() as t(f1 int8,f2 int8); -- fail, composite result +ERROR: a column definition list is redundant for a function returning a named composite type +LINE 1: select * from testrngfunc() as t(f1 int8,f2 int8); + ^ +select * from pg_get_keywords() as t(f1 int8,f2 int8); -- fail, OUT params +ERROR: a column definition list is redundant for a function with OUT parameters +LINE 1: select * from pg_get_keywords() as t(f1 int8,f2 int8); + ^ +select * from sin(3) as t(f1 int8,f2 int8); -- fail, scalar result type +ERROR: a column definition list is only allowed for functions returning "record" +LINE 1: select * from sin(3) as t(f1 int8,f2 int8); + ^ drop type rngfunc_type cascade; NOTICE: drop cascades to function testrngfunc() -- diff --git a/src/test/regress/expected/reindex_catalog.out b/src/test/regress/expected/reindex_catalog.out index 4b5fba4949391..204f056c9a56c 100644 --- a/src/test/regress/expected/reindex_catalog.out +++ b/src/test/regress/expected/reindex_catalog.out @@ -36,3 +36,13 @@ REINDEX INDEX pg_index_indexrelid_index; -- non-mapped, non-shared, critical REINDEX INDEX pg_index_indrelid_index; -- non-mapped, non-shared, non-critical REINDEX INDEX pg_database_oid_index; -- mapped, shared, critical REINDEX INDEX pg_shdescription_o_c_index; -- mapped, shared, non-critical +-- Check the same REINDEX INDEX statements under parallelism. 
+BEGIN; +SET min_parallel_table_scan_size = 0; +REINDEX INDEX pg_class_oid_index; -- mapped, non-shared, critical +REINDEX INDEX pg_class_relname_nsp_index; -- mapped, non-shared, non-critical +REINDEX INDEX pg_index_indexrelid_index; -- non-mapped, non-shared, critical +REINDEX INDEX pg_index_indrelid_index; -- non-mapped, non-shared, non-critical +REINDEX INDEX pg_database_oid_index; -- mapped, shared, critical +REINDEX INDEX pg_shdescription_o_c_index; -- mapped, shared, non-critical +ROLLBACK; diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 2a18dc423e2bf..492cdcf74c36a 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -2018,6 +2018,12 @@ pg_stat_replication| SELECT s.pid, FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, sslcompression, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, leader_pid) JOIN pg_stat_get_wal_senders() w(pid, state, sent_lsn, write_lsn, flush_lsn, replay_lsn, write_lag, flush_lag, replay_lag, sync_priority, sync_state, reply_time) ON ((s.pid = w.pid))) LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); +pg_stat_replication_slots| SELECT s.slot_name, + s.spill_txns, + s.spill_count, + s.spill_bytes, + s.stats_reset + FROM pg_stat_get_replication_slots() s(slot_name, spill_txns, spill_count, spill_bytes, stats_reset); pg_stat_slru| SELECT s.name, s.blks_zeroed, s.blks_hit, @@ -2129,6 +2135,9 @@ pg_stat_user_tables| SELECT pg_stat_all_tables.relid, pg_stat_all_tables.autoanalyze_count FROM pg_stat_all_tables WHERE ((pg_stat_all_tables.schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (pg_stat_all_tables.schemaname !~ '^pg_toast'::text)); 
+pg_stat_wal| SELECT w.wal_buffers_full, + w.stats_reset + FROM pg_stat_get_wal() w(wal_buffers_full, stats_reset); pg_stat_wal_receiver| SELECT s.pid, s.status, s.receive_start_lsn, diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out index 8c667d786a212..4c3edd213fb84 100644 --- a/src/test/regress/expected/stats_ext.out +++ b/src/test/regress/expected/stats_ext.out @@ -102,6 +102,15 @@ WARNING: statistics object "public.ab1_a_b_stats" could not be computed for rel ALTER TABLE ab1 ALTER a SET STATISTICS -1; -- setting statistics target 0 skips the statistics, without printing any message, so check catalog ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; +\d ab1 + Table "public.ab1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Statistics objects: + "public"."ab1_a_b_stats" (ndistinct, dependencies, mcv) ON a, b FROM ab1; STATISTICS 0 + ANALYZE ab1; SELECT stxname, stxdndistinct, stxddependencies, stxdmcv FROM pg_statistic_ext s, pg_statistic_ext_data d @@ -113,6 +122,15 @@ SELECT stxname, stxdndistinct, stxddependencies, stxdmcv (1 row) ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; +\d+ ab1 + Table "public.ab1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | +Statistics objects: + "public"."ab1_a_b_stats" (ndistinct, dependencies, mcv) ON a, b FROM ab1 + -- partial analyze doesn't build stats either ANALYZE ab1 (a); WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out index b81923f2e7410..9d56cdacf37db 100644 --- a/src/test/regress/expected/subselect.out +++ 
b/src/test/regress/expected/subselect.out @@ -874,6 +874,53 @@ select * from int8_tbl where q1 in (select c1 from inner_text); (2 rows) rollback; -- to get rid of the bogus operator +-- +-- Test resolution of hashed vs non-hashed implementation of EXISTS subplan +-- +explain (costs off) +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0); + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Seq Scan on tenk1 t + Filter: ((hashed SubPlan 2) OR (ten < 0)) + SubPlan 2 + -> Index Only Scan using tenk1_unique1 on tenk1 k +(5 rows) + +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0); + count +------- + 10000 +(1 row) + +explain (costs off) +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0) + and thousand = 1; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on tenk1 t + Recheck Cond: (thousand = 1) + Filter: ((SubPlan 1) OR (ten < 0)) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = 1) + SubPlan 1 + -> Index Only Scan using tenk1_unique1 on tenk1 k + Index Cond: (unique1 = t.unique2) +(9 rows) + +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0) + and thousand = 1; + count +------- + 10 +(1 row) + -- -- Test case for planner bug with nested EXISTS handling -- diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out index 1cffc3349d602..81bdacf59daa5 100644 --- a/src/test/regress/expected/sysviews.out +++ b/src/test/regress/expected/sysviews.out @@ -76,6 +76,13 @@ select count(*) >= 0 as ok from pg_prepared_xacts; t (1 row) +-- There must be only one record +select count(*) = 1 as ok from pg_stat_wal; + ok +---- + t +(1 row) + -- This is to record the prevailing planner enable_foo 
settings during -- a regression test run. select name, setting from pg_settings where name like 'enable%'; diff --git a/src/test/regress/expected/timestamp.out b/src/test/regress/expected/timestamp.out index 5f97505a30744..96551160901da 100644 --- a/src/test/regress/expected/timestamp.out +++ b/src/test/regress/expected/timestamp.out @@ -1704,9 +1704,18 @@ SELECT '' AS to_char_12, to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff (4 rows) -- timestamp numeric fields constructor -SELECT make_timestamp(2014,12,28,6,30,45.887); +SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887); make_timestamp ------------------------------ Sun Dec 28 06:30:45.887 2014 (1 row) +SELECT make_timestamp(-44, 3, 15, 12, 30, 15); + make_timestamp +----------------------------- + Fri Mar 15 12:30:15 0044 BC +(1 row) + +-- should fail +select make_timestamp(0, 7, 15, 12, 30, 15); +ERROR: date field value out of range: 0-07-15 diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out index 5e76b3a47e7da..57efa290207a2 100644 --- a/src/test/regress/expected/triggers.out +++ b/src/test/regress/expected/triggers.out @@ -2512,6 +2512,62 @@ select tgrelid::regclass, count(*) from pg_trigger (5 rows) drop table trg_clone; +-- Test the interaction between ALTER TABLE .. DISABLE TRIGGER and +-- both kinds of inheritance. Historically, legacy inheritance has +-- not recursed to children, so that behavior is preserved. 
+create table parent (a int); +create table child1 () inherits (parent); +create function trig_nothing() returns trigger language plpgsql + as $$ begin return null; end $$; +create trigger tg after insert on parent + for each row execute function trig_nothing(); +create trigger tg after insert on child1 + for each row execute function trig_nothing(); +alter table parent disable trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; + tgrelid | tgname | tgenabled +---------+--------+----------- + child1 | tg | O + parent | tg | D +(2 rows) + +alter table only parent enable always trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; + tgrelid | tgname | tgenabled +---------+--------+----------- + child1 | tg | O + parent | tg | A +(2 rows) + +drop table parent, child1; +create table parent (a int) partition by list (a); +create table child1 partition of parent for values in (1); +create trigger tg after insert on parent + for each row execute procedure trig_nothing(); +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; + tgrelid | tgname | tgenabled +---------+--------+----------- + child1 | tg | O + parent | tg | O +(2 rows) + +alter table only parent enable always trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; + tgrelid | tgname | tgenabled +---------+--------+----------- + child1 | tg | O + parent | tg | A +(2 rows) + +drop table parent, child1; -- -- Test the interaction between transition tables and both kinds of -- inheritance. 
We'll dump the contents of the transition tables in a diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out index 5de53f2782aaf..caed1c19ec76c 100644 --- a/src/test/regress/expected/updatable_views.out +++ b/src/test/regress/expected/updatable_views.out @@ -1869,9 +1869,7 @@ EXPLAIN (costs off) INSERT INTO rw_view1 VALUES (5); SubPlan 1 -> Index Only Scan using ref_tbl_pkey on ref_tbl r Index Cond: (a = b.a) - SubPlan 2 - -> Seq Scan on ref_tbl r_1 -(7 rows) +(5 rows) EXPLAIN (costs off) UPDATE rw_view1 SET a = a + 5; QUERY PLAN @@ -1885,9 +1883,7 @@ EXPLAIN (costs off) UPDATE rw_view1 SET a = a + 5; SubPlan 1 -> Index Only Scan using ref_tbl_pkey on ref_tbl r_1 Index Cond: (a = b.a) - SubPlan 2 - -> Seq Scan on ref_tbl r_2 -(11 rows) +(9 rows) DROP TABLE base_tbl, ref_tbl CASCADE; NOTICE: drop cascades to view rw_view1 @@ -2301,8 +2297,8 @@ SELECT * FROM v1 WHERE a=8; EXPLAIN (VERBOSE, COSTS OFF) UPDATE v1 SET a=100 WHERE snoop(a) AND leakproof(a) AND a < 7 AND a != 6; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------- Update on public.t1 Update on public.t1 Update on public.t11 t1_1 @@ -2311,32 +2307,26 @@ UPDATE v1 SET a=100 WHERE snoop(a) AND leakproof(a) AND a < 7 AND a != 6; -> Index Scan using t1_a_idx on public.t1 Output: 100, t1.b, t1.c, t1.ctid Index Cond: ((t1.a > 5) AND (t1.a < 7)) - Filter: ((t1.a <> 6) AND (alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1.a) AND leakproof(t1.a)) + Filter: ((t1.a <> 6) AND (SubPlan 1) AND snoop(t1.a) AND leakproof(t1.a)) SubPlan 1 -> Append -> Seq Scan on public.t12 t12_1 Filter: (t12_1.a = t1.a) -> Seq Scan on public.t111 t12_2 Filter: (t12_2.a = t1.a) - SubPlan 2 - -> Append - -> Seq Scan on public.t12 t12_4 - Output: t12_4.a - -> Seq Scan on public.t111 t12_5 - 
Output: t12_5.a -> Index Scan using t11_a_idx on public.t11 t1_1 Output: 100, t1_1.b, t1_1.c, t1_1.d, t1_1.ctid Index Cond: ((t1_1.a > 5) AND (t1_1.a < 7)) - Filter: ((t1_1.a <> 6) AND (alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1_1.a) AND leakproof(t1_1.a)) + Filter: ((t1_1.a <> 6) AND (SubPlan 1) AND snoop(t1_1.a) AND leakproof(t1_1.a)) -> Index Scan using t12_a_idx on public.t12 t1_2 Output: 100, t1_2.b, t1_2.c, t1_2.e, t1_2.ctid Index Cond: ((t1_2.a > 5) AND (t1_2.a < 7)) - Filter: ((t1_2.a <> 6) AND (alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1_2.a) AND leakproof(t1_2.a)) + Filter: ((t1_2.a <> 6) AND (SubPlan 1) AND snoop(t1_2.a) AND leakproof(t1_2.a)) -> Index Scan using t111_a_idx on public.t111 t1_3 Output: 100, t1_3.b, t1_3.c, t1_3.d, t1_3.e, t1_3.ctid Index Cond: ((t1_3.a > 5) AND (t1_3.a < 7)) - Filter: ((t1_3.a <> 6) AND (alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1_3.a) AND leakproof(t1_3.a)) -(33 rows) + Filter: ((t1_3.a <> 6) AND (SubPlan 1) AND snoop(t1_3.a) AND leakproof(t1_3.a)) +(27 rows) UPDATE v1 SET a=100 WHERE snoop(a) AND leakproof(a) AND a < 7 AND a != 6; SELECT * FROM v1 WHERE a=100; -- Nothing should have been changed to 100 @@ -2351,8 +2341,8 @@ SELECT * FROM t1 WHERE a=100; -- Nothing should have been changed to 100 EXPLAIN (VERBOSE, COSTS OFF) UPDATE v1 SET a=a+1 WHERE snoop(a) AND leakproof(a) AND a = 8; - QUERY PLAN ---------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------- Update on public.t1 Update on public.t1 Update on public.t11 t1_1 @@ -2361,32 +2351,26 @@ UPDATE v1 SET a=a+1 WHERE snoop(a) AND leakproof(a) AND a = 8; -> Index Scan using t1_a_idx on public.t1 Output: (t1.a + 1), t1.b, t1.c, t1.ctid Index Cond: ((t1.a > 5) AND (t1.a = 8)) - Filter: ((alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1.a) AND leakproof(t1.a)) + Filter: ((SubPlan 1) AND 
snoop(t1.a) AND leakproof(t1.a)) SubPlan 1 -> Append -> Seq Scan on public.t12 t12_1 Filter: (t12_1.a = t1.a) -> Seq Scan on public.t111 t12_2 Filter: (t12_2.a = t1.a) - SubPlan 2 - -> Append - -> Seq Scan on public.t12 t12_4 - Output: t12_4.a - -> Seq Scan on public.t111 t12_5 - Output: t12_5.a -> Index Scan using t11_a_idx on public.t11 t1_1 Output: (t1_1.a + 1), t1_1.b, t1_1.c, t1_1.d, t1_1.ctid Index Cond: ((t1_1.a > 5) AND (t1_1.a = 8)) - Filter: ((alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1_1.a) AND leakproof(t1_1.a)) + Filter: ((SubPlan 1) AND snoop(t1_1.a) AND leakproof(t1_1.a)) -> Index Scan using t12_a_idx on public.t12 t1_2 Output: (t1_2.a + 1), t1_2.b, t1_2.c, t1_2.e, t1_2.ctid Index Cond: ((t1_2.a > 5) AND (t1_2.a = 8)) - Filter: ((alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1_2.a) AND leakproof(t1_2.a)) + Filter: ((SubPlan 1) AND snoop(t1_2.a) AND leakproof(t1_2.a)) -> Index Scan using t111_a_idx on public.t111 t1_3 Output: (t1_3.a + 1), t1_3.b, t1_3.c, t1_3.d, t1_3.e, t1_3.ctid Index Cond: ((t1_3.a > 5) AND (t1_3.a = 8)) - Filter: ((alternatives: SubPlan 1 or hashed SubPlan 2) AND snoop(t1_3.a) AND leakproof(t1_3.a)) -(33 rows) + Filter: ((SubPlan 1) AND snoop(t1_3.a) AND leakproof(t1_3.a)) +(27 rows) UPDATE v1 SET a=a+1 WHERE snoop(a) AND leakproof(a) AND a = 8; NOTICE: snooped value: 8 diff --git a/src/test/regress/expected/window.out b/src/test/regress/expected/window.out index 13c91c9916fa9..21c6cac491f1c 100644 --- a/src/test/regress/expected/window.out +++ b/src/test/regress/expected/window.out @@ -3200,6 +3200,50 @@ FROM empsalary; -> Seq Scan on empsalary (5 rows) +-- Test incremental sorting +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + empno, + salary, + enroll_date, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp + FROM empsalary) emp +WHERE first_emp = 1 OR last_emp = 1; + QUERY PLAN 
+----------------------------------------------------------------------------------- + Subquery Scan on emp + Filter: ((emp.first_emp = 1) OR (emp.last_emp = 1)) + -> WindowAgg + -> Incremental Sort + Sort Key: empsalary.depname, empsalary.enroll_date + Presorted Key: empsalary.depname + -> WindowAgg + -> Sort + Sort Key: empsalary.depname, empsalary.enroll_date DESC + -> Seq Scan on empsalary +(10 rows) + +SELECT * FROM + (SELECT depname, + empno, + salary, + enroll_date, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp + FROM empsalary) emp +WHERE first_emp = 1 OR last_emp = 1; + depname | empno | salary | enroll_date | first_emp | last_emp +-----------+-------+--------+-------------+-----------+---------- + develop | 8 | 6000 | 10-01-2006 | 1 | 5 + develop | 7 | 4200 | 01-01-2008 | 5 | 1 + personnel | 2 | 3900 | 12-23-2006 | 1 | 2 + personnel | 5 | 3500 | 12-10-2007 | 2 | 1 + sales | 1 | 5000 | 10-01-2006 | 1 | 3 + sales | 4 | 4800 | 08-08-2007 | 3 | 1 +(6 rows) + -- cleanup DROP TABLE empsalary; -- test user-defined window function with named args and default args diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out index 67eaeb4f3ed4a..457f3bf04fa81 100644 --- a/src/test/regress/expected/with.out +++ b/src/test/regress/expected/with.out @@ -579,79 +579,79 @@ insert into graph values (1, 4, 'arc 1 -> 4'), (4, 5, 'arc 4 -> 5'), (5, 1, 'arc 5 -> 1'); -with recursive search_graph(f, t, label, path, cycle) as ( - select *, array[row(g.f, g.t)], false from graph g +with recursive search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g union all - select g.*, path || row(g.f, g.t), row(g.f, g.t) = any(path) + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) from graph g, search_graph sg - where g.f = sg.t and not cycle + where g.f = sg.t and not is_cycle ) select * from 
search_graph; - f | t | label | path | cycle ----+---+------------+-------------------------------------------+------- - 1 | 2 | arc 1 -> 2 | {"(1,2)"} | f - 1 | 3 | arc 1 -> 3 | {"(1,3)"} | f - 2 | 3 | arc 2 -> 3 | {"(2,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(1,4)"} | f - 4 | 5 | arc 4 -> 5 | {"(4,5)"} | f - 5 | 1 | arc 5 -> 1 | {"(5,1)"} | f - 1 | 2 | arc 1 -> 2 | {"(5,1)","(1,2)"} | f - 1 | 3 | arc 1 -> 3 | {"(5,1)","(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(5,1)","(1,4)"} | f - 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} | f - 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} | f - 5 | 1 | arc 5 -> 1 | {"(4,5)","(5,1)"} | f - 1 | 2 | arc 1 -> 2 | {"(4,5)","(5,1)","(1,2)"} | f - 1 | 3 | arc 1 -> 3 | {"(4,5)","(5,1)","(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(4,5)","(5,1)","(1,4)"} | f - 2 | 3 | arc 2 -> 3 | {"(5,1)","(1,2)","(2,3)"} | f - 4 | 5 | arc 4 -> 5 | {"(5,1)","(1,4)","(4,5)"} | f - 5 | 1 | arc 5 -> 1 | {"(1,4)","(4,5)","(5,1)"} | f - 1 | 2 | arc 1 -> 2 | {"(1,4)","(4,5)","(5,1)","(1,2)"} | f - 1 | 3 | arc 1 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(1,4)","(4,5)","(5,1)","(1,4)"} | t - 2 | 3 | arc 2 -> 3 | {"(4,5)","(5,1)","(1,2)","(2,3)"} | f - 4 | 5 | arc 4 -> 5 | {"(4,5)","(5,1)","(1,4)","(4,5)"} | t - 5 | 1 | arc 5 -> 1 | {"(5,1)","(1,4)","(4,5)","(5,1)"} | t - 2 | 3 | arc 2 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} | f + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | 
{"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} (25 rows) -- ordering by the path column has same effect as SEARCH DEPTH FIRST -with recursive search_graph(f, t, label, path, cycle) as ( - select *, array[row(g.f, g.t)], false from graph g +with recursive search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g union all - select g.*, path || row(g.f, g.t), row(g.f, g.t) = any(path) + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) from graph g, search_graph sg - where g.f = sg.t and not cycle + where g.f = sg.t and not is_cycle ) select * from search_graph order by path; - f | t | label | path | cycle ----+---+------------+-------------------------------------------+------- - 1 | 2 | arc 1 -> 2 | {"(1,2)"} | f - 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} | f - 1 | 3 | arc 1 -> 3 | {"(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(1,4)"} | f - 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} | f - 5 | 1 | arc 5 -> 1 | {"(1,4)","(4,5)","(5,1)"} | f - 1 | 2 | arc 1 -> 2 | {"(1,4)","(4,5)","(5,1)","(1,2)"} | f - 2 | 3 | arc 2 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} | f - 1 | 3 | arc 1 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(1,4)","(4,5)","(5,1)","(1,4)"} | t - 2 | 3 | arc 2 -> 3 | {"(2,3)"} | f - 4 | 5 | arc 4 -> 5 | {"(4,5)"} | f - 
5 | 1 | arc 5 -> 1 | {"(4,5)","(5,1)"} | f - 1 | 2 | arc 1 -> 2 | {"(4,5)","(5,1)","(1,2)"} | f - 2 | 3 | arc 2 -> 3 | {"(4,5)","(5,1)","(1,2)","(2,3)"} | f - 1 | 3 | arc 1 -> 3 | {"(4,5)","(5,1)","(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(4,5)","(5,1)","(1,4)"} | f - 4 | 5 | arc 4 -> 5 | {"(4,5)","(5,1)","(1,4)","(4,5)"} | t - 5 | 1 | arc 5 -> 1 | {"(5,1)"} | f - 1 | 2 | arc 1 -> 2 | {"(5,1)","(1,2)"} | f - 2 | 3 | arc 2 -> 3 | {"(5,1)","(1,2)","(2,3)"} | f - 1 | 3 | arc 1 -> 3 | {"(5,1)","(1,3)"} | f - 1 | 4 | arc 1 -> 4 | {"(5,1)","(1,4)"} | f - 4 | 5 | arc 4 -> 5 | {"(5,1)","(1,4)","(4,5)"} | f - 5 | 1 | arc 5 -> 1 | {"(5,1)","(1,4)","(4,5)","(5,1)"} | t + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} + 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | t | 
{"(5,1)","(1,4)","(4,5)","(5,1)"} (25 rows) -- diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule index 026ea880cde3a..ae89ed7f0b40b 100644 --- a/src/test/regress/parallel_schedule +++ b/src/test/regress/parallel_schedule @@ -55,7 +55,7 @@ test: create_index create_index_spgist create_view index_including index_includi # ---------- # Another group of parallel tests # ---------- -test: create_aggregate create_function_3 create_cast constraints triggers select inherit typed_table vacuum drop_if_exists updatable_views roleattributes create_am hash_func errors +test: create_aggregate create_function_3 create_cast constraints triggers select inherit typed_table vacuum drop_if_exists updatable_views roleattributes create_am hash_func errors infinite_recurse # ---------- # sanity_check does a vacuum, affecting the sort order of SELECT * diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index 74fd026856eab..23d7d0beb2e8d 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -566,7 +566,7 @@ convert_sourcefiles_in(const char *source_subdir, const char *dest_dir, const ch initStringInfo(&line); - while (pg_get_line_append(infile, &line)) + while (pg_get_line_buf(infile, &line)) { replace_string(&line, "@abs_srcdir@", inputdir); replace_string(&line, "@abs_builddir@", outputdir); @@ -574,7 +574,6 @@ convert_sourcefiles_in(const char *source_subdir, const char *dest_dir, const ch replace_string(&line, "@libdir@", dlpath); replace_string(&line, "@DLSUFFIX@", DLSUFFIX); fputs(line.data, outfile); - resetStringInfo(&line); } pfree(line.data); diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c index 02397f2eb1047..09bc42a8c0f2f 100644 --- a/src/test/regress/regress.c +++ b/src/test/regress/regress.c @@ -720,6 +720,14 @@ test_atomic_uint32(void) EXPECT_EQ_U32(pg_atomic_read_u32(&var), (uint32) INT_MAX + 1); EXPECT_EQ_U32(pg_atomic_sub_fetch_u32(&var, INT_MAX), 1); 
pg_atomic_sub_fetch_u32(&var, 1); + expected = PG_INT16_MAX; + EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1)); + expected = PG_INT16_MAX + 1; + EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1)); + expected = PG_INT16_MIN; + EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1)); + expected = PG_INT16_MIN - 1; + EXPECT_TRUE(!pg_atomic_compare_exchange_u32(&var, &expected, 1)); /* fail exchange because of old expected */ expected = 10; diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule index 979d9261197d3..525bdc804f612 100644 --- a/src/test/regress/serial_schedule +++ b/src/test/regress/serial_schedule @@ -83,6 +83,7 @@ test: roleattributes test: create_am test: hash_func test: errors +test: infinite_recurse test: sanity_check test: select_into test: select_distinct diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql index 902f4fac19a42..b3c16af48eec8 100644 --- a/src/test/regress/sql/copy2.sql +++ b/src/test/regress/sql/copy2.sql @@ -53,6 +53,20 @@ COPY x (a, b, c, d, e) from stdin; -- non-existent column in column list: should fail COPY x (xyz) from stdin; +-- redundant options +COPY x from stdin (format CSV, FORMAT CSV); +COPY x from stdin (freeze off, freeze on); +COPY x from stdin (delimiter ',', delimiter ','); +COPY x from stdin (null ' ', null ' '); +COPY x from stdin (header off, header on); +COPY x from stdin (quote ':', quote ':'); +COPY x from stdin (escape ':', escape ':'); +COPY x from stdin (force_quote (a), force_quote *); +COPY x from stdin (force_not_null (a), force_not_null (b)); +COPY x from stdin (force_null (a), force_null (b)); +COPY x from stdin (convert_selectively (a), convert_selectively (b)); +COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii'); + -- too many columns in column list: should fail COPY x (a, b, c, d, e, d, c) from stdin; diff --git a/src/test/regress/sql/create_operator.sql b/src/test/regress/sql/create_operator.sql 
index 8b6fd0bb43d62..4ff2c0ff21678 100644 --- a/src/test/regress/sql/create_operator.sql +++ b/src/test/regress/sql/create_operator.sql @@ -18,18 +18,13 @@ CREATE OPERATOR <% ( ); CREATE OPERATOR @#@ ( - rightarg = int8, -- left unary - procedure = numeric_fac -); - -CREATE OPERATOR #@# ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, -- prefix + procedure = factorial ); CREATE OPERATOR #%# ( - leftarg = int8, -- right unary - procedure = numeric_fac + leftarg = int8, -- fail, postfix is no longer supported + procedure = factorial ); -- Test operator created above @@ -37,12 +32,19 @@ SELECT point '(1,2)' <% widget '(0,0,3)' AS t, point '(1,2)' <% widget '(0,0,1)' AS f; -- Test comments -COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad right unary'; +COMMENT ON OPERATOR ###### (NONE, int4) IS 'bad prefix'; +COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad postfix'; +COMMENT ON OPERATOR ###### (int4, int8) IS 'bad infix'; --- => is disallowed now +-- Check that DROP on a nonexistent op behaves sanely, too +DROP OPERATOR ###### (NONE, int4); +DROP OPERATOR ###### (int4, NONE); +DROP OPERATOR ###### (int4, int8); + +-- => is disallowed as an operator name now CREATE OPERATOR => ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, + procedure = factorial ); -- lexing of <=, >=, <>, != has a number of edge cases @@ -50,10 +52,12 @@ CREATE OPERATOR => ( -- this is legal because ! 
is not allowed in sql ops CREATE OPERATOR !=- ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, + procedure = factorial ); -SELECT 2 !=-; +SELECT !=- 10; +-- postfix operators don't work anymore +SELECT 10 !=-; -- make sure lexer returns != as <> even in edge cases SELECT 2 !=/**/ 1, 2 !=/**/ 2; SELECT 2 !=-- comment to be removed by psql @@ -84,8 +88,8 @@ GRANT USAGE ON SCHEMA schema_op1 TO PUBLIC; REVOKE USAGE ON SCHEMA schema_op1 FROM regress_rol_op1; SET ROLE regress_rol_op1; CREATE OPERATOR schema_op1.#*# ( - leftarg = int8, -- right unary - procedure = numeric_fac + rightarg = int8, + procedure = factorial ); ROLLBACK; @@ -94,7 +98,7 @@ ROLLBACK; BEGIN TRANSACTION; CREATE OPERATOR #*# ( leftarg = SETOF int8, - procedure = numeric_fac + procedure = factorial ); ROLLBACK; @@ -103,7 +107,7 @@ ROLLBACK; BEGIN TRANSACTION; CREATE OPERATOR #*# ( rightarg = SETOF int8, - procedure = numeric_fac + procedure = factorial ); ROLLBACK; @@ -128,19 +132,19 @@ ROLLBACK; -- Should fail. Invalid attribute CREATE OPERATOR #@%# ( - leftarg = int8, -- right unary - procedure = numeric_fac, + rightarg = int8, + procedure = factorial, invalid_att = int8 ); --- Should fail. At least leftarg or rightarg should be mandatorily specified +-- Should fail. At least rightarg should be mandatorily specified CREATE OPERATOR #@%# ( - procedure = numeric_fac + procedure = factorial ); -- Should fail. Procedure should be mandatorily specified CREATE OPERATOR #@%# ( - leftarg = int8 + rightarg = int8 ); -- Should fail. 
CREATE OPERATOR requires USAGE on TYPE diff --git a/src/test/regress/sql/create_procedure.sql b/src/test/regress/sql/create_procedure.sql index 89b96d580ffa4..2ef1c82ceabe2 100644 --- a/src/test/regress/sql/create_procedure.sql +++ b/src/test/regress/sql/create_procedure.sql @@ -112,6 +112,18 @@ $$; CALL ptest7(least('a', 'b'), 'a'); +-- OUT parameters + +CREATE PROCEDURE ptest9(OUT a int) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES (1, 'a'); +SELECT 1; +$$; + +CALL ptest9(NULL); + + -- various error cases CALL version(); -- error: not a procedure @@ -119,7 +131,6 @@ CALL sum(1); -- error: not a procedure CREATE PROCEDURE ptestx() LANGUAGE SQL WINDOW AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; -CREATE PROCEDURE ptestx(OUT a int) LANGUAGE SQL AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; ALTER PROCEDURE ptest1(text) STRICT; ALTER FUNCTION ptest1(text) VOLATILE; -- error: not a function diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql index 9b1adcb8adda9..cee822aa8b6ee 100644 --- a/src/test/regress/sql/create_table.sql +++ b/src/test/regress/sql/create_table.sql @@ -549,7 +549,6 @@ CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(so CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(1)); CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((select 1)); CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (generate_series(4, 6)); -CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ('1' collate "POSIX"); CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "POSIX"); -- syntax does not allow empty list of values for list partitions @@ -802,6 +801,14 @@ insert into parted_notnull_inh_test (b) values (null); \d parted_notnull_inh_test1 drop table parted_notnull_inh_test; +-- check 
that collations are assigned in partition bound expressions +create table parted_boolean_col (a bool, b text) partition by list(a); +create table parted_boolean_less partition of parted_boolean_col + for values in ('foo' < 'bar'); +create table parted_boolean_greater partition of parted_boolean_col + for values in ('foo' > 'bar'); +drop table parted_boolean_col; + -- check for a conflicting COLLATE clause create table parted_collate_must_match (a text collate "C", b text collate "C") partition by range (a); @@ -813,23 +820,16 @@ create table parted_collate_must_match2 partition of parted_collate_must_match (b collate "POSIX") for values from ('m') to ('z'); drop table parted_collate_must_match; --- check that specifying incompatible collations for partition bound --- expressions fails promptly +-- check that non-matching collations for partition bound +-- expressions are coerced to the right collation create table test_part_coll_posix (a text) partition by range (a collate "POSIX"); --- fail +-- ok, collation is implicitly coerced create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "C") to ('g'); -- ok -create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "POSIX") to ('g'); --- ok create table test_part_coll2 partition of test_part_coll_posix for values from ('g') to ('m'); - --- using a cast expression uses the target type's default collation - --- fail +-- ok, collation is implicitly coerced create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "C") to ('s'); --- ok -create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "POSIX") to ('s'); -- ok; partition collation silently overrides the default collation of type 'name' create table test_part_coll_cast2 partition of test_part_coll_posix for values from (name 's') to ('z'); diff --git a/src/test/regress/sql/create_table_like.sql 
b/src/test/regress/sql/create_table_like.sql index f0a8a56b76fad..e484bac0a4613 100644 --- a/src/test/regress/sql/create_table_like.sql +++ b/src/test/regress/sql/create_table_like.sql @@ -163,6 +163,11 @@ SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4); CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); +-- Check that LIKE isn't confused by a system catalog of the same name +CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); +\d+ public.pg_attrdef +DROP TABLE public.pg_attrdef; + DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE; -- LIKE must respect NO INHERIT property of constraints diff --git a/src/test/regress/sql/date.sql b/src/test/regress/sql/date.sql index 1c3adf70ced7f..7a734fb1a0562 100644 --- a/src/test/regress/sql/date.sql +++ b/src/test/regress/sql/date.sql @@ -20,12 +20,13 @@ INSERT INTO DATE_TBL VALUES ('2000-04-03'); INSERT INTO DATE_TBL VALUES ('2038-04-08'); INSERT INTO DATE_TBL VALUES ('2039-04-09'); INSERT INTO DATE_TBL VALUES ('2040-04-10'); +INSERT INTO DATE_TBL VALUES ('2040-04-10 BC'); -SELECT f1 AS "Fifteen" FROM DATE_TBL; +SELECT f1 FROM DATE_TBL; -SELECT f1 AS "Nine" FROM DATE_TBL WHERE f1 < '2000-01-01'; +SELECT f1 FROM DATE_TBL WHERE f1 < '2000-01-01'; -SELECT f1 AS "Three" FROM DATE_TBL +SELECT f1 FROM DATE_TBL WHERE f1 BETWEEN '2000-01-01' AND '2001-01-01'; -- @@ -218,6 +219,23 @@ SELECT date 'tomorrow' - date 'yesterday' AS "Two days"; -- -- test extract! 
-- +SELECT f1 as "date", + date_part('year', f1) AS year, + date_part('month', f1) AS month, + date_part('day', f1) AS day, + date_part('quarter', f1) AS quarter, + date_part('decade', f1) AS decade, + date_part('century', f1) AS century, + date_part('millennium', f1) AS millennium, + date_part('isoyear', f1) AS isoyear, + date_part('week', f1) AS week, + date_part('dow', f1) AS dow, + date_part('isodow', f1) AS isodow, + date_part('doy', f1) AS doy, + date_part('julian', f1) AS julian, + date_part('epoch', f1) AS epoch + FROM date_tbl; +-- -- epoch -- SELECT EXTRACT(EPOCH FROM DATE '1970-01-01'); -- 0 @@ -264,6 +282,31 @@ SELECT EXTRACT(DECADE FROM DATE '0012-12-31 BC'); -- -2 SELECT EXTRACT(CENTURY FROM NOW())>=21 AS True; -- true SELECT EXTRACT(CENTURY FROM TIMESTAMP '1970-03-20 04:30:00.00000'); -- 20 -- +-- all possible fields +-- +SELECT EXTRACT(MICROSECONDS FROM DATE '2020-08-11'); +SELECT EXTRACT(MILLISECONDS FROM DATE '2020-08-11'); +SELECT EXTRACT(SECOND FROM DATE '2020-08-11'); +SELECT EXTRACT(MINUTE FROM DATE '2020-08-11'); +SELECT EXTRACT(HOUR FROM DATE '2020-08-11'); +SELECT EXTRACT(DAY FROM DATE '2020-08-11'); +SELECT EXTRACT(MONTH FROM DATE '2020-08-11'); +SELECT EXTRACT(YEAR FROM DATE '2020-08-11'); +SELECT EXTRACT(DECADE FROM DATE '2020-08-11'); +SELECT EXTRACT(CENTURY FROM DATE '2020-08-11'); +SELECT EXTRACT(MILLENNIUM FROM DATE '2020-08-11'); +SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11'); +SELECT EXTRACT(QUARTER FROM DATE '2020-08-11'); +SELECT EXTRACT(WEEK FROM DATE '2020-08-11'); +SELECT EXTRACT(DOW FROM DATE '2020-08-11'); +SELECT EXTRACT(ISODOW FROM DATE '2020-08-11'); +SELECT EXTRACT(DOY FROM DATE '2020-08-11'); +SELECT EXTRACT(TIMEZONE FROM DATE '2020-08-11'); +SELECT EXTRACT(TIMEZONE_M FROM DATE '2020-08-11'); +SELECT EXTRACT(TIMEZONE_H FROM DATE '2020-08-11'); +SELECT EXTRACT(EPOCH FROM DATE '2020-08-11'); +SELECT EXTRACT(JULIAN FROM DATE '2020-08-11'); +-- -- test trunc function! 
-- SELECT DATE_TRUNC('MILLENNIUM', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1001 @@ -335,6 +378,7 @@ select make_date(2013, 7, 15); select make_date(-44, 3, 15); select make_time(8, 20, 0.0); -- should fail +select make_date(0, 7, 15); select make_date(2013, 2, 30); select make_date(2013, 13, 1); select make_date(2013, 11, -1); diff --git a/src/test/regress/sql/errors.sql b/src/test/regress/sql/errors.sql index 86b672538a17a..66a56b28f62b8 100644 --- a/src/test/regress/sql/errors.sql +++ b/src/test/regress/sql/errors.sql @@ -364,12 +364,3 @@ INT4 UNIQUE NOT NULL); - --- Check that stack depth detection mechanism works and --- max_stack_depth is not set too high. The full error report is not --- very stable, so show only SQLSTATE and primary error message. -create function infinite_recurse() returns int as -'select infinite_recurse()' language sql; -\set VERBOSITY sqlstate -select infinite_recurse(); -\echo :LAST_ERROR_MESSAGE diff --git a/src/test/regress/sql/hash_func.sql b/src/test/regress/sql/hash_func.sql index b7ce8b21a3a08..a3e2decc2c4b7 100644 --- a/src/test/regress/sql/hash_func.sql +++ b/src/test/regress/sql/hash_func.sql @@ -14,66 +14,66 @@ WHERE hashint2(v)::bit(32) != hashint2extended(v, 0)::bit(32) OR hashint2(v)::bit(32) = hashint2extended(v, 1)::bit(32); SELECT v as value, hashint4(v)::bit(32) as standard, - hashint4extended(v, 0)::bit(32) as extended0, - hashint4extended(v, 1)::bit(32) as extended1 + hashint4extended(v, 0)::bit(32) as extended0, + hashint4extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashint4(v)::bit(32) != hashint4extended(v, 0)::bit(32) OR hashint4(v)::bit(32) = hashint4extended(v, 1)::bit(32); SELECT v as value, hashint8(v)::bit(32) as standard, - hashint8extended(v, 0)::bit(32) as extended0, - hashint8extended(v, 1)::bit(32) as extended1 + hashint8extended(v, 0)::bit(32) as extended0, + hashint8extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), 
(550273), (207112489)) x(v) WHERE hashint8(v)::bit(32) != hashint8extended(v, 0)::bit(32) OR hashint8(v)::bit(32) = hashint8extended(v, 1)::bit(32); SELECT v as value, hashfloat4(v)::bit(32) as standard, - hashfloat4extended(v, 0)::bit(32) as extended0, - hashfloat4extended(v, 1)::bit(32) as extended1 + hashfloat4extended(v, 0)::bit(32) as extended0, + hashfloat4extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashfloat4(v)::bit(32) != hashfloat4extended(v, 0)::bit(32) OR hashfloat4(v)::bit(32) = hashfloat4extended(v, 1)::bit(32); SELECT v as value, hashfloat8(v)::bit(32) as standard, - hashfloat8extended(v, 0)::bit(32) as extended0, - hashfloat8extended(v, 1)::bit(32) as extended1 + hashfloat8extended(v, 0)::bit(32) as extended0, + hashfloat8extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashfloat8(v)::bit(32) != hashfloat8extended(v, 0)::bit(32) OR hashfloat8(v)::bit(32) = hashfloat8extended(v, 1)::bit(32); SELECT v as value, hashoid(v)::bit(32) as standard, - hashoidextended(v, 0)::bit(32) as extended0, - hashoidextended(v, 1)::bit(32) as extended1 + hashoidextended(v, 0)::bit(32) as extended0, + hashoidextended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) WHERE hashoid(v)::bit(32) != hashoidextended(v, 0)::bit(32) OR hashoid(v)::bit(32) = hashoidextended(v, 1)::bit(32); SELECT v as value, hashchar(v)::bit(32) as standard, - hashcharextended(v, 0)::bit(32) as extended0, - hashcharextended(v, 1)::bit(32) as extended1 + hashcharextended(v, 0)::bit(32) as extended0, + hashcharextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::"char"), ('1'), ('x'), ('X'), ('p'), ('N')) x(v) WHERE hashchar(v)::bit(32) != hashcharextended(v, 0)::bit(32) OR hashchar(v)::bit(32) = hashcharextended(v, 1)::bit(32); SELECT v as value, hashname(v)::bit(32) as standard, - hashnameextended(v, 0)::bit(32) as extended0, - 
hashnameextended(v, 1)::bit(32) as extended1 + hashnameextended(v, 0)::bit(32) as extended0, + hashnameextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), - ('muop28x03'), ('yi3nm0d73')) x(v) + ('muop28x03'), ('yi3nm0d73')) x(v) WHERE hashname(v)::bit(32) != hashnameextended(v, 0)::bit(32) OR hashname(v)::bit(32) = hashnameextended(v, 1)::bit(32); SELECT v as value, hashtext(v)::bit(32) as standard, - hashtextextended(v, 0)::bit(32) as extended0, - hashtextextended(v, 1)::bit(32) as extended1 + hashtextextended(v, 0)::bit(32) as extended0, + hashtextextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), - ('muop28x03'), ('yi3nm0d73')) x(v) + ('muop28x03'), ('yi3nm0d73')) x(v) WHERE hashtext(v)::bit(32) != hashtextextended(v, 0)::bit(32) OR hashtext(v)::bit(32) = hashtextextended(v, 1)::bit(32); SELECT v as value, hashoidvector(v)::bit(32) as standard, - hashoidvectorextended(v, 0)::bit(32) as extended0, - hashoidvectorextended(v, 1)::bit(32) as extended1 + hashoidvectorextended(v, 0)::bit(32) as extended0, + hashoidvectorextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::oidvector), ('0 1 2 3 4'), ('17 18 19 20'), ('42 43 42 45'), ('550273 550273 570274'), ('207112489 207112499 21512 2155 372325 1363252')) x(v) @@ -81,40 +81,40 @@ WHERE hashoidvector(v)::bit(32) != hashoidvectorextended(v, 0)::bit(32) OR hashoidvector(v)::bit(32) = hashoidvectorextended(v, 1)::bit(32); SELECT v as value, hash_aclitem(v)::bit(32) as standard, - hash_aclitem_extended(v, 0)::bit(32) as extended0, - hash_aclitem_extended(v, 1)::bit(32) as extended1 + hash_aclitem_extended(v, 0)::bit(32) as extended0, + hash_aclitem_extended(v, 1)::bit(32) as extended1 FROM (SELECT DISTINCT(relacl[1]) FROM pg_class LIMIT 10) x(v) WHERE hash_aclitem(v)::bit(32) != hash_aclitem_extended(v, 0)::bit(32) OR hash_aclitem(v)::bit(32) = hash_aclitem_extended(v, 1)::bit(32); SELECT v as value, 
hashmacaddr(v)::bit(32) as standard, - hashmacaddrextended(v, 0)::bit(32) as extended0, - hashmacaddrextended(v, 1)::bit(32) as extended1 + hashmacaddrextended(v, 0)::bit(32) as extended0, + hashmacaddrextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::macaddr), ('08:00:2b:01:02:04'), ('08:00:2b:01:02:04'), - ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), + ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), ('ea:29:b1:5e:1f:a5')) x(v) WHERE hashmacaddr(v)::bit(32) != hashmacaddrextended(v, 0)::bit(32) OR hashmacaddr(v)::bit(32) = hashmacaddrextended(v, 1)::bit(32); SELECT v as value, hashinet(v)::bit(32) as standard, - hashinetextended(v, 0)::bit(32) as extended0, - hashinetextended(v, 1)::bit(32) as extended1 + hashinetextended(v, 0)::bit(32) as extended0, + hashinetextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::inet), ('192.168.100.128/25'), ('192.168.100.0/8'), - ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) + ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) WHERE hashinet(v)::bit(32) != hashinetextended(v, 0)::bit(32) OR hashinet(v)::bit(32) = hashinetextended(v, 1)::bit(32); SELECT v as value, hash_numeric(v)::bit(32) as standard, - hash_numeric_extended(v, 0)::bit(32) as extended0, - hash_numeric_extended(v, 1)::bit(32) as extended1 + hash_numeric_extended(v, 0)::bit(32) as extended0, + hash_numeric_extended(v, 1)::bit(32) as extended1 FROM (VALUES (0), (1.149484958), (17.149484958), (42.149484958), (149484958.550273), (2071124898672)) x(v) WHERE hash_numeric(v)::bit(32) != hash_numeric_extended(v, 0)::bit(32) OR hash_numeric(v)::bit(32) = hash_numeric_extended(v, 1)::bit(32); SELECT v as value, hashmacaddr8(v)::bit(32) as standard, - hashmacaddr8extended(v, 0)::bit(32) as extended0, - hashmacaddr8extended(v, 1)::bit(32) as extended1 + hashmacaddr8extended(v, 0)::bit(32) as extended0, + hashmacaddr8extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::macaddr8), ('08:00:2b:01:02:04:36:49'), 
('08:00:2b:01:02:04:f0:e8'), ('e2:7f:51:3e:70:49:16:29'), ('d6:a9:4a:78:1c:d5:47:32'), ('ea:29:b1:5e:1f:a5')) x(v) @@ -122,8 +122,8 @@ WHERE hashmacaddr8(v)::bit(32) != hashmacaddr8extended(v, 0)::bit(32) OR hashmacaddr8(v)::bit(32) = hashmacaddr8extended(v, 1)::bit(32); SELECT v as value, hash_array(v)::bit(32) as standard, - hash_array_extended(v, 0)::bit(32) as extended0, - hash_array_extended(v, 1)::bit(32) as extended1 + hash_array_extended(v, 0)::bit(32) as extended0, + hash_array_extended(v, 1)::bit(32) as extended1 FROM (VALUES ('{0}'::int4[]), ('{0,1,2,3,4}'), ('{17,18,19,20}'), ('{42,34,65,98}'), ('{550273,590027, 870273}'), ('{207112489, 807112489}')) x(v) @@ -131,92 +131,92 @@ WHERE hash_array(v)::bit(32) != hash_array_extended(v, 0)::bit(32) OR hash_array(v)::bit(32) = hash_array_extended(v, 1)::bit(32); SELECT v as value, hashbpchar(v)::bit(32) as standard, - hashbpcharextended(v, 0)::bit(32) as extended0, - hashbpcharextended(v, 1)::bit(32) as extended1 + hashbpcharextended(v, 0)::bit(32) as extended0, + hashbpcharextended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), - ('muop28x03'), ('yi3nm0d73')) x(v) + ('muop28x03'), ('yi3nm0d73')) x(v) WHERE hashbpchar(v)::bit(32) != hashbpcharextended(v, 0)::bit(32) OR hashbpchar(v)::bit(32) = hashbpcharextended(v, 1)::bit(32); SELECT v as value, time_hash(v)::bit(32) as standard, - time_hash_extended(v, 0)::bit(32) as extended0, - time_hash_extended(v, 1)::bit(32) as extended1 + time_hash_extended(v, 0)::bit(32) as extended0, + time_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::time), ('11:09:59'), ('1:09:59'), ('11:59:59'), ('7:9:59'), ('5:15:59')) x(v) WHERE time_hash(v)::bit(32) != time_hash_extended(v, 0)::bit(32) OR time_hash(v)::bit(32) = time_hash_extended(v, 1)::bit(32); SELECT v as value, timetz_hash(v)::bit(32) as standard, - timetz_hash_extended(v, 0)::bit(32) as extended0, - timetz_hash_extended(v, 1)::bit(32) as extended1 + 
timetz_hash_extended(v, 0)::bit(32) as extended0, + timetz_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::timetz), ('00:11:52.518762-07'), ('00:11:52.51762-08'), - ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) + ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) WHERE timetz_hash(v)::bit(32) != timetz_hash_extended(v, 0)::bit(32) OR timetz_hash(v)::bit(32) = timetz_hash_extended(v, 1)::bit(32); SELECT v as value, interval_hash(v)::bit(32) as standard, - interval_hash_extended(v, 0)::bit(32) as extended0, - interval_hash_extended(v, 1)::bit(32) as extended1 + interval_hash_extended(v, 0)::bit(32) as extended0, + interval_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::interval), ('5 month 7 day 46 minutes'), ('1 year 7 day 46 minutes'), - ('1 year 7 month 20 day 46 minutes'), ('5 month'), - ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) + ('1 year 7 month 20 day 46 minutes'), ('5 month'), + ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) WHERE interval_hash(v)::bit(32) != interval_hash_extended(v, 0)::bit(32) OR interval_hash(v)::bit(32) = interval_hash_extended(v, 1)::bit(32); SELECT v as value, timestamp_hash(v)::bit(32) as standard, - timestamp_hash_extended(v, 0)::bit(32) as extended0, - timestamp_hash_extended(v, 1)::bit(32) as extended1 + timestamp_hash_extended(v, 0)::bit(32) as extended0, + timestamp_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::timestamp), ('2017-08-22 00:09:59.518762'), ('2015-08-20 00:11:52.51762-08'), - ('2017-05-22 00:11:52.62-01'), + ('2017-05-22 00:11:52.62-01'), ('2013-08-22 00:11:52.62+01'), ('2013-08-22 11:59:59+04')) x(v) WHERE timestamp_hash(v)::bit(32) != timestamp_hash_extended(v, 0)::bit(32) OR timestamp_hash(v)::bit(32) = timestamp_hash_extended(v, 1)::bit(32); SELECT v as value, uuid_hash(v)::bit(32) as standard, - uuid_hash_extended(v, 0)::bit(32) as extended0, - uuid_hash_extended(v, 1)::bit(32) as extended1 + 
uuid_hash_extended(v, 0)::bit(32) as extended0, + uuid_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::uuid), ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'), - ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), + ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), ('99c6705c-d939-461c-a3c9-1690ad64ed7b'), - ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), + ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), ('9ad46d4f-6f2a-4edd-aadb-745993928e1e')) x(v) WHERE uuid_hash(v)::bit(32) != uuid_hash_extended(v, 0)::bit(32) OR uuid_hash(v)::bit(32) = uuid_hash_extended(v, 1)::bit(32); SELECT v as value, pg_lsn_hash(v)::bit(32) as standard, - pg_lsn_hash_extended(v, 0)::bit(32) as extended0, - pg_lsn_hash_extended(v, 1)::bit(32) as extended1 + pg_lsn_hash_extended(v, 0)::bit(32) as extended0, + pg_lsn_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::pg_lsn), ('16/B374D84'), ('30/B374D84'), - ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) + ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) WHERE pg_lsn_hash(v)::bit(32) != pg_lsn_hash_extended(v, 0)::bit(32) OR pg_lsn_hash(v)::bit(32) = pg_lsn_hash_extended(v, 1)::bit(32); CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'); SELECT v as value, hashenum(v)::bit(32) as standard, - hashenumextended(v, 0)::bit(32) as extended0, - hashenumextended(v, 1)::bit(32) as extended1 + hashenumextended(v, 0)::bit(32) as extended0, + hashenumextended(v, 1)::bit(32) as extended1 FROM (VALUES ('sad'::mood), ('ok'), ('happy')) x(v) WHERE hashenum(v)::bit(32) != hashenumextended(v, 0)::bit(32) OR hashenum(v)::bit(32) = hashenumextended(v, 1)::bit(32); DROP TYPE mood; SELECT v as value, jsonb_hash(v)::bit(32) as standard, - jsonb_hash_extended(v, 0)::bit(32) as extended0, - jsonb_hash_extended(v, 1)::bit(32) as extended1 + jsonb_hash_extended(v, 0)::bit(32) as extended0, + jsonb_hash_extended(v, 1)::bit(32) as extended1 FROM (VALUES (NULL::jsonb), - ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), - ('{"foo": [true, "bar"], 
"tags": {"e": 1, "f": null}}'), - ('{"g": {"h": "value"}}')) x(v) + ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), + ('{"foo": [true, "bar"], "tags": {"e": 1, "f": null}}'), + ('{"g": {"h": "value"}}')) x(v) WHERE jsonb_hash(v)::bit(32) != jsonb_hash_extended(v, 0)::bit(32) OR jsonb_hash(v)::bit(32) = jsonb_hash_extended(v, 1)::bit(32); SELECT v as value, hash_range(v)::bit(32) as standard, - hash_range_extended(v, 0)::bit(32) as extended0, - hash_range_extended(v, 1)::bit(32) as extended1 + hash_range_extended(v, 0)::bit(32) as extended0, + hash_range_extended(v, 1)::bit(32) as extended1 FROM (VALUES (int4range(10, 20)), (int4range(23, 43)), - (int4range(5675, 550273)), - (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) + (int4range(5675, 550273)), + (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) WHERE hash_range(v)::bit(32) != hash_range_extended(v, 0)::bit(32) OR hash_range(v)::bit(32) = hash_range_extended(v, 1)::bit(32); diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql index c464e6766c697..fa92a80d0e661 100644 --- a/src/test/regress/sql/horology.sql +++ b/src/test/regress/sql/horology.sql @@ -3,6 +3,8 @@ -- SET DateStyle = 'Postgres, MDY'; +SHOW TimeZone; -- Many of these tests depend on the prevailing setting + -- -- Test various input formats -- @@ -279,6 +281,31 @@ SELECT '' AS "16", f1 AS "timestamp", date(f1) AS date DROP TABLE TEMP_TIMESTAMP; +-- +-- Comparisons between datetime types, especially overflow cases +--- + +SELECT '2202020-10-05'::date::timestamp; -- fail +SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t; +SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f; + +SELECT '2202020-10-05'::date::timestamptz; -- fail +SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t; +SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f; + +-- This conversion may work depending on timezone +SELECT '4714-11-24 
BC'::date::timestamptz; +SET TimeZone = 'UTC-2'; +SELECT '4714-11-24 BC'::date::timestamptz; -- fail + +SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t; +SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t; + +SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t; +SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t; + +RESET TimeZone; + -- -- Formats -- @@ -426,6 +453,17 @@ SELECT to_date('1 4 1902', 'Q MM YYYY'); -- Q is ignored SELECT to_date('3 4 21 01', 'W MM CC YY'); SELECT to_date('2458872', 'J'); +-- +-- Check handling of BC dates +-- + +SELECT to_date('44-02-01 BC','YYYY-MM-DD BC'); +SELECT to_date('-44-02-01','YYYY-MM-DD'); +SELECT to_date('-44-02-01 BC','YYYY-MM-DD BC'); +SELECT to_timestamp('44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC'); +SELECT to_timestamp('-44-02-01 11:12:13','YYYY-MM-DD HH24:MI:SS'); +SELECT to_timestamp('-44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC'); + -- -- Check handling of multiple spaces in format and/or input -- @@ -511,6 +549,7 @@ SELECT to_date('2015 366', 'YYYY DDD'); SELECT to_date('2016 365', 'YYYY DDD'); -- ok SELECT to_date('2016 366', 'YYYY DDD'); -- ok SELECT to_date('2016 367', 'YYYY DDD'); +SELECT to_date('0000-02-01','YYYY-MM-DD'); -- allowed, though it shouldn't be -- -- Check behavior with SQL-style fixed-GMT-offset time zone (cf bug #8572) diff --git a/src/test/regress/sql/identity.sql b/src/test/regress/sql/identity.sql index 1bf2a976eb0b9..d4bc29ab5c300 100644 --- a/src/test/regress/sql/identity.sql +++ b/src/test/regress/sql/identity.sql @@ -208,7 +208,7 @@ INSERT INTO itest6 DEFAULT VALUES; INSERT INTO itest6 DEFAULT VALUES; SELECT * FROM itest6; -SELECT table_name, column_name, is_identity, identity_generation FROM information_schema.columns WHERE table_name = 'itest6'; +SELECT table_name, column_name, is_identity, identity_generation FROM information_schema.columns WHERE table_name = 'itest6' ORDER BY 1, 2; ALTER TABLE itest6 ALTER COLUMN b SET 
INCREMENT BY 2; -- fail, not identity diff --git a/src/test/regress/sql/infinite_recurse.sql b/src/test/regress/sql/infinite_recurse.sql new file mode 100644 index 0000000000000..151dba4a7aef7 --- /dev/null +++ b/src/test/regress/sql/infinite_recurse.sql @@ -0,0 +1,29 @@ +-- Check that stack depth detection mechanism works and +-- max_stack_depth is not set too high. + +create function infinite_recurse() returns int as +'select infinite_recurse()' language sql; + +-- Unfortunately, up till mid 2020 the Linux kernel had a bug in PPC64 +-- signal handling that would cause this test to crash if it happened +-- to receive a sinval catchup interrupt while the stack is deep: +-- https://bugzilla.kernel.org/show_bug.cgi?id=205183 +-- It is likely to be many years before that bug disappears from all +-- production kernels, so disable this test on such platforms. +-- (We still create the function, so as not to have a cross-platform +-- difference in the end state of the regression database.) + +SELECT version() ~ 'powerpc64[^,]*-linux-gnu' + AS skip_test \gset +\if :skip_test +\quit +\endif + +-- The full error report is not very stable, so we show only SQLSTATE +-- and primary error message. 
+ +\set VERBOSITY sqlstate + +select infinite_recurse(); + +\echo :LAST_ERROR_MESSAGE diff --git a/src/test/regress/sql/insert.sql b/src/test/regress/sql/insert.sql index ffd4aacbc48b1..963faa1614c6b 100644 --- a/src/test/regress/sql/insert.sql +++ b/src/test/regress/sql/insert.sql @@ -542,9 +542,7 @@ drop table inserttest3; drop table brtrigpartcon; drop function brtrigpartcon1trigf(); --- check that "do nothing" BR triggers work with tuple-routing (this checks --- that estate->es_result_relation_info is appropriately set/reset for each --- routed tuple) +-- check that "do nothing" BR triggers work with tuple-routing create table donothingbrtrig_test (a int, b text) partition by list (a); create table donothingbrtrig_test1 (b text, a int); create table donothingbrtrig_test2 (c text, b text, a int); diff --git a/src/test/regress/sql/jsonb_jsonpath.sql b/src/test/regress/sql/jsonb_jsonpath.sql index a50abed95da7c..60f73cb05906c 100644 --- a/src/test/regress/sql/jsonb_jsonpath.sql +++ b/src/test/regress/sql/jsonb_jsonpath.sql @@ -368,6 +368,10 @@ select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH2 select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()'); select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()'); +select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); +select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); +select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); + set time zone '+00'; select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); @@ -404,117 +408,119 @@ select jsonb_path_query('"2017-03-10"', '$.datetime().type()'); select jsonb_path_query('"2017-03-10"', '$.datetime()'); select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()'); select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()'); -select 
jsonb_path_query('"2017-03-10 12:34:56 +3"', '$.datetime().type()'); -select jsonb_path_query('"2017-03-10 12:34:56 +3"', '$.datetime()'); -select jsonb_path_query('"2017-03-10 12:34:56 +3:10"', '$.datetime().type()'); -select jsonb_path_query('"2017-03-10 12:34:56 +3:10"', '$.datetime()'); +select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()'); +select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()'); +select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()'); +select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()'); +select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()'); +select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()'); select jsonb_path_query('"12:34:56"', '$.datetime().type()'); select jsonb_path_query('"12:34:56"', '$.datetime()'); -select jsonb_path_query('"12:34:56 +3"', '$.datetime().type()'); -select jsonb_path_query('"12:34:56 +3"', '$.datetime()'); -select jsonb_path_query('"12:34:56 +3:10"', '$.datetime().type()'); -select jsonb_path_query('"12:34:56 +3:10"', '$.datetime()'); +select jsonb_path_query('"12:34:56+3"', '$.datetime().type()'); +select jsonb_path_query('"12:34:56+3"', '$.datetime()'); +select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()'); +select jsonb_path_query('"12:34:56+3:10"', '$.datetime()'); set time zone '+00'; -- date comparison select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? 
(@ == "10.03.2017".datetime("dd.mm.yyyy"))'); select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); select jsonb_path_query( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? 
(@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); select jsonb_path_query_tz( - '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03 +04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03 +04", "2017-03-10 03:00:00 +03"]', + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); -- time comparison select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); select jsonb_path_query( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? 
(@ < "12:35".datetime("HH24:MI"))'); select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); select jsonb_path_query_tz( - '["12:34:00", "12:35:00", "12:36:00", "12:35:00 +00", "12:35:00 +01", "13:35:00 +01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +01"]', + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); -- timetz comparison select jsonb_path_query( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? 
(@ == "12:35 +1".datetime("HH24:MI TZH"))'); select jsonb_path_query( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); select jsonb_path_query( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); select jsonb_path_query_tz( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); select jsonb_path_query_tz( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? 
(@ >= "12:35 +1".datetime("HH24:MI TZH"))'); select jsonb_path_query_tz( - '["12:34:00 +01", "12:35:00 +01", "12:36:00 +01", "12:35:00 +02", "12:35:00 -02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); -- timestamp comparison select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? 
(@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); select jsonb_path_query( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? 
(@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); select jsonb_path_query_tz( - '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00 +01", "2017-03-10 13:35:00 +01", "2017-03-10 12:35:00 -01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); -- timestamptz comparison select jsonb_path_query( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); select jsonb_path_query( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? 
(@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); select jsonb_path_query( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); select jsonb_path_query_tz( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); select jsonb_path_query_tz( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? 
(@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); select jsonb_path_query_tz( - '["2017-03-10 12:34:00 +01", "2017-03-10 12:35:00 +01", "2017-03-10 12:36:00 +01", "2017-03-10 12:35:00 +02", "2017-03-10 12:35:00 -02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56 +01"]', + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); -- overflow during comparison diff --git a/src/test/regress/sql/numeric.sql b/src/test/regress/sql/numeric.sql index febb096af23be..76969db22a7d1 100644 --- a/src/test/regress/sql/numeric.sql +++ b/src/test/regress/sql/numeric.sql @@ -831,7 +831,6 @@ SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5); SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888); SELECT width_bucket('NaN', 3.0, 4.0, 888); SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888); -SELECT width_bucket('inf', 3.0, 4.0, 888); SELECT width_bucket(2.0, 3.0, '-inf', 888); SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); @@ -876,8 +875,12 @@ SELECT width_bucket(operand_f8, -25, 25, 10) AS wb_5f FROM width_bucket_test; --- for float8 only, check positive and negative infinity: we require +-- Check positive and negative infinity: we require -- finite bucket bounds, but allow an infinite operand +SELECT width_bucket(0.0::numeric, 'Infinity'::numeric, 5, 10); -- error +SELECT width_bucket(0.0::numeric, 5, '-Infinity'::numeric, 20); -- error +SELECT width_bucket('Infinity'::numeric, 1, 10, 10), + width_bucket('-Infinity'::numeric, 1, 10, 10); SELECT width_bucket(0.0::float8, 'Infinity'::float8, 5, 10); -- error SELECT width_bucket(0.0::float8, 5, '-Infinity'::float8, 20); -- error 
SELECT width_bucket('Infinity'::float8, 1, 10, 10), @@ -885,6 +888,15 @@ SELECT width_bucket('Infinity'::float8, 1, 10, 10), DROP TABLE width_bucket_test; +-- Simple test for roundoff error when results should be exact +SELECT x, width_bucket(x::float8, 10, 100, 9) as flt, + width_bucket(x::numeric, 10, 100, 9) as num +FROM generate_series(0, 110, 10) x; +SELECT x, width_bucket(x::float8, 100, 10, 9) as flt, + width_bucket(x::numeric, 100, 10, 9) as num +FROM generate_series(0, 110, 10) x; + +-- -- TO_CHAR() -- SELECT '' AS to_char_1, to_char(val, '9G999G999G999G999G999') @@ -1300,12 +1312,10 @@ SELECT lcm(9999 * (10::numeric)^131068 + (10::numeric^131068 - 1), 2); -- overfl -- -- Tests for factorial -- -SELECT 4!; -SELECT !!3; +SELECT factorial(4); SELECT factorial(15); -SELECT 100000!; -SELECT 0!; -SELECT -4!; +SELECT factorial(100000); +SELECT factorial(0); SELECT factorial(-4); -- diff --git a/src/test/regress/sql/opr_sanity.sql b/src/test/regress/sql/opr_sanity.sql index 7a9180b081524..307aab1deb766 100644 --- a/src/test/regress/sql/opr_sanity.sql +++ b/src/test/regress/sql/opr_sanity.sql @@ -571,7 +571,7 @@ WHERE condefault AND SELECT p1.oid, p1.oprname FROM pg_operator as p1 -WHERE (p1.oprkind != 'b' AND p1.oprkind != 'l' AND p1.oprkind != 'r') OR +WHERE (p1.oprkind != 'b' AND p1.oprkind != 'l') OR p1.oprresult = 0 OR p1.oprcode = 0; -- Look for missing or unwanted operand types @@ -580,8 +580,7 @@ SELECT p1.oid, p1.oprname FROM pg_operator as p1 WHERE (p1.oprleft = 0 and p1.oprkind != 'l') OR (p1.oprleft != 0 and p1.oprkind = 'l') OR - (p1.oprright = 0 and p1.oprkind != 'r') OR - (p1.oprright != 0 and p1.oprkind = 'r'); + p1.oprright = 0; -- Look for conflicting operator definitions (same names and input datatypes). 
@@ -715,15 +714,6 @@ WHERE p1.oprcode = p2.oid AND OR NOT binary_coercible(p1.oprright, p2.proargtypes[0]) OR p1.oprleft != 0); -SELECT p1.oid, p1.oprname, p2.oid, p2.proname -FROM pg_operator AS p1, pg_proc AS p2 -WHERE p1.oprcode = p2.oid AND - p1.oprkind = 'r' AND - (p2.pronargs != 1 - OR NOT binary_coercible(p2.prorettype, p1.oprresult) - OR NOT binary_coercible(p1.oprleft, p2.proargtypes[0]) - OR p1.oprright != 0); - -- If the operator is mergejoinable or hashjoinable, its underlying function -- should not be volatile. diff --git a/src/test/regress/sql/partition_join.sql b/src/test/regress/sql/partition_join.sql index 73606c86e5143..d97b5b69ffc1c 100644 --- a/src/test/regress/sql/partition_join.sql +++ b/src/test/regress/sql/partition_join.sql @@ -1090,7 +1090,7 @@ ANALYZE plt3_adv; -- This tests that when merging partitions from plt1_adv and plt2_adv in -- merge_list_bounds(), process_outer_partition() returns an already-assigned -- merged partition when re-called with plt1_adv_p1 for the second list value --- '0001' of that partitin +-- '0001' of that partition EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; diff --git a/src/test/regress/sql/rangefuncs.sql b/src/test/regress/sql/rangefuncs.sql index ae3119a959eb8..3c436028daffd 100644 --- a/src/test/regress/sql/rangefuncs.sql +++ b/src/test/regress/sql/rangefuncs.sql @@ -629,6 +629,22 @@ explain (verbose, costs off) select * from testrngfunc(); select * from testrngfunc(); +create or replace function testrngfunc() returns setof rngfunc_type as $$ + select 1, 2 union select 
3, 4 order by 1; +$$ language sql immutable; + +explain (verbose, costs off) +select testrngfunc(); +select testrngfunc(); +explain (verbose, costs off) +select * from testrngfunc(); +select * from testrngfunc(); + +-- Check a couple of error cases while we're here +select * from testrngfunc() as t(f1 int8,f2 int8); -- fail, composite result +select * from pg_get_keywords() as t(f1 int8,f2 int8); -- fail, OUT params +select * from sin(3) as t(f1 int8,f2 int8); -- fail, scalar result type + drop type rngfunc_type cascade; -- diff --git a/src/test/regress/sql/reindex_catalog.sql b/src/test/regress/sql/reindex_catalog.sql index 87ecf52244f1e..8203641cf9d2d 100644 --- a/src/test/regress/sql/reindex_catalog.sql +++ b/src/test/regress/sql/reindex_catalog.sql @@ -39,3 +39,14 @@ REINDEX INDEX pg_index_indexrelid_index; -- non-mapped, non-shared, critical REINDEX INDEX pg_index_indrelid_index; -- non-mapped, non-shared, non-critical REINDEX INDEX pg_database_oid_index; -- mapped, shared, critical REINDEX INDEX pg_shdescription_o_c_index; -- mapped, shared, non-critical + +-- Check the same REINDEX INDEX statements under parallelism. 
+BEGIN; +SET min_parallel_table_scan_size = 0; +REINDEX INDEX pg_class_oid_index; -- mapped, non-shared, critical +REINDEX INDEX pg_class_relname_nsp_index; -- mapped, non-shared, non-critical +REINDEX INDEX pg_index_indexrelid_index; -- non-mapped, non-shared, critical +REINDEX INDEX pg_index_indrelid_index; -- non-mapped, non-shared, non-critical +REINDEX INDEX pg_database_oid_index; -- mapped, shared, critical +REINDEX INDEX pg_shdescription_o_c_index; -- mapped, shared, non-critical +ROLLBACK; diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql index f8d947af9e80d..9781e590a30c6 100644 --- a/src/test/regress/sql/stats_ext.sql +++ b/src/test/regress/sql/stats_ext.sql @@ -72,12 +72,14 @@ ANALYZE ab1; ALTER TABLE ab1 ALTER a SET STATISTICS -1; -- setting statistics target 0 skips the statistics, without printing any message, so check catalog ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; +\d ab1 ANALYZE ab1; SELECT stxname, stxdndistinct, stxddependencies, stxdmcv FROM pg_statistic_ext s, pg_statistic_ext_data d WHERE s.stxname = 'ab1_a_b_stats' AND d.stxoid = s.oid; ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; +\d+ ab1 -- partial analyze doesn't build stats either ANALYZE ab1 (a); ANALYZE ab1; diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql index cce8ebdb3d9fc..a25cb6fc5c536 100644 --- a/src/test/regress/sql/subselect.sql +++ b/src/test/regress/sql/subselect.sql @@ -509,6 +509,23 @@ select * from int8_tbl where q1 in (select c1 from inner_text); rollback; -- to get rid of the bogus operator +-- +-- Test resolution of hashed vs non-hashed implementation of EXISTS subplan +-- +explain (costs off) +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0); +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0); + +explain (costs off) +select count(*) from tenk1 t +where (exists(select 1 
from tenk1 k where k.unique1 = t.unique2) or ten < 0) + and thousand = 1; +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0) + and thousand = 1; + -- -- Test case for planner bug with nested EXISTS handling -- diff --git a/src/test/regress/sql/sysviews.sql b/src/test/regress/sql/sysviews.sql index ac4a0e1cbba7e..b9b875bc6abc2 100644 --- a/src/test/regress/sql/sysviews.sql +++ b/src/test/regress/sql/sysviews.sql @@ -37,6 +37,9 @@ select count(*) = 0 as ok from pg_prepared_statements; -- See also prepared_xacts.sql select count(*) >= 0 as ok from pg_prepared_xacts; +-- There must be only one record +select count(*) = 1 as ok from pg_stat_wal; + -- This is to record the prevailing planner enable_foo settings during -- a regression test run. select name, setting from pg_settings where name like 'enable%'; diff --git a/src/test/regress/sql/timestamp.sql b/src/test/regress/sql/timestamp.sql index 7b58c3cfa5fc9..727ee500845c7 100644 --- a/src/test/regress/sql/timestamp.sql +++ b/src/test/regress/sql/timestamp.sql @@ -240,4 +240,7 @@ SELECT '' AS to_char_12, to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff ) d(d); -- timestamp numeric fields constructor -SELECT make_timestamp(2014,12,28,6,30,45.887); +SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887); +SELECT make_timestamp(-44, 3, 15, 12, 30, 15); +-- should fail +select make_timestamp(0, 7, 15, 12, 30, 15); diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql index e228d0a8a5b6e..8f66df9f3b41e 100644 --- a/src/test/regress/sql/triggers.sql +++ b/src/test/regress/sql/triggers.sql @@ -1749,6 +1749,41 @@ select tgrelid::regclass, count(*) from pg_trigger group by tgrelid::regclass order by tgrelid::regclass; drop table trg_clone; +-- Test the interaction between ALTER TABLE .. DISABLE TRIGGER and +-- both kinds of inheritance. Historically, legacy inheritance has +-- not recursed to children, so that behavior is preserved. 
+create table parent (a int); +create table child1 () inherits (parent); +create function trig_nothing() returns trigger language plpgsql + as $$ begin return null; end $$; +create trigger tg after insert on parent + for each row execute function trig_nothing(); +create trigger tg after insert on child1 + for each row execute function trig_nothing(); +alter table parent disable trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; +alter table only parent enable always trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; +drop table parent, child1; + +create table parent (a int) partition by list (a); +create table child1 partition of parent for values in (1); +create trigger tg after insert on parent + for each row execute procedure trig_nothing(); +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; +alter table only parent enable always trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; +drop table parent, child1; + + -- -- Test the interaction between transition tables and both kinds of -- inheritance. 
We'll dump the contents of the transition tables in a diff --git a/src/test/regress/sql/window.sql b/src/test/regress/sql/window.sql index af206ca4664e2..9485aebce85c2 100644 --- a/src/test/regress/sql/window.sql +++ b/src/test/regress/sql/window.sql @@ -936,6 +936,28 @@ SELECT lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) FROM empsalary; +-- Test incremental sorting +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + empno, + salary, + enroll_date, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp + FROM empsalary) emp +WHERE first_emp = 1 OR last_emp = 1; + +SELECT * FROM + (SELECT depname, + empno, + salary, + enroll_date, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp + FROM empsalary) emp +WHERE first_emp = 1 OR last_emp = 1; + -- cleanup DROP TABLE empsalary; diff --git a/src/test/regress/sql/with.sql b/src/test/regress/sql/with.sql index f85645efdee67..2eea297a7198b 100644 --- a/src/test/regress/sql/with.sql +++ b/src/test/regress/sql/with.sql @@ -308,22 +308,22 @@ insert into graph values (4, 5, 'arc 4 -> 5'), (5, 1, 'arc 5 -> 1'); -with recursive search_graph(f, t, label, path, cycle) as ( - select *, array[row(g.f, g.t)], false from graph g +with recursive search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g union all - select g.*, path || row(g.f, g.t), row(g.f, g.t) = any(path) + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) from graph g, search_graph sg - where g.f = sg.t and not cycle + where g.f = sg.t and not is_cycle ) select * from search_graph; -- ordering by the path column has same effect as SEARCH DEPTH FIRST -with recursive search_graph(f, t, label, path, cycle) as ( - select *, array[row(g.f, g.t)], false from graph g +with recursive 
search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g union all - select g.*, path || row(g.f, g.t), row(g.f, g.t) = any(path) + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) from graph g, search_graph sg - where g.f = sg.t and not cycle + where g.f = sg.t and not is_cycle ) select * from search_graph order by path; diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl index 366a7a94350ac..d1e407aacb322 100644 --- a/src/test/subscription/t/100_bugs.pl +++ b/src/test/subscription/t/100_bugs.pl @@ -3,7 +3,7 @@ use warnings; use PostgresNode; use TestLib; -use Test::More tests => 3; +use Test::More tests => 5; # Bug #15114 @@ -100,3 +100,56 @@ ); $node_publisher->stop('fast'); + +# Bug #16643 - https://postgr.es/m/16643-eaadeb2a1a58d28c@postgresql.org +# +# Initial sync doesn't complete; the protocol was not being followed per +# expectations after commit 07082b08cc5d. +my $node_twoways = get_new_node('twoways'); +$node_twoways->init(allows_streaming => 'logical'); +$node_twoways->start; +for my $db (qw(d1 d2)) +{ + $node_twoways->safe_psql('postgres', "CREATE DATABASE $db"); + $node_twoways->safe_psql($db, "CREATE TABLE t (f int)"); + $node_twoways->safe_psql($db, "CREATE TABLE t2 (f int)"); +} + +my $rows = 3000; +$node_twoways->safe_psql( + 'd1', qq{ + INSERT INTO t SELECT * FROM generate_series(1, $rows); + INSERT INTO t2 SELECT * FROM generate_series(1, $rows); + CREATE PUBLICATION testpub FOR TABLE t; + SELECT pg_create_logical_replication_slot('testslot', 'pgoutput'); + }); + +$node_twoways->safe_psql('d2', + "CREATE SUBSCRIPTION testsub CONNECTION \$\$" + . $node_twoways->connstr('d1') + . "\$\$ PUBLICATION testpub WITH (create_slot=false, " + . 
"slot_name='testslot')"); +$node_twoways->safe_psql( + 'd1', qq{ + INSERT INTO t SELECT * FROM generate_series(1, $rows); + INSERT INTO t2 SELECT * FROM generate_series(1, $rows); + }); +$node_twoways->safe_psql( + 'd1', 'ALTER PUBLICATION testpub ADD TABLE t2'); +$node_twoways->safe_psql( + 'd2', 'ALTER SUBSCRIPTION testsub REFRESH PUBLICATION'); + +# We cannot rely solely on wait_for_catchup() here; it isn't sufficient +# when tablesync workers might still be running. So in addition to that, +# verify that tables are synced. +# XXX maybe this should be integrated in wait_for_catchup() itself. +$node_twoways->wait_for_catchup('testsub'); +my $synced_query = + "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');"; +$node_twoways->poll_query_until('d2', $synced_query) + or die "Timed out while waiting for subscriber to synchronize data"; + +is($node_twoways->safe_psql('d2', "SELECT count(f) FROM t"), + $rows * 2, "2x$rows rows in t"); +is($node_twoways->safe_psql('d2', "SELECT count(f) FROM t2"), + $rows * 2, "2x$rows rows in t2"); diff --git a/src/test/thread/.gitignore b/src/test/thread/.gitignore deleted file mode 100644 index 1d54d546a8cec..0000000000000 --- a/src/test/thread/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/thread_test diff --git a/src/test/thread/Makefile b/src/test/thread/Makefile deleted file mode 100644 index a13c0c6cf5302..0000000000000 --- a/src/test/thread/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -#------------------------------------------------------------------------- -# -# Makefile for tools/thread -# -# Copyright (c) 2003-2020, PostgreSQL Global Development Group -# -# src/test/thread/Makefile -# -#------------------------------------------------------------------------- - -subdir = src/tools/thread -top_builddir = ../../.. 
-include $(top_builddir)/src/Makefile.global - -override CFLAGS += $(PTHREAD_CFLAGS) - -all: thread_test - -thread_test: thread_test.o -# no need for $LIBS, might not be compiled yet - $(CC) $(CFLAGS) $^ $(LDFLAGS) $(LDFLAGS_EX) $(PTHREAD_LIBS) -o $@$(X) - -clean distclean maintainer-clean: - rm -f thread_test$(X) thread_test.o diff --git a/src/test/thread/README b/src/test/thread/README deleted file mode 100644 index 4da23440f6b53..0000000000000 --- a/src/test/thread/README +++ /dev/null @@ -1,54 +0,0 @@ -src/test/thread/README - -Threading -========= - -This program is run by configure to determine if threading is -properly supported on the platform. - -You can run the program manually to see details, which shows if your -native libc functions are thread-safe, or if we use *_r functions or -thread locking. - -To use this program manually, you must: - - o run "configure" - o compile the main source tree - o compile and run this program - -If your platform requires special thread flags that are not tested by -/config/acx_pthread.m4, add PTHREAD_CFLAGS and PTHREAD_LIBS defines to -your template/${port} file. - -Windows Systems -=============== - -Windows systems do not vary in their thread-safeness in the same way that -other systems might, nor do they generally have pthreads installed, hence -on Windows this test is skipped by the configure program (pthreads is -required by the test program, but not PostgreSQL itself). If you do wish -to test your system however, you can do so as follows: - -1) Install pthreads in you Mingw/Msys environment. You can download pthreads - from ftp://sources.redhat.com/pub/pthreads-win32/. - -2) Build the test program: - - gcc -o thread_test.exe \ - -D_REENTRANT \ - -D_THREAD_SAFE \ - -D_POSIX_PTHREAD_SEMANTICS \ - -I../../../src/include/port/win32 \ - thread_test.c \ - -lws2_32 \ - -lpthreadgc2 - -3) Run thread_test.exe. 
You should see output like: - - dpage@PC30:/cvs/pgsql/src/tools/thread$ ./thread_test - Your GetLastError() is thread-safe. - Your system uses strerror() which is thread-safe. - getpwuid_r()/getpwuid() are not applicable to Win32 platforms. - Your system uses gethostbyname which is thread-safe. - - Your platform is thread-safe. diff --git a/src/timezone/Makefile b/src/timezone/Makefile index 715b63cee0cdb..2b5d8ecbef810 100644 --- a/src/timezone/Makefile +++ b/src/timezone/Makefile @@ -56,7 +56,7 @@ zic: $(ZICOBJS) | submake-libpgport install: all installdirs ifeq (,$(with_system_tzdata)) - $(ZIC) -d '$(DESTDIR)$(datadir)/timezone' -b slim $(ZIC_OPTIONS) $(TZDATAFILES) + $(ZIC) -d '$(DESTDIR)$(datadir)/timezone' $(ZIC_OPTIONS) $(TZDATAFILES) endif $(MAKE) -C tznames $@ diff --git a/src/timezone/README b/src/timezone/README index 8af44449329a9..f588d1f5add21 100644 --- a/src/timezone/README +++ b/src/timezone/README @@ -55,7 +55,7 @@ match properly on the old version. Time Zone code ============== -The code in this directory is currently synced with tzcode release 2020a. +The code in this directory is currently synced with tzcode release 2020d. There are many cosmetic (and not so cosmetic) differences from the original tzcode library, but diffs in the upstream version should usually be propagated to our version. Here are some notes about that. diff --git a/src/timezone/data/tzdata.zi b/src/timezone/data/tzdata.zi index e7c31b68c8671..09afb428717f6 100644 --- a/src/timezone/data/tzdata.zi +++ b/src/timezone/data/tzdata.zi @@ -1,4 +1,4 @@ -# version 2020a +# version 2020d # This zic input file is in the public domain. 
R d 1916 o - Jun 14 23s 1 S R d 1916 1919 - O Su>=1 23s 0 - @@ -22,7 +22,7 @@ R d 1978 o - Mar 24 1 1 S R d 1978 o - S 22 3 0 - R d 1980 o - Ap 25 0 1 S R d 1980 o - O 31 2 0 - -Z Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:1 +Z Africa/Algiers 0:12:12 - LMT 1891 Mar 16 0:9:21 - PMT 1911 Mar 11 0 d WE%sT 1940 F 25 2 1 d CE%sT 1946 O 7 @@ -193,7 +193,7 @@ R M 2021 o - May 16 2 0 - R M 2022 o - Mar 27 3 -1 - R M 2022 o - May 8 2 0 - R M 2023 o - Mar 19 3 -1 - -R M 2023 o - Ap 23 2 0 - +R M 2023 o - Ap 30 2 0 - R M 2024 o - Mar 10 3 -1 - R M 2024 o - Ap 14 2 0 - R M 2025 o - F 23 3 -1 - @@ -209,7 +209,7 @@ R M 2029 o - F 18 2 0 - R M 2029 o - D 30 3 -1 - R M 2030 o - F 10 2 0 - R M 2030 o - D 22 3 -1 - -R M 2031 o - Ja 26 2 0 - +R M 2031 o - F 2 2 0 - R M 2031 o - D 14 3 -1 - R M 2032 o - Ja 18 2 0 - R M 2032 o - N 28 3 -1 - @@ -225,7 +225,7 @@ R M 2036 o - N 23 2 0 - R M 2037 o - O 4 3 -1 - R M 2037 o - N 15 2 0 - R M 2038 o - S 26 3 -1 - -R M 2038 o - O 31 2 0 - +R M 2038 o - N 7 2 0 - R M 2039 o - S 18 3 -1 - R M 2039 o - O 23 2 0 - R M 2040 o - S 2 3 -1 - @@ -241,7 +241,7 @@ R M 2044 o - Au 28 2 0 - R M 2045 o - Jul 9 3 -1 - R M 2045 o - Au 20 2 0 - R M 2046 o - Jul 1 3 -1 - -R M 2046 o - Au 5 2 0 - +R M 2046 o - Au 12 2 0 - R M 2047 o - Jun 23 3 -1 - R M 2047 o - Jul 28 2 0 - R M 2048 o - Jun 7 3 -1 - @@ -257,7 +257,7 @@ R M 2052 o - Jun 2 2 0 - R M 2053 o - Ap 13 3 -1 - R M 2053 o - May 25 2 0 - R M 2054 o - Ap 5 3 -1 - -R M 2054 o - May 10 2 0 - +R M 2054 o - May 17 2 0 - R M 2055 o - Mar 28 3 -1 - R M 2055 o - May 2 2 0 - R M 2056 o - Mar 12 3 -1 - @@ -273,7 +273,7 @@ R M 2060 o - Mar 7 2 0 - R M 2061 o - Ja 16 3 -1 - R M 2061 o - F 27 2 0 - R M 2062 o - Ja 8 3 -1 - -R M 2062 o - F 12 2 0 - +R M 2062 o - F 19 2 0 - R M 2062 o - D 31 3 -1 - R M 2063 o - F 4 2 0 - R M 2063 o - D 16 3 -1 - @@ -289,7 +289,7 @@ R M 2067 o - D 11 2 0 - R M 2068 o - O 21 3 -1 - R M 2068 o - D 2 2 0 - R M 2069 o - O 13 3 -1 - -R M 2069 o - N 17 2 0 - +R M 2069 o - N 24 2 0 - R M 2070 o 
- O 5 3 -1 - R M 2070 o - N 9 2 0 - R M 2071 o - S 20 3 -1 - @@ -305,7 +305,7 @@ R M 2075 o - S 15 2 0 - R M 2076 o - Jul 26 3 -1 - R M 2076 o - S 6 2 0 - R M 2077 o - Jul 18 3 -1 - -R M 2077 o - Au 22 2 0 - +R M 2077 o - Au 29 2 0 - R M 2078 o - Jul 10 3 -1 - R M 2078 o - Au 14 2 0 - R M 2079 o - Jun 25 3 -1 - @@ -315,13 +315,13 @@ R M 2080 o - Jul 21 2 0 - R M 2081 o - Jun 1 3 -1 - R M 2081 o - Jul 13 2 0 - R M 2082 o - May 24 3 -1 - -R M 2082 o - Jun 28 2 0 - +R M 2082 o - Jul 5 2 0 - R M 2083 o - May 16 3 -1 - R M 2083 o - Jun 20 2 0 - R M 2084 o - Ap 30 3 -1 - R M 2084 o - Jun 11 2 0 - R M 2085 o - Ap 22 3 -1 - -R M 2085 o - May 27 2 0 - +R M 2085 o - Jun 3 2 0 - R M 2086 o - Ap 14 3 -1 - R M 2086 o - May 19 2 0 - R M 2087 o - Mar 30 3 -1 - @@ -426,7 +426,12 @@ Z Antarctica/Casey 0 - -00 1969 11 - +11 2012 F 21 17u 8 - +08 2016 O 22 11 - +11 2018 Mar 11 4 -8 - +08 +8 - +08 2018 O 7 4 +11 - +11 2019 Mar 17 3 +8 - +08 2019 O 4 3 +11 - +11 2020 Mar 8 3 +8 - +08 2020 O 4 0:1 +11 - +11 Z Antarctica/Davis 0 - -00 1957 Ja 13 7 - +07 1964 N 0 - -00 1969 F @@ -1091,10 +1096,10 @@ R P 2004 o - O 1 1 0 - R P 2005 o - O 4 2 0 - R P 2006 2007 - Ap 1 0 1 S R P 2006 o - S 22 0 0 - -R P 2007 o - S Th>=8 2 0 - +R P 2007 o - S 13 2 0 - R P 2008 2009 - Mar lastF 0 1 S R P 2008 o - S 1 0 0 - -R P 2009 o - S F>=1 1 0 - +R P 2009 o - S 4 1 0 - R P 2010 o - Mar 26 0 1 S R P 2010 o - Au 11 0 0 - R P 2011 o - Ap 1 0:1 1 S @@ -1103,12 +1108,16 @@ R P 2011 o - Au 30 0 1 S R P 2011 o - S 30 0 0 - R P 2012 2014 - Mar lastTh 24 1 S R P 2012 o - S 21 1 0 - -R P 2013 o - S F>=21 0 0 - -R P 2014 2015 - O F>=21 0 0 - -R P 2015 o - Mar lastF 24 1 S +R P 2013 o - S 27 0 0 - +R P 2014 o - O 24 0 0 - +R P 2015 o - Mar 28 0 1 S +R P 2015 o - O 23 1 0 - R P 2016 2018 - Mar Sa>=24 1 1 S -R P 2016 ma - O lastSa 1 0 - -R P 2019 ma - Mar lastF 0 1 S +R P 2016 2018 - O Sa>=24 1 0 - +R P 2019 o - Mar 29 0 1 S +R P 2019 o - O Sa>=24 0 0 - +R P 2020 ma - Mar Sa>=24 0 1 S +R P 2020 ma - O Sa>=24 1 0 - Z 
Asia/Gaza 2:17:52 - LMT 1900 O 2 Z EET/EEST 1948 May 15 2 K EE%sT 1967 Jun 5 @@ -1399,8 +1408,9 @@ Z Antarctica/Macquarie 0 - -00 1899 N 10 AU AE%sT 1919 Ap 1 0s 0 - -00 1948 Mar 25 10 AU AE%sT 1967 -10 AT AE%sT 2010 Ap 4 3 -11 - +11 +10 AT AE%sT 2010 +10 1 AEDT 2011 +10 AT AE%sT Z Indian/Christmas 7:2:52 - LMT 1895 F 7 - +07 Z Indian/Cocos 6:27:40 - LMT 1900 @@ -1415,7 +1425,9 @@ R FJ 2012 2013 - Ja Su>=18 3 0 - R FJ 2014 o - Ja Su>=18 2 0 - R FJ 2014 2018 - N Su>=1 2 1 - R FJ 2015 ma - Ja Su>=12 3 0 - -R FJ 2019 ma - N Su>=8 2 1 - +R FJ 2019 o - N Su>=8 2 1 - +R FJ 2020 o - D 20 2 1 - +R FJ 2021 ma - N Su>=8 2 1 - Z Pacific/Fiji 11:55:44 - LMT 1915 O 26 12 FJ +12/+13 Z Pacific/Gambier -8:59:48 - LMT 1912 O @@ -1992,8 +2004,8 @@ R F 1945 o - Ap 2 2 2 M R F 1945 o - S 16 3 0 - R F 1976 o - Mar 28 1 1 S R F 1976 o - S 26 1 0 - -Z Europe/Paris 0:9:21 - LMT 1891 Mar 15 0:1 -0:9:21 - PMT 1911 Mar 11 0:1 +Z Europe/Paris 0:9:21 - LMT 1891 Mar 16 +0:9:21 - PMT 1911 Mar 11 0 F WE%sT 1940 Jun 14 23 1 c CE%sT 1944 Au 25 0 F WE%sT 1945 S 16 3 @@ -2045,29 +2057,30 @@ Z Europe/Athens 1:34:52 - LMT 1895 S 14 1 g CE%sT 1944 Ap 4 2 g EE%sT 1981 2 E EE%sT -R h 1918 o - Ap 1 3 1 S -R h 1918 o - S 16 3 0 - -R h 1919 o - Ap 15 3 1 S -R h 1919 o - N 24 3 0 - +R h 1918 1919 - Ap 15 2 1 S +R h 1918 1920 - S M>=15 3 0 - +R h 1920 o - Ap 5 2 1 S R h 1945 o - May 1 23 1 S -R h 1945 o - N 1 0 0 - +R h 1945 o - N 1 1 0 - R h 1946 o - Mar 31 2s 1 S -R h 1946 1949 - O Su>=1 2s 0 - +R h 1946 o - O 7 2 0 - R h 1947 1949 - Ap Su>=4 2s 1 S -R h 1950 o - Ap 17 2s 1 S -R h 1950 o - O 23 2s 0 - -R h 1954 1955 - May 23 0 1 S -R h 1954 1955 - O 3 0 0 - -R h 1956 o - Jun Su>=1 0 1 S -R h 1956 o - S lastSu 0 0 - -R h 1957 o - Jun Su>=1 1 1 S -R h 1957 o - S lastSu 3 0 - -R h 1980 o - Ap 6 1 1 S -Z Europe/Budapest 1:16:20 - LMT 1890 O +R h 1947 1949 - O Su>=1 2s 0 - +R h 1954 o - May 23 0 1 S +R h 1954 o - O 3 0 0 - +R h 1955 o - May 22 2 1 S +R h 1955 o - O 2 3 0 - +R h 1956 1957 - Jun Su>=1 2 1 S +R h 
1956 1957 - S lastSu 3 0 - +R h 1980 o - Ap 6 0 1 S +R h 1980 o - S 28 1 0 - +R h 1981 1983 - Mar lastSu 0 1 S +R h 1981 1983 - S lastSu 1 0 - +Z Europe/Budapest 1:16:20 - LMT 1890 N 1 c CE%sT 1918 -1 h CE%sT 1941 Ap 8 +1 h CE%sT 1941 Ap 7 23 1 c CE%sT 1945 -1 h CE%sT 1980 S 28 2s +1 h CE%sT 1984 1 E CE%sT R w 1917 1919 - F 19 23 1 - R w 1917 o - O 21 1 0 - @@ -2223,8 +2236,8 @@ Z Europe/Chisinau 1:55:20 - LMT 1880 2 R EE%sT 1992 2 e EE%sT 1997 2 MD EE%sT -Z Europe/Monaco 0:29:32 - LMT 1891 Mar 15 -0:9:21 - PMT 1911 Mar 11 +Z Europe/Monaco 0:29:32 - LMT 1892 Jun +0:9:21 - PMT 1911 Mar 29 0 F WE%sT 1945 S 16 3 1 F CE%sT 1977 1 E CE%sT @@ -3413,12 +3426,12 @@ Z America/Inuvik 0 - -00 1953 Z America/Whitehorse -9:0:12 - LMT 1900 Au 20 -9 Y Y%sT 1967 May 28 -8 Y P%sT 1980 --8 C P%sT 2020 Mar 8 2 +-8 C P%sT 2020 N -7 - MST Z America/Dawson -9:17:40 - LMT 1900 Au 20 -9 Y Y%sT 1973 O 28 -8 Y P%sT 1980 --8 C P%sT 2020 Mar 8 2 +-8 C P%sT 2020 N -7 - MST R m 1939 o - F 5 0 1 D R m 1939 o - Jun 25 0 0 S diff --git a/src/timezone/strftime.c b/src/timezone/strftime.c index 4b942c393a344..dd6c7db869580 100644 --- a/src/timezone/strftime.c +++ b/src/timezone/strftime.c @@ -128,12 +128,22 @@ size_t pg_strftime(char *s, size_t maxsize, const char *format, const struct pg_tm *t) { char *p; + int saved_errno = errno; enum warn warn = IN_NONE; p = _fmt(format, t, s, s + maxsize, &warn); + if (!p) + { + errno = EOVERFLOW; + return 0; + } if (p == s + maxsize) + { + errno = ERANGE; return 0; + } *p = '\0'; + errno = saved_errno; return p - s; } diff --git a/src/timezone/zic.c b/src/timezone/zic.c index 10c5b4bfb5b55..0ea6ead2db3ae 100644 --- a/src/timezone/zic.c +++ b/src/timezone/zic.c @@ -37,10 +37,6 @@ typedef int64 zic_t; #define MKDIR_UMASK 0755 #endif #endif -#ifndef AT_SYMLINK_FOLLOW -#define linkat(fromdir, from, todir, to, flag) \ - (itssymlink(from) ? (errno = ENOTSUP, -1) : link(from, to)) -#endif /* Port to native MS-Windows and to ancient UNIX. 
*/ #if !defined S_ISDIR && defined S_IFDIR && defined S_IFMT #define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR) @@ -66,7 +62,6 @@ struct rule zic_t r_loyear; /* for example, 1986 */ zic_t r_hiyear; /* for example, 1986 */ - const char *r_yrtype; bool r_lowasnum; bool r_hiwasnum; @@ -116,7 +111,11 @@ struct zone zic_t z_untiltime; }; -extern int link(const char *fromname, const char *toname); +extern int link(const char *target, const char *linkname); +#ifndef AT_SYMLINK_FOLLOW +#define linkat(targetdir, target, linknamedir, linkname, flag) \ + (itssymlink(target) ? (errno = ENOTSUP, -1) : link(target, linkname)) +#endif static void memory_exhausted(const char *msg) pg_attribute_noreturn(); static void verror(const char *string, va_list args) pg_attribute_printf(1, 0); @@ -154,7 +153,6 @@ static void rulesub(struct rule *rp, const char *typep, const char *monthp, const char *dayp, const char *timep); static zic_t tadd(zic_t t1, zic_t t2); -static bool yearistype(zic_t year, const char *type); /* Bound on length of what %z can expand to. */ enum @@ -253,8 +251,8 @@ static int typecnt; * Which fields are which on a Link line. */ -#define LF_FROM 1 -#define LF_TO 2 +#define LF_TARGET 1 +#define LF_LINKNAME 2 #define LINK_FIELDS 3 /* @@ -292,8 +290,8 @@ struct link { const char *l_filename; lineno_t l_linenum; - const char *l_from; - const char *l_to; + const char *l_target; + const char *l_linkname; }; static struct link *links; @@ -634,11 +632,10 @@ static const char *lcltime; static const char *directory; static const char *leapsec; static const char *tzdefault; -static const char *yitcommand; /* -1 if the TZif output file should be slim, 0 if default, 1 if the - output should be fat for backward compatibility. Currently the - default is fat, although this may change. */ + output should be fat for backward compatibility. ZIC_BLOAT_DEFAULT + determines the default. 
*/ static int bloat; static bool @@ -648,7 +645,7 @@ want_bloat(void) } #ifndef ZIC_BLOAT_DEFAULT -#define ZIC_BLOAT_DEFAULT "fat" +#define ZIC_BLOAT_DEFAULT "slim" #endif int @@ -747,18 +744,7 @@ main(int argc, char **argv) tzdefault = optarg; break; case 'y': - if (yitcommand == NULL) - { - warning(_("-y is obsolescent")); - yitcommand = strdup(optarg); - } - else - { - fprintf(stderr, - _("%s: More than one -y option specified\n"), - progname); - return EXIT_FAILURE; - } + warning(_("-y ignored")); break; case 'L': if (leapsec == NULL) @@ -802,13 +788,20 @@ main(int argc, char **argv) if (optind == argc - 1 && strcmp(argv[optind], "=") == 0) usage(stderr, EXIT_FAILURE); /* usage message by request */ if (bloat == 0) - bloat = strcmp(ZIC_BLOAT_DEFAULT, "slim") == 0 ? -1 : 1; + { + static char const bloat_default[] = ZIC_BLOAT_DEFAULT; + + if (strcmp(bloat_default, "slim") == 0) + bloat = -1; + else if (strcmp(bloat_default, "fat") == 0) + bloat = 1; + else + abort(); /* Configuration error. 
*/ + } if (directory == NULL) directory = "data"; if (tzdefault == NULL) tzdefault = TZDEFAULT; - if (yitcommand == NULL) - yitcommand = "yearistype"; if (optind < argc && leapsec != NULL) { @@ -838,11 +831,11 @@ main(int argc, char **argv) for (i = 0; i < nlinks; ++i) { eat(links[i].l_filename, links[i].l_linenum); - dolink(links[i].l_from, links[i].l_to, false); + dolink(links[i].l_target, links[i].l_linkname, false); if (noise) for (j = 0; j < nlinks; ++j) - if (strcmp(links[i].l_to, - links[j].l_from) == 0) + if (strcmp(links[i].l_linkname, + links[j].l_target) == 0) warning(_("link to link")); } if (lcltime != NULL) @@ -953,7 +946,7 @@ namecheck(const char *name) */ #ifdef HAVE_SYMLINK static char * -relname(char const *from, char const *to) +relname(char const *target, char const *linkname) { size_t i, taillen, @@ -961,26 +954,26 @@ relname(char const *from, char const *to) size_t dir_len = 0, dotdots = 0, linksize = SIZE_MAX; - char const *f = from; + char const *f = target; char *result = NULL; - if (*to == '/') + if (*linkname == '/') { /* Make F absolute too. */ size_t len = strlen(directory); bool needslash = len && directory[len - 1] != '/'; - linksize = len + needslash + strlen(from) + 1; + linksize = len + needslash + strlen(target) + 1; f = result = emalloc(linksize); strcpy(result, directory); result[len] = '/'; - strcpy(result + len + needslash, from); + strcpy(result + len + needslash, target); } - for (i = 0; f[i] && f[i] == to[i]; i++) + for (i = 0; f[i] && f[i] == linkname[i]; i++) if (f[i] == '/') dir_len = i + 1; - for (; to[i]; i++) - dotdots += to[i] == '/' && to[i - 1] != '/'; + for (; linkname[i]; i++) + dotdots += linkname[i] == '/' && linkname[i - 1] != '/'; taillen = strlen(f + dir_len); dotdotetcsize = 3 * dotdots + taillen + 1; if (dotdotetcsize <= linksize) @@ -998,62 +991,65 @@ relname(char const *from, char const *to) /* Hard link FROM to TO, following any symbolic links. Return 0 if successful, an error number otherwise. 
*/ static int -hardlinkerr(char const *from, char const *to) +hardlinkerr(char const *target, char const *linkname) { - int r = linkat(AT_FDCWD, from, AT_FDCWD, to, AT_SYMLINK_FOLLOW); + int r = linkat(AT_FDCWD, target, AT_FDCWD, linkname, AT_SYMLINK_FOLLOW); return r == 0 ? 0 : errno; } static void -dolink(char const *fromfield, char const *tofield, bool staysymlink) +dolink(char const *target, char const *linkname, bool staysymlink) { - bool todirs_made = false; + bool remove_only = strcmp(target, "-") == 0; + bool linkdirs_made = false; int link_errno; /* * We get to be careful here since there's a fair chance of root running * us. */ - if (itsdir(fromfield)) + if (!remove_only && itsdir(target)) { - fprintf(stderr, _("%s: link from %s/%s failed: %s\n"), - progname, directory, fromfield, strerror(EPERM)); + fprintf(stderr, _("%s: linking target %s/%s failed: %s\n"), + progname, directory, target, strerror(EPERM)); exit(EXIT_FAILURE); } if (staysymlink) - staysymlink = itssymlink(tofield); - if (remove(tofield) == 0) - todirs_made = true; + staysymlink = itssymlink(linkname); + if (remove(linkname) == 0) + linkdirs_made = true; else if (errno != ENOENT) { char const *e = strerror(errno); fprintf(stderr, _("%s: Can't remove %s/%s: %s\n"), - progname, directory, tofield, e); + progname, directory, linkname, e); exit(EXIT_FAILURE); } - link_errno = staysymlink ? ENOTSUP : hardlinkerr(fromfield, tofield); - if (link_errno == ENOENT && !todirs_made) + if (remove_only) + return; + link_errno = staysymlink ? ENOTSUP : hardlinkerr(target, linkname); + if (link_errno == ENOENT && !linkdirs_made) { - mkdirs(tofield, true); - todirs_made = true; - link_errno = hardlinkerr(fromfield, tofield); + mkdirs(linkname, true); + linkdirs_made = true; + link_errno = hardlinkerr(target, linkname); } if (link_errno != 0) { #ifdef HAVE_SYMLINK - bool absolute = *fromfield == '/'; - char *linkalloc = absolute ? NULL : relname(fromfield, tofield); - char const *contents = absolute ? 
fromfield : linkalloc; - int symlink_errno = symlink(contents, tofield) == 0 ? 0 : errno; + bool absolute = *target == '/'; + char *linkalloc = absolute ? NULL : relname(target, linkname); + char const *contents = absolute ? target : linkalloc; + int symlink_errno = symlink(contents, linkname) == 0 ? 0 : errno; - if (!todirs_made + if (!linkdirs_made && (symlink_errno == ENOENT || symlink_errno == ENOTSUP)) { - mkdirs(tofield, true); + mkdirs(linkname, true); if (symlink_errno == ENOENT) - symlink_errno = symlink(contents, tofield) == 0 ? 0 : errno; + symlink_errno = symlink(contents, linkname) == 0 ? 0 : errno; } free(linkalloc); if (symlink_errno == 0) @@ -1069,28 +1065,28 @@ dolink(char const *fromfield, char const *tofield, bool staysymlink) *tp; int c; - fp = fopen(fromfield, "rb"); + fp = fopen(target, "rb"); if (!fp) { char const *e = strerror(errno); fprintf(stderr, _("%s: Can't read %s/%s: %s\n"), - progname, directory, fromfield, e); + progname, directory, target, e); exit(EXIT_FAILURE); } - tp = fopen(tofield, "wb"); + tp = fopen(linkname, "wb"); if (!tp) { char const *e = strerror(errno); fprintf(stderr, _("%s: Can't create %s/%s: %s\n"), - progname, directory, tofield, e); + progname, directory, linkname, e); exit(EXIT_FAILURE); } while ((c = getc(fp)) != EOF) putc(c, tp); - close_file(fp, directory, fromfield); - close_file(tp, directory, tofield); + close_file(fp, directory, target); + close_file(tp, directory, linkname); if (link_errno != ENOTSUP) warning(_("copy used because hard link failed: %s"), strerror(link_errno)); @@ -1806,17 +1802,17 @@ inlink(char **fields, int nfields) error(_("wrong number of fields on Link line")); return; } - if (*fields[LF_FROM] == '\0') + if (*fields[LF_TARGET] == '\0') { - error(_("blank FROM field on Link line")); + error(_("blank TARGET field on Link line")); return; } - if (!namecheck(fields[LF_TO])) + if (!namecheck(fields[LF_LINKNAME])) return; l.l_filename = filename; l.l_linenum = linenum; - l.l_from = 
ecpyalloc(fields[LF_FROM]); - l.l_to = ecpyalloc(fields[LF_TO]); + l.l_target = ecpyalloc(fields[LF_TARGET]); + l.l_linkname = ecpyalloc(fields[LF_LINKNAME]); links = growalloc(links, sizeof *links, nlinks, &nlinks_alloc); links[nlinks++] = l; } @@ -1932,18 +1928,11 @@ rulesub(struct rule *rp, const char *loyearp, const char *hiyearp, error(_("starting year greater than ending year")); return; } - if (*typep == '\0') - rp->r_yrtype = NULL; - else + if (*typep != '\0') { - if (rp->r_loyear == rp->r_hiyear) - { - error(_("typed single year")); - return; - } - warning(_("year type \"%s\" is obsolete; use \"-\" instead"), - typep); - rp->r_yrtype = ecpyalloc(typep); + error(_("year type \"%s\" is unsupported; use \"-\" instead"), + typep); + return; } /* @@ -2848,8 +2837,6 @@ stringzone(char *result, struct zone const *zpfirst, ptrdiff_t zonecount) rp = &zp->z_rules[i]; if (rp->r_hiwasnum || rp->r_hiyear != ZIC_MAX) continue; - if (rp->r_yrtype != NULL) - continue; if (!rp->r_isdst) { if (stdrp == NULL) @@ -3145,7 +3132,8 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) /* * Mark which rules to do in the current year. For those to - * do, calculate rpytime(rp, year); + * do, calculate rpytime(rp, year); The former TYPE field was + * also considered here. 
*/ for (j = 0; j < zp->z_nrules; ++j) { @@ -3153,8 +3141,7 @@ outzone(const struct zone *zpfirst, ptrdiff_t zonecount) eats(zp->z_filename, zp->z_linenum, rp->r_filename, rp->r_linenum); rp->r_todo = year >= rp->r_loyear && - year <= rp->r_hiyear && - yearistype(year, rp->r_yrtype); + year <= rp->r_hiyear; if (rp->r_todo) { rp->r_temp = rpytime(rp, year); @@ -3474,54 +3461,6 @@ adjleap(void) } } -static char * -shellquote(char *b, char const *s) -{ - *b++ = '\''; - while (*s) - { - if (*s == '\'') - *b++ = '\'', *b++ = '\\', *b++ = '\''; - *b++ = *s++; - } - *b++ = '\''; - return b; -} - -static bool -yearistype(zic_t year, const char *type) -{ - char *buf; - char *b; - int result; - - if (type == NULL || *type == '\0') - return true; - buf = emalloc(1 + 4 * strlen(yitcommand) + 2 - + INT_STRLEN_MAXIMUM(zic_t) + 2 + 4 * strlen(type) + 2); - b = shellquote(buf, yitcommand); - *b++ = ' '; - b += sprintf(b, INT64_FORMAT, year); - *b++ = ' '; - b = shellquote(b, type); - *b = '\0'; - result = system(buf); - if (WIFEXITED(result)) - { - int status = WEXITSTATUS(result); - - if (status <= 1) - { - free(buf); - return status == 0; - } - } - error(_("Wild result from command execution")); - fprintf(stderr, _("%s: command was '%s', result was %d\n"), - progname, buf, result); - exit(EXIT_FAILURE); -} - /* Is A a space character in the C locale? */ static bool is_space(char a) diff --git a/src/tools/PerfectHash.pm b/src/tools/PerfectHash.pm index 74fb1f2ef628a..964f79b71a27e 100644 --- a/src/tools/PerfectHash.pm +++ b/src/tools/PerfectHash.pm @@ -81,13 +81,13 @@ sub generate_hash_function # to calculate via shift-and-add, so don't change them without care. # (Commonly, random seeds are tried, but we want reproducible results # from this program so we don't do that.) 
- my $hash_mult1 = 31; + my $hash_mult1 = 257; my $hash_mult2; my $hash_seed1; my $hash_seed2; my @subresult; FIND_PARAMS: - foreach (127, 257, 521, 1033, 2053) + foreach (17, 31, 127, 8191) { $hash_mult2 = $_; # "foreach $hash_mult2" doesn't work for ($hash_seed1 = 0; $hash_seed1 < 10; $hash_seed1++) @@ -121,13 +121,16 @@ sub generate_hash_function { $f .= sprintf "%s(const void *key, size_t keylen)\n{\n", $funcname; } - $f .= sprintf "\tstatic const %s h[%d] = {\n", $elemtype, $nhash; + $f .= sprintf "\tstatic const %s h[%d] = {\n\t\t", $elemtype, $nhash; for (my $i = 0; $i < $nhash; $i++) { - $f .= sprintf "%s%6d,%s", - ($i % 8 == 0 ? "\t\t" : " "), - $hashtab[$i], - ($i % 8 == 7 ? "\n" : ""); + # Hash element. + $f .= sprintf "%d", $hashtab[$i]; + next if ($i == $nhash - 1); + + # Optional indentation and newline, with eight items per line. + $f .= sprintf ",%s", + ($i % 8 == 7 ? "\n\t\t" : ' ' x (6 - length($hashtab[$i]))); } $f .= sprintf "\n" if ($nhash % 8 != 0); $f .= sprintf "\t};\n\n"; diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES index 6ba9121e303a8..5206640341d59 100644 --- a/src/tools/RELEASE_CHANGES +++ b/src/tools/RELEASE_CHANGES @@ -73,7 +73,7 @@ but there may be reasons to do them at other times as well. 
to lower numbers, using renumber_oids.pl (see notes in bki.sgml) * Update config.guess and config.sub - (from http://savannah.gnu.org/projects/config) + (from https://savannah.gnu.org/projects/config) * Update inet/cidr data types with newest Bind patches diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm index b6d0cfd39b492..ea3af4877754f 100644 --- a/src/tools/msvc/Install.pm +++ b/src/tools/msvc/Install.pm @@ -369,7 +369,7 @@ sub GenerateTimezoneFiles print "Generating timezone files..."; my @args = ( - "$conf/zic/zic", '-d', "$target/share/timezone", '-b', 'slim'); + "$conf/zic/zic", '-d', "$target/share/timezone"); foreach (@tzfiles) { my $tzfile = $_; diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index 89e1b39036568..90594bd41bac9 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -103,7 +103,7 @@ sub mkvcbuild pg_strong_random.c pgcheckdir.c pgmkdirp.c pgsleep.c pgstrcasecmp.c pqsignal.c mkdtemp.c qsort.c qsort_arg.c quotes.c system.c strerror.c tar.c thread.c - win32env.c win32error.c win32security.c win32setlocale.c); + win32env.c win32error.c win32security.c win32setlocale.c win32stat.c); push(@pgportfiles, 'strtof.c') if ($vsVersion < '14.00'); diff --git a/src/tools/pgindent/README b/src/tools/pgindent/README index 8eb15fafb996e..d36f5088279c4 100644 --- a/src/tools/pgindent/README +++ b/src/tools/pgindent/README @@ -101,6 +101,10 @@ the comment block with some dashes: Odd spacing around typedef names might indicate an incomplete typedefs list. +pgindent will mangle both declaration and definition of a C function whose +name matches a typedef. Currently the best workaround is to choose +non-conflicting names. + pgindent can get confused by #if sequences that look correct to the compiler but have mismatched braces/parentheses when considered as a whole. 
Usually that looks pretty unreadable to humans too, so best practice is to rearrange @@ -138,25 +142,11 @@ Which files are processed The pgindent run processes (nearly) all PostgreSQL *.c and *.h files, but we currently exclude *.y and *.l files, as well as *.c and *.h files derived from *.y and *.l files. Additional exceptions are listed -in exclude_file_patterns: - -src/include/storage/s_lock.h and src/include/port/atomics/ are excluded -because they contain assembly code that pgindent tends to mess up. - -src/backend/utils/fmgrtab.c is excluded because it confuses pgindent -and it's a derived file anyway. - -src/interfaces/ecpg/test/expected/ is excluded to avoid breaking the ecpg -regression tests, since what ecpg generates is not necessarily formatted -as pgindent would do it. (Note that we do not exclude ecpg's header files -from the run; some of them get copied verbatim into ecpg's output, meaning -that the expected files may need to be updated to match.) - -src/include/snowball/libstemmer/ and src/backend/snowball/libstemmer/ -are excluded because those files are imported from an external project, -not maintained locally, and are machine-generated anyway. Likewise for -plperl/ppport.h. +in exclude_file_patterns; see the notes therein for rationale. +Note that we do not exclude ecpg's header files from the run. Some of them +get copied verbatim into ecpg's output, meaning that ecpg's expected files +may need to be updated to match. The perltidy run processes all *.pl and *.pm files, plus a few executable Perl scripts that are not named that way. 
See the "find" diff --git a/src/tools/pgindent/exclude_file_patterns b/src/tools/pgindent/exclude_file_patterns index c8efc9a913109..f08180b0d0892 100644 --- a/src/tools/pgindent/exclude_file_patterns +++ b/src/tools/pgindent/exclude_file_patterns @@ -1,10 +1,49 @@ -#list of file patterns to exclude from pgindent runs, see notes in README -/storage/s_lock\.h$ -/port/atomics/ -/utils/fmgrtab\.c$ -/ecpg/test/expected/ +# List of filename patterns to exclude from pgindent runs +# +# These contain assembly code that pgindent tends to mess up. +src/include/storage/s_lock\.h$ +src/include/port/atomics/ +# +# This contains C++ constructs that confuse pgindent. +src/include/jit/llvmjit\.h$ +# +# This confuses pgindent, and it's a derived file anyway. +src/backend/utils/fmgrtab\.c$ +# +# pgindent might mangle entries in this that match typedef names. +# Since it's a derived file anyway, just exclude it. +src/backend/utils/fmgrprotos\.h$ +# +# kwlist_d files are made by gen_keywordlist.pl. While we could insist that +# they match pgindent style, they'd look worse not better, so exclude them. +kwlist_d\.h$ +# +# These are generated by the scripts from src/common/unicode/. They use +# hash functions generated by PerfectHash.pm whose format looks worse with +# pgindent. +src/include/common/unicode_norm_hashfunc\.h$ +src/include/common/unicode_normprops_table\.h$ +# +# Exclude ecpg test files to avoid breaking the ecpg regression tests +# (but include files at the top level of the ecpg/test/ directory). +src/interfaces/ecpg/test/.*/ +# +# src/include/snowball/libstemmer/ and src/backend/snowball/libstemmer/ +# are excluded because those files are imported from an external project, +# rather than maintained locally, and they are machine-generated anyway. /snowball/libstemmer/ -/pl/plperl/ppport\.h$ -/jit/llvmjit\.h$ +# +# These files are machine-generated by code not under our control, +# so we shouldn't expect them to conform to our style. 
+# (Some versions of dtrace build probes.h files that confuse pgindent, too.) +src/backend/utils/probes\.h$ +src/include/pg_config\.h$ +src/pl/plperl/ppport\.h$ +src/pl/plperl/SPI\.c$ +src/pl/plperl/Util\.c$ +# +# Exclude any temporary installations that may be in the tree. /tmp_check/ /tmp_install/ +# ... and for paranoia's sake, don't touch git stuff. +/\.git/ diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent index 457e328824844..4124d27dea669 100755 --- a/src/tools/pgindent/pgindent +++ b/src/tools/pgindent/pgindent @@ -159,6 +159,7 @@ sub process_exclude while (my $line = <$eh>) { chomp $line; + next if $line =~ m/^#/; my $rgx = qr!$line!; @files = grep { $_ !~ /$rgx/ } @files if $rgx; } diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index f151e13d7f1c1..ff853634bc521 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -1020,6 +1020,7 @@ HbaToken HeadlineJsonState HeadlineParsedText HeadlineWordEntry +HeapCheckContext HeapScanDesc HeapTuple HeapTupleData @@ -1832,7 +1833,9 @@ PgStat_MsgFuncstat PgStat_MsgHdr PgStat_MsgInquiry PgStat_MsgRecoveryConflict +PgStat_MsgReplSlot PgStat_MsgResetcounter +PgStat_MsgResetreplslotcounter PgStat_MsgResetsharedcounter PgStat_MsgResetsinglecounter PgStat_MsgResetslrucounter @@ -1841,6 +1844,8 @@ PgStat_MsgTabpurge PgStat_MsgTabstat PgStat_MsgTempFile PgStat_MsgVacuum +PgStat_MsgWal +PgStat_ReplSlotStats PgStat_SLRUStats PgStat_Shared_Reset_Target PgStat_Single_Reset_Type @@ -1852,6 +1857,7 @@ PgStat_TableCounts PgStat_TableEntry PgStat_TableStatus PgStat_TableXactStatus +PgStat_WalStats PgXmlErrorContext PgXmlStrictness Pg_finfo_record @@ -2285,6 +2291,7 @@ SimpleStringList SimpleStringListCell SingleBoundSortItem Size +SkipPages SlabBlock SlabChunk SlabContext @@ -2295,12 +2302,12 @@ SlotNumber SlruCtl SlruCtlData SlruErrorCause -SlruFlush -SlruFlushData SlruPageStatus SlruScanCallback SlruShared SlruSharedData +SlruWriteAll 
+SlruWriteAllData SnapBuild SnapBuildOnDisk SnapBuildState @@ -2404,6 +2411,7 @@ Syn SyncOps SyncRepConfigData SyncRepStandbyData +SyncRequestHandler SyncRequestType SysScanDesc SyscacheCallbackFunction @@ -2785,6 +2793,8 @@ XactCallback XactCallbackItem XactEvent XactLockTableWaitInfo +XidBoundsViolation +XidCommitStatus XidHorizonPrefetchState XidStatus XmlExpr @@ -3185,6 +3195,7 @@ pg_tz pg_tz_cache pg_tzenum pg_unicode_decomposition +pg_unicode_norminfo pg_unicode_normprops pg_utf_to_local_combined pg_uuid_t @@ -3578,3 +3589,4 @@ yyscan_t z_stream z_streamp zic_t +HeapTupleForceOption diff --git a/src/tutorial/complex.source b/src/tutorial/complex.source index 0355926701623..d849ec0d4b707 100644 --- a/src/tutorial/complex.source +++ b/src/tutorial/complex.source @@ -111,7 +111,7 @@ CREATE FUNCTION complex_add(complex, complex) LANGUAGE C IMMUTABLE STRICT; -- we can now define the operator. We show a binary operator here but you --- can also define unary operators by omitting either of leftarg or rightarg. +-- can also define a prefix operator by omitting the leftarg. 
CREATE OPERATOR + ( leftarg = complex, rightarg = complex, diff --git a/src/tutorial/syscat.source b/src/tutorial/syscat.source index 3a1767f97be74..8a04d6a961f28 100644 --- a/src/tutorial/syscat.source +++ b/src/tutorial/syscat.source @@ -96,36 +96,22 @@ SELECT n.nspname, r.rolname, format_type(t.oid, null) as typname -- --- lists all left unary operators +-- lists all prefix operators -- -SELECT n.nspname, o.oprname AS left_unary, +SELECT n.nspname, o.oprname AS prefix_op, format_type(right_type.oid, null) AS operand, format_type(result.oid, null) AS return_type FROM pg_namespace n, pg_operator o, pg_type right_type, pg_type result WHERE o.oprnamespace = n.oid - and o.oprkind = 'l' -- left unary + and o.oprkind = 'l' -- prefix ("left unary") and o.oprright = right_type.oid and o.oprresult = result.oid ORDER BY nspname, operand; -- --- lists all right unary operators --- -SELECT n.nspname, o.oprname AS right_unary, - format_type(left_type.oid, null) AS operand, - format_type(result.oid, null) AS return_type - FROM pg_namespace n, pg_operator o, - pg_type left_type, pg_type result - WHERE o.oprnamespace = n.oid - and o.oprkind = 'r' -- right unary - and o.oprleft = left_type.oid - and o.oprresult = result.oid - ORDER BY nspname, operand; - --- --- lists all binary operators +-- lists all infix operators -- SELECT n.nspname, o.oprname AS binary_op, format_type(left_type.oid, null) AS left_opr, @@ -134,7 +120,7 @@ SELECT n.nspname, o.oprname AS binary_op, FROM pg_namespace n, pg_operator o, pg_type left_type, pg_type right_type, pg_type result WHERE o.oprnamespace = n.oid - and o.oprkind = 'b' -- binary + and o.oprkind = 'b' -- infix ("binary") and o.oprleft = left_type.oid and o.oprright = right_type.oid and o.oprresult = result.oid