diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 5a52a52..0000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-version: 2
-
-workflows:
- version: 2
- test:
- jobs:
- - test-10
- - test-11
- - test-12
-
-jobs:
- test-10: &test-template
- working_directory: ~/postgres_dba
- docker:
- - image: postgres:10
- environment:
- - POSTGRES_VERSION: 10
- steps:
- - checkout
- - run:
- name: Init Postgres cluster
- command: |
- pg_createcluster $POSTGRES_VERSION main
- echo 'local all all trust' > /etc/postgresql/$POSTGRES_VERSION/main/pg_hba.conf
- echo "shared_preload_libraries='pg_stat_statements'" >> /etc/postgresql/$POSTGRES_VERSION/main/postgresql.conf
- pg_ctlcluster $POSTGRES_VERSION main start
- - run:
- name: Prepare DB
- command: |
- psql -U postgres -c 'create database test'
- psql -U postgres test -c 'create extension pg_stat_statements'
- psql -U postgres test -c 'create extension pgstattuple'
- psql -U postgres test -c "create table align1 as select 1::int4, 2::int8, 3::int4 as more from generate_series(1, 100000) _(i);"
- psql -U postgres test -c "create table align2 as select 1::int4, 3::int4 as more, 2::int8 from generate_series(1, 100000) _(i);"
- - run:
- name: Tests
- command: |
- echo "\set postgres_dba_wide true" > ~/.psqlrc
- echo "\set postgres_dba_interactive_mode false" >> ~/.psqlrc
- for f in ~/postgres_dba/sql/*; do psql -U postgres test -f ~/postgres_dba/warmup.psql -f "$f">/dev/null; done
- echo "\set postgres_dba_wide false" > ~/.psqlrc
- echo "\set postgres_dba_interactive_mode false" >> ~/.psqlrc
- for f in ~/postgres_dba/sql/*; do psql -U postgres test -f ~/postgres_dba/warmup.psql -f "$f">/dev/null; done
- diff -b test/regression/0_node.out <(psql -U postgres test -f warmup.psql -f ~/postgres_dba/sql/0_node.sql | grep Role)
- diff -b test/regression/p1_alignment_padding.out <(psql -U postgres test -f warmup.psql -f ~/postgres_dba/sql/p1_alignment_padding.sql | grep align)
- diff -b test/regression/a1_activity.out <(psql -U postgres test -f warmup.psql -f ~/postgres_dba/sql/a1_activity.sql | grep User)
- test-11:
- <<: *test-template
- docker:
- - image: postgres:11
- environment:
- - POSTGRES_VERSION: 11
- test-12:
- <<: *test-template
- docker:
- - image: postgres:12
- environment:
- - POSTGRES_VERSION: 12
diff --git a/.cursor/rules/sql-style.mdc b/.cursor/rules/sql-style.mdc
new file mode 100644
index 0000000..03f67dc
--- /dev/null
+++ b/.cursor/rules/sql-style.mdc
@@ -0,0 +1,136 @@
+---
+globs: *.sql,*.psql
+description: SQL style guide rules for PostgreSQL development
+---
+
+# SQL style guide
+
+Source: [postgres.ai SQL style guide](https://postgres.ai/rules/sql-style)
+
+## Core philosophy
+
+From PEP8:
+
+* Consistency with this style guide is important
+* Consistency within a project is more important
+* Consistency within one module or function is the most important
+* However, know when to be inconsistent -- sometimes style guide recommendations just aren't applicable
+
+## Core rules
+
+* **Use lowercase SQL keywords** (not uppercase)
+* Use `snake_case` for all identifiers (no CamelCase)
+* Names must begin with a letter and may not end in underscore
+* Only use letters, numbers, and underscores in names
+* Be explicit: always use `AS` for aliases, specify JOIN types
+* Root keywords on their own line (except with single argument)
+* Multi-line arguments must be indented relative to root keyword
+* Use **ISO 8601 date format**: `yyyy-mm-ddThh:mm:ss.sssss`
+* Foreign key naming: `user_id` to reference `users` table (singular + \_id)
+* Use meaningful aliases that reflect the data (not just single letters)
+
+## Formatting
+
+### Keywords and alignment
+
+```sql
+-- Root keywords left-aligned
+-- Arguments indented relative to root keyword
+select
+ client_id,
+ submission_date
+from main_summary
+where
+ sample_id = '42'
+ and submission_date > '20180101'
+limit 10;
+```
+
+### Comments
+
+```sql
+/* Block comments for multi-line descriptions */
+-- Line comments for single line notes
+select
+ client_id, -- user identifier
+ submission_date
+from main_summary;
+```
+
+### Parentheses
+
+```sql
+-- Opening paren ends the line
+-- Closing paren aligns with starting line
+-- Contents indented
+with sample as (
+ select
+ client_id,
+ submission_date
+ from main_summary
+ where sample_id = '42'
+)
+```
+
+### Boolean operators
+
+```sql
+-- AND/OR at beginning of line
+where
+ submission_date > '20180101'
+ and sample_id = '42'
+```
+
+## Table design rules
+
+* Always add `id` column of type `generated always as identity`
+* Always add table comments using `comment on table...`
+* Default to `public` schema
+* Include schema in queries for clarity
+* Use the singular form of the referenced table name with `_id` suffix for foreign key columns
+
+## Best practices
+
+* Use CTEs instead of nested queries
+* Explicit column names in GROUP BY (except for expressions - see below)
+* Functions treated as identifiers: `date_trunc()` not `DATE_TRUNC()`
+* One argument per line for multi-argument clauses
+* Use meaningful aliases that reflect the data being selected
+
+### GROUP BY exception
+
+```sql
+-- Acceptable: use numbers to avoid repeating complex expressions
+select
+ date_trunc('minute', xact_start) as xact_start_minute,
+ count(*)
+from pg_stat_activity
+group by 1
+order by 1;
+```
+
+## Examples
+
+### Good
+
+```sql
+select
+ t.client_id as client_id,
+ date(t.created_at) as day
+from telemetry as t
+inner join users as u
+ on t.user_id = u.id
+where
+ t.submission_date > '2019-07-01'
+ and t.sample_id = '10'
+group by t.client_id, day;
+```
+
+### Bad
+
+```sql
+SELECT t.client_id, DATE(t.created_at) day
+FROM telemetry t, users u
+WHERE t.user_id = u.id AND t.submission_date > '2019-07-01'
+GROUP BY 1, 2;
+```
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..0ac7c29
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,149 @@
+name: Test PostgreSQL Versions
+
+on:
+ push:
+ branches: [ master, main ]
+ pull_request:
+ branches: [ master, main ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+
+ strategy:
+ matrix:
+ postgres-version: ['13', '14', '15', '16', '17', '18']
+ fail-fast: false
+
+ services:
+ postgres:
+ image: postgres:${{ matrix.postgres-version }}
+ env:
+ POSTGRES_PASSWORD: postgres
+ POSTGRES_DB: test
+ POSTGRES_HOST_AUTH_METHOD: trust
+ POSTGRES_INITDB_ARGS: --auth-host=trust --auth-local=trust
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+ ports:
+ - 5432:5432
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Install PostgreSQL client
+ run: |
+ # Install default PostgreSQL client (works for all versions)
+ sudo apt-get update
+ sudo apt-get install -y postgresql-client
+
+ # Verify installation
+ psql --version
+
+ - name: Prepare test database
+ run: |
+ # Wait for PostgreSQL to be ready
+ until pg_isready -h localhost -p 5432 -U postgres; do
+ echo "Waiting for postgres..."
+ sleep 2
+ done
+
+ # Check PostgreSQL version
+ psql -h localhost -U postgres -d test -c 'SELECT version();'
+
+ # Create extensions (pg_stat_statements may not work without shared_preload_libraries)
+ psql -h localhost -U postgres -d test -c 'CREATE EXTENSION IF NOT EXISTS pg_stat_statements;' || echo "Warning: pg_stat_statements extension not available"
+ psql -h localhost -U postgres -d test -c 'CREATE EXTENSION IF NOT EXISTS pgstattuple;'
+
+ # Create minimal privilege user for testing
+ psql -h localhost -U postgres -d test -c "CREATE USER dba_user;"
+ psql -h localhost -U postgres -d test -c "GRANT pg_monitor TO dba_user;"
+ psql -h localhost -U postgres -d test -c "GRANT CONNECT ON DATABASE test TO dba_user;"
+ psql -h localhost -U postgres -d test -c "GRANT USAGE ON SCHEMA public TO dba_user;"
+
+ # Verify extensions
+ psql -h localhost -U postgres -d test -c 'SELECT extname FROM pg_extension ORDER BY extname;'
+
+ # Create test tables for alignment testing (as superuser)
+ psql -h localhost -U postgres -d test -c "CREATE TABLE align1 AS SELECT 1::int4, 2::int8, 3::int4 AS more FROM generate_series(1, 100000) _(i);"
+ psql -h localhost -U postgres -d test -c "CREATE TABLE align2 AS SELECT 1::int4, 3::int4 AS more, 2::int8 FROM generate_series(1, 100000) _(i);"
+
+ # Grant access to test tables for dba_user
+ psql -h localhost -U postgres -d test -c "GRANT SELECT ON ALL TABLES IN SCHEMA public TO dba_user;"
+
+ # Test connection as dba_user
+ psql -h localhost -U dba_user -d test -c 'SELECT current_user, session_user;'
+
+ - name: Test wide mode
+ run: |
+ echo "\set postgres_dba_wide true" > ~/.psqlrc
+ echo "\set postgres_dba_interactive_mode false" >> ~/.psqlrc
+ echo "Testing all SQL files in wide mode with minimal privileges..."
+ for f in sql/*; do
+ echo " Testing $f..."
+ if ! psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f "$f" > /dev/null 2>&1; then
+ echo "❌ FAILED: $f in wide mode"
+ echo "Error output:"
+ psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f "$f"
+ exit 1
+ fi
+ done
+ echo "✅ All tests passed in wide mode"
+
+ - name: Test normal mode
+ run: |
+ echo "\set postgres_dba_wide false" > ~/.psqlrc
+ echo "\set postgres_dba_interactive_mode false" >> ~/.psqlrc
+ echo "Testing all SQL files in normal mode with minimal privileges..."
+ for f in sql/*; do
+ echo " Testing $f..."
+ if ! psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f "$f" > /dev/null 2>&1; then
+ echo "❌ FAILED: $f in normal mode"
+ echo "Error output:"
+ psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f "$f"
+ exit 1
+ fi
+ done
+ echo "✅ All tests passed in normal mode"
+
+ - name: Run regression tests
+ run: |
+ echo "\set postgres_dba_wide false" > ~/.psqlrc
+ echo "\set postgres_dba_interactive_mode false" >> ~/.psqlrc
+
+ echo "Running regression tests with minimal privileges..."
+
+ echo " Testing 0_node.sql..."
+ OUTPUT=$(psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f sql/0_node.sql | grep Role)
+ if [[ "$OUTPUT" == *"Master"* ]]; then
+ echo " ✓ Role test passed"
+ else
+ echo " ✗ Role test failed: $OUTPUT"
+ exit 1
+ fi
+
+ echo " Testing p1_alignment_padding.sql..."
+ OUTPUT=$(psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f sql/p1_alignment_padding.sql | grep align)
+ if [[ "$OUTPUT" == *"align1"* && "$OUTPUT" == *"align2"* && "$OUTPUT" == *"int4, more, int8"* ]]; then
+ echo " ✓ Alignment padding test passed"
+ else
+ echo " ✗ Alignment padding test failed: $OUTPUT"
+ exit 1
+ fi
+
+ echo " Testing a1_activity.sql..."
+ OUTPUT=$(psql -h localhost -U dba_user -d test --no-psqlrc -f warmup.psql -f sql/a1_activity.sql | grep User)
+ if [[ "$OUTPUT" == *"User"* ]]; then
+ echo " ✓ Activity test passed"
+ else
+ echo " ✗ Activity test failed: $OUTPUT"
+ exit 1
+ fi
+
+ echo "✅ All regression tests passed with minimal privileges"
+
+
diff --git a/.gitignore b/.gitignore
index 5ca0973..725c3f7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
.DS_Store
+.cursor/environment.json
diff --git a/README.md b/README.md
index 1cefe2c..57dfb2f 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,4 @@
-[](https://circleci.com/gh/NikolayS/postgres_dba)
# postgres_dba (PostgresDBA)
-
The missing set of useful tools for Postgres DBA and mere mortals.
:warning: If you have great ideas, feel free to create a pull request or open an issue.
@@ -8,7 +6,7 @@ The missing set of useful tools for Postgres DBA and mere mortals.
-:point_right: See also [postgres-checkup](https://gitlab.com/postgres-ai/postgres-checkup), a tool for automated health checks and SQL performance analysis.
+:point_right: See also [postgres_ai](https://github.com/postgres-ai/postgres_ai), a comprehensive monitoring and optimization platform that includes automated health checks, SQL performance analysis, and much more.
## Questions?
@@ -18,29 +16,82 @@ Questions? Ideas? Contact me: nik@postgres.ai, Nikolay Samokhvalov.
**postgres_dba** is based on useful queries created and improved by many developers. Here is incomplete list of them:
* Jehan-Guillaume (ioguix) de Rorthais https://github.com/ioguix/pgsql-bloat-estimation
- * Alexey Lesovsky, Alexey Ermakov, Maxim Boguk, Ilya Kosmodemiansky et al. from Data Egret (aka PostgreSQL-Consulting) https://github.com/dataegret/pg-utils
+ * Alexey Lesovsky, Alexey Ermakov, Maxim Boguk, Ilya Kosmodemiansky et al. https://github.com/dataegret/pg-utils
* Josh Berkus, Quinn Weaver et al. from PostgreSQL Experts, Inc. https://github.com/pgexperts/pgx_scripts
## Requirements
**You need to have psql version 10 or newer**, but the Postgres server itself can be older – most tools work with it.
-You can install postgresql-client library version, say, 12 on your machine and use it to work with Postgres server version 9.6 and older – in this case postgres_dba will work. But you do need to have psql from the latest (version 12) Postgres release.
+You can install the latest postgresql-client library on your machine and use it to work with older Postgres servers – in this case postgres_dba will work. It's recommended to use psql from PostgreSQL 18 (the latest release) for the best compatibility.
+
+### Installing on Ubuntu
On clean Ubuntu, this is how you can get postgresql-client and have the most recent psql:
-```
+```bash
sudo sh -c "echo \"deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main\" >> /etc/apt/sources.list.d/pgdg.list"
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
-sudo apt-get install -y postgresql-client-12
+sudo apt-get install -y postgresql-client-18
```
-Using alternative psql pager called "pspg" is highly recommended (but not required): https://github.com/okbob/pspg.
+### Installing on macOS
+
+On macOS, use Homebrew to install PostgreSQL client and pspg:
+
+```bash
+# Install PostgreSQL client (includes psql)
+brew install libpq
+
+# Add libpq to PATH (required because it's keg-only)
+echo 'export PATH="/opt/homebrew/opt/libpq/bin:$PATH"' >> ~/.zshrc
+source ~/.zshrc
+
+# For Intel Macs, use:
+# echo 'export PATH="/usr/local/opt/libpq/bin:$PATH"' >> ~/.zshrc
+
+# Verify installation
+psql --version
+
+# Install pspg (recommended pager)
+brew install pspg
+```
+
+Alternatively, you can install the full PostgreSQL package which includes psql:
+```bash
+brew install postgresql@18
+```
+
+### pspg - Enhanced psql Pager (Optional)
+
+Using alternative psql pager called "pspg" is highly recommended but optional: https://github.com/okbob/pspg.
+
+After installing pspg, configure it in your `~/.psqlrc`:
+```bash
+\setenv PAGER pspg
+\pset border 2
+\pset linestyle unicode
+```
+
+## Supported PostgreSQL Versions
+
+**postgres_dba** is tested and supports **PostgreSQL 13-18**, including the latest PostgreSQL 18 release.
+
+- ✅ **PostgreSQL 13** - Fully supported
+- ✅ **PostgreSQL 14** - Fully supported
+- ✅ **PostgreSQL 15** - Fully supported
+- ✅ **PostgreSQL 16** - Fully supported
+- ✅ **PostgreSQL 17** - Fully supported (includes `pg_stat_checkpointer` compatibility)
+- ✅ **PostgreSQL 18** - Fully supported (latest release)
+
+Older versions (9.6-12) may work but are not actively tested. Some reports may require specific PostgreSQL features introduced in newer versions.
## Installation
-The installation is trivial. Clone the repository and put "dba" alias to your `.psqlrc` file (note, that this will work only in bash, see https://github.com/NikolayS/postgres_dba/pull/45):
+The installation is trivial. Clone the repository and put "dba" alias to your `.psqlrc` file (works in bash, zsh, and csh):
```bash
git clone https://github.com/NikolayS/postgres_dba.git
-echo "\\set dba '\\\\i `pwd`/postgres_dba/start.psql'" >> ~/.psqlrc
+cd postgres_dba
+printf "%s %s %s %s\n" \\echo 🧐 🐘 'postgres_dba 18.0 installed. Use ":dba" to see menu' >> ~/.psqlrc
+printf "%s %s %s %s\n" \\set dba \'\\\\i $(pwd)/start.psql\' >> ~/.psqlrc
```
That's it.
@@ -82,6 +133,36 @@ And then:
:dba
```
+## Key Features
+
+### Secure Role Management
+
+**postgres_dba** includes interactive tools for secure role (user) management:
+
+- **r1** – Create user with random password (interactive)
+- **r2** – Alter user with random password (interactive)
+
+These tools help prevent password exposure in psql history, logs, and command-line process lists by:
+- Generating secure random 16-character passwords
+- Using interactive prompts instead of command-line arguments
+- Only displaying the password once at creation/alteration time
+
+**Usage example:**
+```sql
+-- In psql, after launching :dba
+-- Select option r1 to create a new user
+-- The script will prompt you for:
+-- - Username
+-- - Superuser privilege (yes/no)
+-- - Login privilege (yes/no)
+-- The generated password will be displayed once in the output
+
+-- To see the password, set client_min_messages to DEBUG first:
+set client_min_messages to DEBUG;
+```
+
+**Security note:** These are DBA tools designed for trusted environments where the user already has superuser privileges. The password is shown in the psql output, so ensure you're working in a secure session.
+
## How to Extend (Add More Queries)
You can add your own useful SQL queries and use them from the main menu. Just add your SQL code to `./sql` directory. The filename should start with some 1 or 2-letter code, followed by underscore and some additional arbitrary words. Extension should be `.sql`. Example:
```
diff --git a/init/generate.sh b/init/generate.sh
index c282764..91d7526 100755
--- a/init/generate.sh
+++ b/init/generate.sh
@@ -16,6 +16,10 @@ cat > "$WARMUP" <<- VersCheck
select 1/0;
\endif
+select current_setting('server_version_num')::integer >= 170000 as postgres_dba_pgvers_17plus \gset
+
+select current_setting('server_version_num')::integer >= 130000 as postgres_dba_pgvers_13plus \gset
+
select current_setting('server_version_num')::integer >= 100000 as postgres_dba_pgvers_10plus \gset
\if :postgres_dba_pgvers_10plus
\set postgres_dba_last_wal_receive_lsn pg_last_wal_receive_lsn
diff --git a/misc/generate_password.sql b/misc/generate_password.sql
index 49b2d82..53b435e 100644
--- a/misc/generate_password.sql
+++ b/misc/generate_password.sql
@@ -1,3 +1,7 @@
+-- WARNING: random() that is used here is not cryptographically strong –
+-- if an attacker knows one value, it's easy to guess the "next" value
+-- TODO: rework to use pgcrypto instead
+
with init(len, arr) as (
-- edit password length and possible characters here
select 16, string_to_array('123456789abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ', null)
diff --git a/roles/alter_user_with_random_password.psql b/roles/alter_user_with_random_password.psql
index f951c87..2f7bf59 100644
--- a/roles/alter_user_with_random_password.psql
+++ b/roles/alter_user_with_random_password.psql
@@ -43,17 +43,17 @@ begin
j := int4(random() * allowed_len);
pwd := pwd || substr(allowed, j+1, 1);
end loop;
- sql := 'alter role ' || current_setting('postgres_dba.username')::text || ' password ''' || pwd || ''';';
+ sql := format('alter role %I password %L', current_setting('postgres_dba.username')::text, pwd);
raise debug 'SQL: %', sql;
execute sql;
- sql := 'alter role ' || current_setting('postgres_dba.username')::text
- || (case when lower(current_setting('postgres_dba.is_superuser')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' superuser' else '' end)
- || ';';
+ sql := format('alter role %I%s',
+ current_setting('postgres_dba.username')::text,
+ (case when lower(current_setting('postgres_dba.is_superuser')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' superuser' else '' end));
raise debug 'SQL: %', sql;
execute sql;
- sql := 'alter role ' || current_setting('postgres_dba.username')::text
- || (case when lower(current_setting('postgres_dba.login')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' login' else '' end)
- || ';';
+ sql := format('alter role %I%s',
+ current_setting('postgres_dba.username')::text,
+ (case when lower(current_setting('postgres_dba.login')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' login' else '' end));
raise debug 'SQL: %', sql;
execute sql;
raise debug 'User % altered, password: %', current_setting('postgres_dba.username')::text, pwd;
diff --git a/roles/create_user_with_random_password.psql b/roles/create_user_with_random_password.psql
index a5257a3..3d3f42a 100644
--- a/roles/create_user_with_random_password.psql
+++ b/roles/create_user_with_random_password.psql
@@ -43,10 +43,11 @@ begin
j := int4(random() * allowed_len);
pwd := pwd || substr(allowed, j+1, 1);
end loop;
- sql := 'create role ' || current_setting('postgres_dba.username')::text
- || (case when lower(current_setting('postgres_dba.is_superuser')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' superuser' else '' end)
- || (case when lower(current_setting('postgres_dba.login')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' login' else '' end)
- || ' password ''' || pwd || ''';';
+ sql := format('create role %I%s%s password %L',
+ current_setting('postgres_dba.username')::text,
+ (case when lower(current_setting('postgres_dba.is_superuser')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' superuser' else '' end),
+ (case when lower(current_setting('postgres_dba.login')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' login' else '' end),
+ pwd);
raise debug 'SQL: %', sql;
execute sql;
raise info 'User % created, password: %', current_setting('postgres_dba.username')::text, pwd;
diff --git a/sql/0_node.sql b/sql/0_node.sql
index 5f4d968..62d5f1d 100644
--- a/sql/0_node.sql
+++ b/sql/0_node.sql
@@ -1,4 +1,4 @@
---Node & Current DB Information: master/replica, lag, DB size, tmp files, etc
+--Node and current database information: primary/replica, lag, database size, temporary files, etc.
/*
For Postgres versions older than 10, run this first:
@@ -8,6 +8,7 @@ For Postgres versions older than 10, run this first:
\set postgres_dba_is_wal_replay_paused pg_is_xlog_replay_paused
*/
+
with data as (
select s.*
from pg_stat_database s
@@ -48,28 +49,41 @@ select 'Started At', pg_postmaster_start_time()::timestamptz(0)::text
union all
select 'Uptime', (now() - pg_postmaster_start_time())::interval(0)::text
union all
-select
- 'Checkpoints',
- (select (checkpoints_timed + checkpoints_req)::text from pg_stat_bgwriter)
+\if :postgres_dba_pgvers_17plus
+select 'Checkpoints', (select (num_timed + num_requested)::text from pg_stat_checkpointer)
union all
-select
- 'Forced Checkpoints',
- (
- select round(100.0 * checkpoints_req::numeric /
- (nullif(checkpoints_timed + checkpoints_req, 0)), 1)::text || '%'
- from pg_stat_bgwriter
- )
+select 'Forced Checkpoints', (
+ select round(100.0 * num_requested::numeric /
+ (nullif(num_timed + num_requested, 0)), 1)::text || '%'
+ from pg_stat_checkpointer
+)
union all
-select
- 'Checkpoint MB/sec',
- (
- select round((nullif(buffers_checkpoint::numeric, 0) /
- ((1024.0 * 1024 /
- (current_setting('block_size')::numeric))
- * extract('epoch' from now() - stats_reset)
- ))::numeric, 6)::text
- from pg_stat_bgwriter
- )
+select 'Checkpoint MiB/sec', (
+ select round((nullif(buffers_written::numeric, 0) /
+ ((1024.0 * 1024 /
+ (current_setting('block_size')::numeric))
+ * extract('epoch' from now() - stats_reset)
+ ))::numeric, 6)::text
+ from pg_stat_checkpointer
+)
+\else
+select 'Checkpoints', (select (checkpoints_timed + checkpoints_req)::text from pg_stat_bgwriter)
+union all
+select 'Forced Checkpoints', (
+ select round(100.0 * checkpoints_req::numeric /
+ (nullif(checkpoints_timed + checkpoints_req, 0)), 1)::text || '%'
+ from pg_stat_bgwriter
+)
+union all
+select 'Checkpoint MiB/sec', (
+ select round((nullif(buffers_checkpoint::numeric, 0) /
+ ((1024.0 * 1024 /
+ (current_setting('block_size')::numeric))
+ * extract('epoch' from now() - stats_reset)
+ ))::numeric, 6)::text
+ from pg_stat_bgwriter
+)
+\endif
union all
select repeat('-', 33), repeat('-', 88)
union all
diff --git a/sql/1_databases.sql b/sql/1_databases.sql
index a6fe8d9..778ad2f 100644
--- a/sql/1_databases.sql
+++ b/sql/1_databases.sql
@@ -1,4 +1,4 @@
---Databases: Size, Statistics
+--Databases: size, stats
with data as (
select
d.oid,
diff --git a/sql/2_table_sizes.sql b/sql/2_table_sizes.sql
index ba2a96d..bcafe23 100644
--- a/sql/2_table_sizes.sql
+++ b/sql/2_table_sizes.sql
@@ -1,4 +1,4 @@
---Table Sizes
+--Tables: table/index/TOAST size, number of rows
with data as (
select
diff --git a/sql/3_load_profiles.sql b/sql/3_load_profiles.sql
index 6ee7cf5..76b2a88 100644
--- a/sql/3_load_profiles.sql
+++ b/sql/3_load_profiles.sql
@@ -1,4 +1,4 @@
---Load Profile
+--Load profile
with data as (
select
diff --git a/sql/a1_activity.sql b/sql/a1_activity.sql
index df822e5..a52dca0 100644
--- a/sql/a1_activity.sql
+++ b/sql/a1_activity.sql
@@ -1,4 +1,4 @@
---Current Activity: count of current connections grouped by database, user name, state
+--Current activity: count of current connections grouped by database, username, state
select
coalesce(usename, '** ALL users **') as "User",
coalesce(datname, '** ALL databases **') as "DB",
diff --git a/sql/b1_table_estimation.sql b/sql/b1_table_estimation.sql
index 77f7ced..acee405 100644
--- a/sql/b1_table_estimation.sql
+++ b/sql/b1_table_estimation.sql
@@ -1,4 +1,4 @@
---Tables Bloat, rough estimation
+--Table bloat (estimated)
--This SQL is derived from https://github.com/ioguix/pgsql-bloat-estimation/blob/master/table/table_bloat.sql
diff --git a/sql/b2_btree_estimation.sql b/sql/b2_btree_estimation.sql
index 6da0e17..70e9fe2 100644
--- a/sql/b2_btree_estimation.sql
+++ b/sql/b2_btree_estimation.sql
@@ -1,4 +1,4 @@
---B-tree Indexes Bloat, rough estimation
+--B-tree index bloat (estimated)
-- enhanced version of https://github.com/ioguix/pgsql-bloat-estimation/blob/master/btree/btree_bloat.sql
diff --git a/sql/b3_table_pgstattuple.sql b/sql/b3_table_pgstattuple.sql
index 6b7530e..0b00841 100644
--- a/sql/b3_table_pgstattuple.sql
+++ b/sql/b3_table_pgstattuple.sql
@@ -1,4 +1,4 @@
---Tables Bloat, more precise (requires pgstattuple extension; expensive)
+--Table bloat (requires pgstattuple; expensive)
--https://github.com/dataegret/pg-utils/tree/master/sql
--pgstattuple extension required
diff --git a/sql/b4_btree_pgstattuple.sql b/sql/b4_btree_pgstattuple.sql
index 1cbf6cc..745a911 100644
--- a/sql/b4_btree_pgstattuple.sql
+++ b/sql/b4_btree_pgstattuple.sql
@@ -1,4 +1,4 @@
---B-tree Indexes Bloat, more precise (requires pgstattuple extension; expensive)
+--B-tree indexes bloat (requires pgstattuple; expensive)
--https://github.com/dataegret/pg-utils/tree/master/sql
--pgstattuple extension required
diff --git a/sql/b5_tables_no_stats.sql b/sql/b5_tables_no_stats.sql
index fd6e1a3..484cf13 100644
--- a/sql/b5_tables_no_stats.sql
+++ b/sql/b5_tables_no_stats.sql
@@ -1,4 +1,4 @@
---Tables and Columns Without Stats (so bloat cannot be estimated)
+--Tables and columns without stats (so bloat cannot be estimated)
--Created by PostgreSQL Experts https://github.com/pgexperts/pgx_scripts/blob/master/bloat/no_stats_table_check.sql
diff --git a/sql/e1_extensions.sql b/sql/e1_extensions.sql
index 576f91c..817fa0a 100644
--- a/sql/e1_extensions.sql
+++ b/sql/e1_extensions.sql
@@ -1,4 +1,4 @@
---List of extensions installed in the current DB
+--Extensions installed in current database
select
ae.name,
diff --git a/sql/i1_rare_indexes.sql b/sql/i1_rare_indexes.sql
index ccc49c2..492138d 100644
--- a/sql/i1_rare_indexes.sql
+++ b/sql/i1_rare_indexes.sql
@@ -1,4 +1,4 @@
---Unused/Rarely Used Indexes
+--Unused and rarely used indexes
--PostgreSQL Experts https://github.com/pgexperts/pgx_scripts/blob/master/indexes/unused_indexes.sql
diff --git a/sql/i2_redundant_indexes.sql b/sql/i2_redundant_indexes.sql
index 7f65fa1..a57c64f 100644
--- a/sql/i2_redundant_indexes.sql
+++ b/sql/i2_redundant_indexes.sql
@@ -1,4 +1,4 @@
---List of redundant indexes
+--Redundant indexes
-- Use it to see redundant indexes list
@@ -9,29 +9,60 @@
-- (Keep in mind, that on replicas, the whole picture of index usage
-- is usually very different from master).
-with index_data as (
+with fk_indexes as (
+ select
+ n.nspname as schema_name,
+ ci.relname as index_name,
+ cr.relname as table_name,
+ (confrelid::regclass)::text as fk_table_ref,
+ array_to_string(indclass, ', ') as opclasses
+ from pg_index i
+ join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
+ join pg_class cr on cr.oid = i.indrelid and cr.relkind = 'r'
+ join pg_namespace n on n.oid = ci.relnamespace
+ join pg_constraint cn on cn.conrelid = cr.oid
+ left join pg_stat_user_indexes si on si.indexrelid = i.indexrelid
+ where
+ contype = 'f'
+ and i.indisunique is false
+ and conkey is not null
+ and ci.relpages > 0 -- raise for a DB with a lot of indexes
+ and si.idx_scan < 10
+),
+-- Redundant indexes
+index_data as (
select
*,
- indkey::text as columns,
+ (select string_agg(lpad(i, 3, '0'), ' ') from unnest(string_to_array(indkey::text, ' ')) i) as columns,
array_to_string(indclass, ', ') as opclasses
- from pg_index
-), redundant as (
+ from pg_index i
+ join pg_class ci on ci.oid = i.indexrelid and ci.relkind = 'i'
+ where indisvalid = true and ci.relpages > 0 -- raise for a DB with a lot of indexes
+), redundant_indexes as (
select
+ i2.indexrelid as index_id,
tnsp.nspname AS schema_name,
trel.relname AS table_name,
+ pg_relation_size(trel.oid) as table_size_bytes,
irel.relname AS index_name,
am1.amname as access_method,
- format('redundant to index: %I', i1.indexrelid::regclass)::text as reason,
+ (i1.indexrelid::regclass)::text as reason,
+ i1.indexrelid as reason_index_id,
pg_get_indexdef(i1.indexrelid) main_index_def,
pg_size_pretty(pg_relation_size(i1.indexrelid)) main_index_size,
pg_get_indexdef(i2.indexrelid) index_def,
- pg_size_pretty(pg_relation_size(i2.indexrelid)) index_size,
- s.idx_scan as index_usage
+ pg_relation_size(i2.indexrelid) index_size_bytes,
+ s.idx_scan as index_usage,
+ quote_ident(tnsp.nspname) as formated_schema_name,
+ coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(irel.relname) as formated_index_name,
+ quote_ident(trel.relname) AS formated_table_name,
+ coalesce(nullif(quote_ident(tnsp.nspname), 'public') || '.', '') || quote_ident(trel.relname) as formated_relation_name,
+ i2.opclasses
from
index_data as i1
join index_data as i2 on (
- i1.indrelid = i2.indrelid /* same table */
- and i1.indexrelid <> i2.indexrelid /* NOT same index */
+ i1.indrelid = i2.indrelid -- same table
+ and i1.indexrelid <> i2.indexrelid -- NOT same index
)
inner join pg_opclass op1 on i1.indclass[0] = op1.oid
inner join pg_opclass op2 on i2.indclass[0] = op2.oid
@@ -42,23 +73,79 @@ with index_data as (
join pg_namespace as tnsp on trel.relnamespace = tnsp.oid
join pg_class as irel on irel.oid = i2.indexrelid
where
- not i1.indisprimary -- index 1 is not primary
+ not i2.indisprimary -- index 2 is not primary
and not ( -- skip if index1 is (primary or uniq) and is NOT (primary and uniq)
- (i1.indisprimary or i1.indisunique)
- and (not i2.indisprimary or not i2.indisunique)
- )
- and am1.amname = am2.amname -- same access type
- and (
- i2.columns like (i1.columns || '%') -- index 2 includes all columns from index 1
- or i1.columns = i2.columns -- index1 and index 2 includes same columns
- )
- and (
- i2.opclasses like (i1.opclasses || '%')
- or i1.opclasses = i2.opclasses
+ i2.indisunique and not i1.indisprimary
)
+ and am1.amname = am2.amname -- same access type
+ and i1.columns like (i2.columns || '%') -- index 1 includes all columns from index 2
+ and i1.opclasses like (i2.opclasses || '%')
-- index expressions is same
and pg_get_expr(i1.indexprs, i1.indrelid) is not distinct from pg_get_expr(i2.indexprs, i2.indrelid)
-- index predicates is same
and pg_get_expr(i1.indpred, i1.indrelid) is not distinct from pg_get_expr(i2.indpred, i2.indrelid)
+), redundant_indexes_fk as (
+ select
+ ri.*,
+ (
+ select count(1)
+ from fk_indexes fi
+ where
+ fi.fk_table_ref = ri.table_name
+ and fi.opclasses like (ri.opclasses || '%')
+ ) > 0 as supports_fk
+ from redundant_indexes ri
+),
+-- Cut recursive links
+redundant_indexes_tmp_num as (
+ select
+ row_number() over () num,
+ rig.*
+ from redundant_indexes_fk rig
+ order by index_id
+), redundant_indexes_tmp_cut as (
+ select
+ ri1.*,
+ ri2.num as r_num
+ from redundant_indexes_tmp_num ri1
+ left join redundant_indexes_tmp_num ri2 on ri2.reason_index_id = ri1.index_id and ri1.reason_index_id = ri2.index_id
+ where ri1.num < ri2.num or ri2.num is null
+), redundant_indexes_cut_grouped as (
+ select
+ distinct(num),
+ *
+ from redundant_indexes_tmp_cut
+ order by index_size_bytes desc
+), redundant_indexes_grouped as (
+ select
+ distinct(num),
+ *
+ from redundant_indexes_tmp_cut
+ order by index_size_bytes desc
)
-select * from redundant;
+select
+ schema_name,
+ table_name,
+ table_size_bytes,
+ index_name,
+ access_method,
+ string_agg(distinct reason, ', ') as redundant_to,
+ string_agg(main_index_def, ', ') as main_index_def,
+ string_agg(main_index_size, ', ') as main_index_size,
+ index_def,
+ index_size_bytes,
+ index_usage,
+ supports_fk
+from redundant_indexes_cut_grouped
+group by
+ index_id,
+ schema_name,
+ table_name,
+ table_size_bytes,
+ index_name,
+ access_method,
+ index_def,
+ index_size_bytes,
+ index_usage,
+ supports_fk
+order by index_size_bytes desc;
diff --git a/sql/i3_non_indexed_fks.sql b/sql/i3_non_indexed_fks.sql
index 13b7c35..1b448e6 100644
--- a/sql/i3_non_indexed_fks.sql
+++ b/sql/i3_non_indexed_fks.sql
@@ -1,4 +1,4 @@
---FKs with Missing/Bad Indexes
+--Foreign keys with missing or bad indexes
--Created by PostgreSQL Experts https://github.com/pgexperts/pgx_scripts/blob/master/indexes/fk_no_index.sql
diff --git a/sql/i4_invalid_indexes.sql b/sql/i4_invalid_indexes.sql
index 084a88f..8e68c48 100644
--- a/sql/i4_invalid_indexes.sql
+++ b/sql/i4_invalid_indexes.sql
@@ -1,4 +1,4 @@
---List of invalid indexes
+--Invalid indexes
-- Use it to see invalid indexes list
diff --git a/sql/i5_indexes_migration.sql b/sql/i5_indexes_migration.sql
index cda8141..96600b2 100644
--- a/sql/i5_indexes_migration.sql
+++ b/sql/i5_indexes_migration.sql
@@ -1,4 +1,4 @@
---Unused/Redundant Indexes Do & Undo Migration DDL
+--Cleanup unused and redundant indexes – DO & UNDO migration DDL
-- Use it to generate a database migration (e.g. RoR's db:migrate or Sqitch)
-- to drop unused and redundant indexes.
@@ -119,13 +119,13 @@ with unused as (
group by table_name, index_name
order by table_name, index_name
)
-select '-- Do migration: --' as run_in_separate_transactions
+select '-- DO migration: --' as run_in_separate_transactions
union all
select * from droplines
union all
select ''
union all
-select '-- Revert migration: --'
+select '-- UNDO migration: --'
union all
select * from createlines;
diff --git a/sql/l1_lock_trees.sql b/sql/l1_lock_trees.sql
index 1a5f2a2..cae0106 100644
--- a/sql/l1_lock_trees.sql
+++ b/sql/l1_lock_trees.sql
@@ -1,53 +1,39 @@
---Locks: analysis of "locking trees"
+--Lock trees (lightweight)
+
+-- Source: https://github.com/dataegret/pg-utils/blob/master/sql/locktree.sql
+-- The paths won't be precise but this query is very light and may be used quite frequently
--- Based on: https://gitlab.com/snippets/1890428
with recursive l as (
- select
- pid, locktype, granted,
- array_position(array['AccessShare','RowShare','RowExclusive','ShareUpdateExclusive','Share','ShareRowExclusive','Exclusive','AccessExclusive'], left(mode, -4)) m,
- row(locktype, database, relation, page, tuple, virtualxid, transactionid, classid, objid, objsubid) obj
- from pg_locks
+ select pid, locktype, granted,
+ array_position(array['accessshare','rowshare','rowexclusive','shareupdateexclusive','share','sharerowexclusive','exclusive','accessexclusive'], left(mode,-4)) m,
+ row(locktype,database,relation,page,tuple,virtualxid,transactionid,classid,objid,objsubid) obj from pg_locks
), pairs as (
select w.pid waiter, l.pid locker, l.obj, l.m
- from l w join l on l.obj is not distinct from w.obj and l.locktype = w.locktype and not l.pid = w.pid and l.granted
- where not w.granted
- and not exists (select from l i where i.pid=l.pid and i.locktype = l.locktype and i.obj is not distinct from l.obj and i.m > l.m)
+ from l w join l on l.obj is not distinct from w.obj and l.locktype=w.locktype and not l.pid=w.pid and l.granted
+ where not w.granted
+ and not exists ( select from l i where i.pid=l.pid and i.locktype=l.locktype and i.obj is not distinct from l.obj and i.m > l.m )
), leads as (
- select o.locker, 1::int lvl, count(*) q, array[locker] track, false as cycle
- from pairs o
- group by o.locker
+ select o.locker, 1::int lvl, count(*) q, array[locker] track, false as cycle from pairs o group by o.locker
union all
- select i.locker, leads.lvl + 1, (select count(*) from pairs q where q.locker = i.locker), leads.track || i.locker, i.locker = any(leads.track)
- from pairs i, leads
- where i.waiter=leads.locker and not cycle
+ select i.locker, leads.lvl+1, (select count(*) from pairs q where q.locker=i.locker), leads.track||i.locker, i.locker=any(leads.track)
+ from pairs i, leads where i.waiter=leads.locker and not cycle
), tree as (
- select locker pid,locker dad,locker root,case when cycle then track end dl, null::record obj,0 lvl, locker::text path, array_agg(locker) over () all_pids
- from leads o
- where
- (cycle and not exists (select from leads i where i.locker=any(o.track) and (i.lvl>o.lvl or i.q<o.q)))
- or (not cycle and not exists (select from pairs where waiter=o.locker) and not exists (select from leads i where i.locker=o.locker and i.cycle))
- union all
- select w.waiter pid, tree.pid dad, tree.root, case when w.waiter = any(tree.dl) then tree.dl end, w.obj, tree.lvl + 1, tree.path || '.' || w.waiter, all_pids || array_agg(w.waiter) over ()
- from tree
- join pairs w on tree.pid = w.locker and not w.waiter = any(all_pids)
-)
-select
- (clock_timestamp() - a.xact_start)::interval(0) as ts_age,
- (clock_timestamp() - a.state_change)::interval(0) as change_age,
- a.datname, a.usename, a.client_addr,
- tree.pid, replace(a.state, 'idle in transaction', 'idletx') state,
- lvl, (select count(*) from tree p where p.path ~ ('^'||tree.path) and not p.path=tree.path) blocked,
- case when tree.pid=any(tree.dl) then '!>' else repeat(' .', lvl) end||' '||trim(left(regexp_replace(a.query, e'\\s+', ' ', 'g'),300)) latest_query_in_tx
+ select locker pid,locker dad,locker root,case when cycle then track end dl, null::record obj,0 lvl,locker::text path,array_agg(locker) over () all_pids from leads o
+ where (cycle and not exists (select from leads i where i.locker=any(o.track) and (i.lvl>o.lvl or i.q<o.q)))
+ or (not cycle and not exists (select from pairs where waiter=o.locker) and not exists (select from leads i where i.locker=o.locker and i.cycle))
+ union all
+ select w.waiter pid,tree.pid dad,tree.root,case when w.waiter=any(tree.dl) then tree.dl end,w.obj,tree.lvl+1,tree.path||'.'||w.waiter,all_pids||array_agg(w.waiter) over ()
+ from tree join pairs w on tree.pid=w.locker and not w.waiter=any(all_pids)
+)
-from tree
-left join pairs w on w.waiter = tree.pid and w.locker = tree.dad
-join pg_stat_activity a using (pid)
-join pg_stat_activity r on r.pid=tree.root
-order by (now() - r.xact_start), path;
+select (clock_timestamp() - a.xact_start)::interval(0) as ts_age,
+ (clock_timestamp() - a.state_change)::interval(0) as change_age,
+ a.datname,a.usename,a.client_addr,
+ --w.obj wait_on_object,
+ tree.pid,replace(a.state, 'idle in transaction', 'idletx') state,
+ lvl,(select count(*) from tree p where p.path ~ ('^'||tree.path) and not p.path=tree.path) blocked,
+ case when tree.pid=any(tree.dl) then '!>' else repeat(' .', lvl) end||' '||trim(left(regexp_replace(a.query, e'\\s+', ' ', 'g'),100)) query
+ from tree
+ left join pairs w on w.waiter=tree.pid and w.locker=tree.dad
+ join pg_stat_activity a using (pid)
+ join pg_stat_activity r on r.pid=tree.root
+ order by (now() - r.xact_start), path;
diff --git a/sql/l2_lock_trees.sql b/sql/l2_lock_trees.sql
new file mode 100644
index 0000000..bc5c26f
--- /dev/null
+++ b/sql/l2_lock_trees.sql
@@ -0,0 +1,70 @@
+--Lock trees, detailed (based on pg_blocking_pids())
+
+-- Based on: https://gitlab.com/-/snippets/1890428
+-- See also: https://postgres.ai/blog/20211018-postgresql-lock-trees
+
+begin;
+
+set local statement_timeout to '100ms';
+
+with recursive activity as (
+ select
+ pg_blocking_pids(pid) blocked_by,
+ *,
+ age(clock_timestamp(), xact_start)::interval(0) as tx_age,
+ age(clock_timestamp(), state_change)::interval(0) as state_age
+ from pg_stat_activity
+ where state is distinct from 'idle'
+), blockers as (
+ select
+ array_agg(distinct c order by c) as pids
+ from (
+ select unnest(blocked_by)
+ from activity
+ ) as dt(c)
+), tree as (
+ select
+ activity.*,
+ 1 as level,
+ activity.pid as top_blocker_pid,
+ array[activity.pid] as path,
+ array[activity.pid]::int[] as all_blockers_above
+ from activity, blockers
+ where
+ array[pid] <@ blockers.pids
+ and blocked_by = '{}'::int[]
+ union all
+ select
+ activity.*,
+ tree.level + 1 as level,
+ tree.top_blocker_pid,
+ path || array[activity.pid] as path,
+ tree.all_blockers_above || array_agg(activity.pid) over () as all_blockers_above
+ from activity, tree
+ where
+ not array[activity.pid] <@ tree.all_blockers_above
+ and activity.blocked_by <> '{}'::int[]
+ and activity.blocked_by <@ tree.all_blockers_above
+)
+select
+ pid,
+ blocked_by,
+ tx_age,
+ state_age,
+ backend_xid as xid,
+ backend_xmin as xmin,
+ replace(state, 'idle in transaction', 'idletx') as state,
+ datname,
+ usename,
+ wait_event_type || ':' || wait_event as wait,
+ (select count(distinct t1.pid) from tree t1 where array[tree.pid] <@ t1.path and t1.pid <> tree.pid) as blkd,
+ format(
+ '%s %s%s',
+ lpad('[' || pid::text || ']', 7, ' '),
+ repeat('.', level - 1) || case when level > 1 then ' ' end,
+ left(query, 1000)
+ ) as query
+from tree
+order by top_blocker_pid, level, pid;
+
+commit;
diff --git a/sql/p1_alignment_padding.sql b/sql/p1_alignment_padding.sql
index da5531d..567fa0a 100644
--- a/sql/p1_alignment_padding.sql
+++ b/sql/p1_alignment_padding.sql
@@ -1,4 +1,4 @@
---[EXPERIMENTAL] Alignment Padding. How many bytes can be saved if columns are ordered better?
+--[EXP] Alignment padding: how many bytes can be saved if columns are reordered?
-- TODO: not-yet-analyzed tables – show a warning (cannot get n_live_tup -> cannot get total bytes)
-- TODO: NULLs
diff --git a/sql/r1_create_user_with_random_password.sql b/sql/r1_create_user_with_random_password.sql
new file mode 100644
index 0000000..1c862ed
--- /dev/null
+++ b/sql/r1_create_user_with_random_password.sql
@@ -0,0 +1,2 @@
+--Create user with random password (interactive)
+\ir ../roles/create_user_with_random_password.psql
\ No newline at end of file
diff --git a/sql/r2_alter_user_with_random_password.sql b/sql/r2_alter_user_with_random_password.sql
new file mode 100644
index 0000000..b741f72
--- /dev/null
+++ b/sql/r2_alter_user_with_random_password.sql
@@ -0,0 +1,2 @@
+--Alter user with random password (interactive)
+\ir ../roles/alter_user_with_random_password.psql
\ No newline at end of file
diff --git a/sql/s1_pg_stat_statements_top_total.sql b/sql/s1_pg_stat_statements_top_total.sql
index 5904c82..e954758 100644
--- a/sql/s1_pg_stat_statements_top_total.sql
+++ b/sql/s1_pg_stat_statements_top_total.sql
@@ -1,4 +1,4 @@
---Slowest Queries, by Total Time (requires pg_stat_statements extension)
+--Slowest queries, by total time (requires pg_stat_statements)
-- In pg_stat_statements, there is a problem: sometimes (quite often), it registers the same query twice (or even more).
-- It's easy to check in your DB:
@@ -10,15 +10,36 @@
--
-- This query gives you "full picture", aggregating stats for each query-database-username ternary
--- Works with Postgres 9.6
+-- Works with Postgres 9.6+
select
sum(calls) as calls,
- sum(total_time) as total_time,
- sum(mean_time * calls) / sum(calls) as mean_time,
- max(max_time) as max_time,
- min(min_time) as min_time,
+\if :postgres_dba_pgvers_13plus
+ round(sum(total_exec_time)::numeric, 2) as total_exec_t,
+ round((sum(mean_exec_time * calls) / sum(calls))::numeric, 2) as mean_exec_t,
+ format(
+ '%s–%s',
+ round(min(min_exec_time)::numeric, 2),
+ round(max(max_exec_time)::numeric, 2)
+ ) as min_max_exec_t,
+ round(sum(total_plan_time)::numeric, 2) as total_plan_t,
+ round((sum(mean_plan_time * calls) / sum(calls))::numeric, 2) as mean_plan_t,
+ format(
+ '%s–%s',
+ round(min(min_plan_time)::numeric, 2),
+ round(max(max_plan_time)::numeric, 2)
+ ) as min_max_plan_t,
+\else
+ sum(calls) as calls,
+ round(sum(total_time)::numeric, 2) as total_time,
+ round((sum(mean_time * calls) / sum(calls))::numeric, 2) as mean_time,
+ format(
+ '%s–%s',
+ round(min(min_time)::numeric, 2),
+ round(max(max_time)::numeric, 2)
+ ) as min_max_t,
-- stddev_time, -- https://stats.stackexchange.com/questions/55999/is-it-possible-to-find-the-combined-standard-deviation
+\endif
sum(rows) as rows,
(select usename from pg_user where usesysid = userid) as usr,
(select datname from pg_database where oid = dbid) as db,
@@ -38,6 +59,9 @@ select
array_agg(queryid) as queryids -- 9.4+
from pg_stat_statements
group by userid, dbid, query
+\if :postgres_dba_pgvers_13plus
+order by sum(total_exec_time) desc
+\else
order by sum(total_time) desc
+\endif
limit 50;
-
diff --git a/sql/s2_pg_stat_statements_report.sql b/sql/s2_pg_stat_statements_report.sql
index e19b353..d49beb3 100644
--- a/sql/s2_pg_stat_statements_report.sql
+++ b/sql/s2_pg_stat_statements_report.sql
@@ -1,6 +1,166 @@
---Slowest Queries Report (requires pg_stat_statements)
+--Slowest queries report (requires pg_stat_statements)
--Original version – Data Egret: https://github.com/dataegret/pg-utils/blob/master/sql/global_reports/query_stat_total.sql
+\if :postgres_dba_pgvers_13plus
+with pg_stat_statements_slice as (
+ select *
+ from pg_stat_statements
+ -- if current database is postgres then generate report for all databases,
+ -- otherwise generate for current database only
+ where
+ current_database() = 'postgres'
+ or dbid = (
+ select oid
+ from pg_database
+ where datname = current_database()
+ )
+), pg_stat_statements_normalized as (
+ select
+ *,
+ translate(
+ regexp_replace(
+ regexp_replace(
+ regexp_replace(
+ regexp_replace(
+ query,
+ e'\\?(::[a-zA-Z_]+)?( *, *\\?(::[a-zA-Z_]+)?)+', '?', 'g'
+ ),
+ e'\\$[0-9]+(::[a-zA-Z_]+)?( *, *\\$[0-9]+(::[a-zA-Z_]+)?)*', '$N', 'g'
+ ),
+ e'--.*$', '', 'ng'
+ ),
+ e'/\\*.*?\\*/', '', 'g'
+ ),
+ e'\r', ''
+ ) as query_normalized
+ from pg_stat_statements_slice
+), totals as (
+ select
+ sum(total_exec_time) as total_exec_time,
+ sum(blk_read_time+blk_write_time) as io_time,
+ sum(total_exec_time-blk_read_time-blk_write_time) as non_io_time,
+ sum(calls) as ncalls,
+ sum(rows) as total_rows
+ from pg_stat_statements_slice
+), _pg_stat_statements as (
+ select
+ (select datname from pg_database where oid = p.dbid) as database,
+ (select rolname from pg_roles where oid = p.userid) as username,
+ --select shortest query, replace \n\n-- strings to avoid email clients format text as footer
+ substring(
+ translate(
+ replace(
+ (array_agg(query order by length(query)))[1],
+ e'-- \n',
+ e'--\n'
+ ),
+ e'\r', ''
+ ),
+ 1,
+ 8192
+ ) as query,
+ sum(total_exec_time) as total_exec_time,
+ sum(blk_read_time) as blk_read_time, sum(blk_write_time) as blk_write_time,
+ sum(calls) as calls, sum(rows) as rows
+ from pg_stat_statements_normalized p
+ group by dbid, userid, md5(query_normalized)
+), totals_readable as (
+ select
+ to_char(interval '1 millisecond' * total_exec_time, 'HH24:MI:SS') as total_exec_time,
+ (100*io_time/total_exec_time)::numeric(20,2) as io_time_percent,
+ to_char(ncalls, 'FM999,999,999,990') as total_queries,
+ (select to_char(count(distinct md5(query)), 'FM999,999,990') from _pg_stat_statements) as unique_queries
+ from totals
+), statements as (
+ select
+ (100*total_exec_time/(select total_exec_time from totals)) as time_percent,
+ (100*(blk_read_time+blk_write_time)/(select greatest(io_time, 1) from totals)) as io_time_percent,
+ (100*(total_exec_time-blk_read_time-blk_write_time)/(select non_io_time from totals)) as non_io_time_percent,
+ to_char(interval '1 millisecond' * total_exec_time, 'HH24:MI:SS') as total_exec_time,
+ (total_exec_time::numeric/calls)::numeric(20,2) as avg_time,
+ ((total_exec_time-blk_read_time-blk_write_time)::numeric/calls)::numeric(20, 2) as avg_non_io_time,
+ ((blk_read_time+blk_write_time)::numeric/calls)::numeric(20, 2) as avg_io_time,
+ to_char(calls, 'FM999,999,999,990') as calls,
+ (100*calls/(select ncalls from totals))::numeric(20, 2) as calls_percent,
+ to_char(rows, 'FM999,999,999,990') as rows,
+ (100*rows/(select total_rows from totals))::numeric(20, 2) as row_percent,
+ database,
+ username,
+ query
+ from _pg_stat_statements
+ where
+ (total_exec_time-blk_read_time-blk_write_time)/(select non_io_time from totals) >= 0.01
+ or (blk_read_time+blk_write_time)/(
+ select greatest(io_time, 1) from totals
+ ) >= 0.01
+ or calls/(select ncalls from totals) >= 0.02
+ or rows/(select total_rows from totals) >= 0.02
+ union all
+ select
+ (100*sum(total_exec_time)::numeric/(select total_exec_time from totals)) as time_percent,
+ (100*sum(blk_read_time+blk_write_time)::numeric/(select greatest(io_time, 1) from totals)) as io_time_percent,
+ (100*sum(total_exec_time-blk_read_time-blk_write_time)::numeric/(select non_io_time from totals)) as non_io_time_percent,
+ to_char(interval '1 millisecond' * sum(total_exec_time), 'HH24:MI:SS') as total_exec_time,
+ (sum(total_exec_time)::numeric/sum(calls))::numeric(20,2) as avg_time,
+ (sum(total_exec_time-blk_read_time-blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_non_io_time,
+ (sum(blk_read_time+blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_io_time,
+ to_char(sum(calls), 'FM999,999,999,990') as calls,
+ (100*sum(calls)/(select ncalls from totals))::numeric(20, 2) as calls_percent,
+ to_char(sum(rows), 'FM999,999,999,990') as rows,
+ (100*sum(rows)/(select total_rows from totals))::numeric(20, 2) as row_percent,
+ 'all' as database,
+ 'all' as username,
+ 'other' as query
+ from _pg_stat_statements
+ where
+ not (
+ (total_exec_time-blk_read_time-blk_write_time)/(select non_io_time from totals) >= 0.01
+ or (blk_read_time+blk_write_time)/(select greatest(io_time, 1) from totals) >= 0.01
+ or calls/(select ncalls from totals)>=0.02 or rows/(select total_rows from totals) >= 0.02
+ )
+), statements_readable as (
+ select row_number() over (order by s.time_percent desc) as pos,
+ to_char(time_percent, 'FM990.0') || '%' as time_percent,
+ to_char(io_time_percent, 'FM990.0') || '%' as io_time_percent,
+ to_char(non_io_time_percent, 'FM990.0') || '%' as non_io_time_percent,
+ to_char(avg_io_time*100/(coalesce(nullif(avg_time, 0), 1)), 'FM990.0') || '%' as avg_io_time_percent,
+ total_exec_time, avg_time, avg_non_io_time, avg_io_time, calls, calls_percent, rows, row_percent,
+ database, username, query
+ from statements s
+ where calls is not null
+)
+select
+ e'total time:\t' || total_exec_time || ' (IO: ' || io_time_percent || E'%)\n'
+ || e'total queries:\t' || total_queries || ' (unique: ' || unique_queries || E')\n'
+ || 'report for ' || (select case when current_database() = 'postgres' then 'all databases' else current_database() || ' database' end)
+ || E', version b0.9.6'
+ || ' @ PostgreSQL '
+ || (select setting from pg_settings where name='server_version') || E'\ntracking '
+ || (select setting from pg_settings where name='pg_stat_statements.track') || ' '
+ || (select setting from pg_settings where name='pg_stat_statements.max') || ' queries, utilities '
+ || (select setting from pg_settings where name='pg_stat_statements.track_utility')
+ || ', logging ' || (select (case when setting = '0' then 'all' when setting = '-1' then 'none' when setting::int > 1000 then (setting::numeric/1000)::numeric(20, 1) || 's+' else setting || 'ms+' end) from pg_settings where name='log_min_duration_statement')
+ || E' queries\n'
+ || (
+ select coalesce(string_agg('WARNING: database ' || datname || ' must be vacuumed within ' || to_char(2147483647 - age(datfrozenxid), 'FM999,999,999,990') || ' transactions', E'\n' order by age(datfrozenxid) desc) || E'\n', '')
+ from pg_database where (2147483647 - age(datfrozenxid)) < 200000000
+ ) || E'\n'
+from totals_readable
+union all
+(
+select
+ e'=============================================================================================================\n'
+ || 'pos:' || pos || E'\t total time: ' || total_exec_time || ' (' || time_percent
+ || ', IO: ' || io_time_percent || ', Non-IO: ' || non_io_time_percent || E')\t calls: '
+ || calls || ' (' || calls_percent || E'%)\t avg_time: ' || avg_time
+ || 'ms (IO: ' || avg_io_time_percent || E')\n' || 'user: '
+ || username || E'\t db: ' || database || E'\t rows: ' || rows
+ || ' (' || row_percent || '%)' || E'\t query:\n' || query || E'\n'
+from statements_readable
+order by pos
+);
+
+\else
with pg_stat_statements_slice as (
select *
from pg_stat_statements
@@ -37,7 +197,7 @@ with pg_stat_statements_slice as (
select
sum(total_time) as total_time,
sum(blk_read_time+blk_write_time) as io_time,
- sum(total_time-blk_read_time-blk_write_time) as cpu_time,
+ sum(total_time-blk_read_time-blk_write_time) as non_io_time,
sum(calls) as ncalls,
sum(rows) as total_rows
from pg_stat_statements_slice
@@ -74,10 +234,10 @@ with pg_stat_statements_slice as (
select
(100*total_time/(select total_time from totals)) as time_percent,
(100*(blk_read_time+blk_write_time)/(select greatest(io_time, 1) from totals)) as io_time_percent,
- (100*(total_time-blk_read_time-blk_write_time)/(select cpu_time from totals)) as cpu_time_percent,
+ (100*(total_time-blk_read_time-blk_write_time)/(select non_io_time from totals)) as non_io_time_percent,
to_char(interval '1 millisecond' * total_time, 'HH24:MI:SS') as total_time,
(total_time::numeric/calls)::numeric(20,2) as avg_time,
- ((total_time-blk_read_time-blk_write_time)::numeric/calls)::numeric(20, 2) as avg_cpu_time,
+ ((total_time-blk_read_time-blk_write_time)::numeric/calls)::numeric(20, 2) as avg_non_io_time,
((blk_read_time+blk_write_time)::numeric/calls)::numeric(20, 2) as avg_io_time,
to_char(calls, 'FM999,999,999,990') as calls,
(100*calls/(select ncalls from totals))::numeric(20, 2) as calls_percent,
@@ -88,7 +248,7 @@ with pg_stat_statements_slice as (
query
from _pg_stat_statements
where
- (total_time-blk_read_time-blk_write_time)/(select cpu_time from totals) >= 0.01
+ (total_time-blk_read_time-blk_write_time)/(select non_io_time from totals) >= 0.01
or (blk_read_time+blk_write_time)/(
select greatest(io_time, 1) from totals
) >= 0.01
@@ -98,10 +258,10 @@ with pg_stat_statements_slice as (
select
(100*sum(total_time)::numeric/(select total_time from totals)) as time_percent,
(100*sum(blk_read_time+blk_write_time)::numeric/(select greatest(io_time, 1) from totals)) as io_time_percent,
- (100*sum(total_time-blk_read_time-blk_write_time)::numeric/(select cpu_time from totals)) as cpu_time_percent,
+ (100*sum(total_time-blk_read_time-blk_write_time)::numeric/(select non_io_time from totals)) as non_io_time_percent,
to_char(interval '1 millisecond' * sum(total_time), 'HH24:MI:SS') as total_time,
(sum(total_time)::numeric/sum(calls))::numeric(20,2) as avg_time,
- (sum(total_time-blk_read_time-blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_cpu_time,
+ (sum(total_time-blk_read_time-blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_non_io_time,
(sum(blk_read_time+blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_io_time,
to_char(sum(calls), 'FM999,999,999,990') as calls,
(100*sum(calls)/(select ncalls from totals))::numeric(20, 2) as calls_percent,
@@ -113,7 +273,7 @@ with pg_stat_statements_slice as (
from _pg_stat_statements
where
not (
- (total_time-blk_read_time-blk_write_time)/(select cpu_time from totals) >= 0.01
+ (total_time-blk_read_time-blk_write_time)/(select non_io_time from totals) >= 0.01
or (blk_read_time+blk_write_time)/(select greatest(io_time, 1) from totals) >= 0.01
or calls/(select ncalls from totals)>=0.02 or rows/(select total_rows from totals) >= 0.02
)
@@ -121,9 +281,9 @@ with pg_stat_statements_slice as (
select row_number() over (order by s.time_percent desc) as pos,
to_char(time_percent, 'FM990.0') || '%' as time_percent,
to_char(io_time_percent, 'FM990.0') || '%' as io_time_percent,
- to_char(cpu_time_percent, 'FM990.0') || '%' as cpu_time_percent,
+ to_char(non_io_time_percent, 'FM990.0') || '%' as non_io_time_percent,
to_char(avg_io_time*100/(coalesce(nullif(avg_time, 0), 1)), 'FM990.0') || '%' as avg_io_time_percent,
- total_time, avg_time, avg_cpu_time, avg_io_time, calls, calls_percent, rows, row_percent,
+ total_time, avg_time, avg_non_io_time, avg_io_time, calls, calls_percent, rows, row_percent,
database, username, query
from statements s
where calls is not null
@@ -150,7 +310,7 @@ union all
select
e'=============================================================================================================\n'
|| 'pos:' || pos || E'\t total time: ' || total_time || ' (' || time_percent
- || ', CPU: ' || cpu_time_percent || ', IO: ' || io_time_percent || E')\t calls: '
+ || ', IO: ' || io_time_percent || ', Non-IO: ' || non_io_time_percent || E')\t calls: '
|| calls || ' (' || calls_percent || E'%)\t avg_time: ' || avg_time
|| 'ms (IO: ' || avg_io_time_percent || E')\n' || 'user: '
|| username || E'\t db: ' || database || E'\t rows: ' || rows
@@ -158,4 +318,4 @@ select
from statements_readable
order by pos
);
-
+\endif
diff --git a/sql/t1_tuning.sql b/sql/t1_tuning.sql
index 361c2f7..0563b15 100644
--- a/sql/t1_tuning.sql
+++ b/sql/t1_tuning.sql
@@ -42,7 +42,7 @@ select :postgres_dba_t1_location = 3 as postgres_dba_t1_location_rds \gset
\echo
\echo
-\echo 'Type total available memory (in GB): '
+\echo 'Type total available memory (in GiB): '
\prompt postgres_dba_t1_memory
\echo
diff --git a/sql/v1_vacuum_activity.sql b/sql/v1_vacuum_activity.sql
index e298829..06ca84b 100644
--- a/sql/v1_vacuum_activity.sql
+++ b/sql/v1_vacuum_activity.sql
@@ -1,4 +1,4 @@
---Vacuum: Current Activity
+--Vacuum: current activity
-- Based on: https://github.com/lesovsky/uber-scripts/blob/master/postgresql/sql/vacuum_activity.sql
with data as (
diff --git a/sql/v2_autovacuum_progress_and_queue.sql b/sql/v2_autovacuum_progress_and_queue.sql
index 380ce5a..dc05269 100644
--- a/sql/v2_autovacuum_progress_and_queue.sql
+++ b/sql/v2_autovacuum_progress_and_queue.sql
@@ -1,4 +1,4 @@
---Vacuum: VACUUM progress and autovacuum queue
+--VACUUM progress and autovacuum queue
-- Based on: https://gitlab.com/snippets/1889668
diff --git a/start.psql b/start.psql
index c83b396..aa7d0ae 100644
--- a/start.psql
+++ b/start.psql
@@ -1,28 +1,31 @@
\ir warmup.psql
\echo '\033[1;35mMenu:\033[0m'
-\echo ' 0 – Node & Current DB Information: master/replica, lag, DB size, tmp files, etc'
-\echo ' 1 – Databases: Size, Statistics'
-\echo ' 2 – Table Sizes'
-\echo ' 3 – Load Profile'
-\echo ' a1 – Current Activity: count of current connections grouped by database, user name, state'
-\echo ' b1 – Tables Bloat, rough estimation'
-\echo ' b2 – B-tree Indexes Bloat, rough estimation'
-\echo ' b3 – Tables Bloat, more precise (requires pgstattuple extension; expensive)'
-\echo ' b4 – B-tree Indexes Bloat, more precise (requires pgstattuple extension; expensive)'
-\echo ' b5 – Tables and Columns Without Stats (so bloat cannot be estimated)'
-\echo ' e1 – List of extensions installed in the current DB'
-\echo ' i1 – Unused/Rarely Used Indexes'
-\echo ' i2 – List of redundant indexes'
-\echo ' i3 – FKs with Missing/Bad Indexes'
-\echo ' i4 – List of invalid indexes'
-\echo ' i5 – Unused/Redundant Indexes Do & Undo Migration DDL'
-\echo ' l1 – Locks: analysis of "locking trees"'
-\echo ' p1 – [EXPERIMENTAL] Alignment Padding. How many bytes can be saved if columns are ordered better?'
-\echo ' s1 – Slowest Queries, by Total Time (requires pg_stat_statements extension)'
-\echo ' s2 – Slowest Queries Report (requires pg_stat_statements)'
+\echo ' 0 – Node and current database information: primary/replica, lag, database size, temporary files, etc.'
+\echo ' 1 – Databases: size, stats'
+\echo ' 2 – Tables: table/index/TOAST size, number of rows'
+\echo ' 3 – Load profile'
+\echo ' a1 – Current activity: count of current connections grouped by database, username, state'
+\echo ' b1 – Table bloat (estimated)'
+\echo ' b2 – B-tree index bloat (estimated)'
+\echo ' b3 – Table bloat (requires pgstattuple; expensive)'
+\echo ' b4 – B-tree indexes bloat (requires pgstattuple; expensive)'
+\echo ' b5 – Tables and columns without stats (so bloat cannot be estimated)'
+\echo ' e1 – Extensions installed in current database'
+\echo ' i1 – Unused and rarely used indexes'
+\echo ' i2 – Redundant indexes'
+\echo ' i3 – Foreign keys with missing or bad indexes'
+\echo ' i4 – Invalid indexes'
+\echo ' i5 – Cleanup unused and redundant indexes – DO & UNDO migration DDL'
+\echo ' l1 – Lock trees (lightweight)'
+\echo ' l2 – Lock trees, detailed (based on pg_blocking_pids())'
+\echo ' p1 – [EXP] Alignment padding: how many bytes can be saved if columns are reordered?'
+\echo ' r1 – Create user with random password (interactive)'
+\echo ' r2 – Alter user with random password (interactive)'
+\echo ' s1 – Slowest queries, by total time (requires pg_stat_statements)'
+\echo ' s2 – Slowest queries report (requires pg_stat_statements)'
\echo ' t1 – Postgres parameters tuning'
-\echo ' v1 – Vacuum: Current Activity'
-\echo ' v2 – Vacuum: VACUUM progress and autovacuum queue'
+\echo ' v1 – Vacuum: current activity'
+\echo ' v2 – VACUUM progress and autovacuum queue'
\echo ' q – Quit'
\echo
\echo Type your choice and press :
@@ -46,7 +49,10 @@ select
:d_stp::text = 'i4' as d_step_is_i4,
:d_stp::text = 'i5' as d_step_is_i5,
:d_stp::text = 'l1' as d_step_is_l1,
+:d_stp::text = 'l2' as d_step_is_l2,
:d_stp::text = 'p1' as d_step_is_p1,
+:d_stp::text = 'r1' as d_step_is_r1,
+:d_stp::text = 'r2' as d_step_is_r2,
:d_stp::text = 's1' as d_step_is_s1,
:d_stp::text = 's2' as d_step_is_s2,
:d_stp::text = 't1' as d_step_is_t1,
@@ -124,10 +130,22 @@ select
\ir ./sql/l1_lock_trees.sql
\prompt 'Press to continue…' d_dummy
\ir ./start.psql
+\elif :d_step_is_l2
+ \ir ./sql/l2_lock_trees.sql
+ \prompt 'Press to continue…' d_dummy
+ \ir ./start.psql
\elif :d_step_is_p1
\ir ./sql/p1_alignment_padding.sql
\prompt 'Press to continue…' d_dummy
\ir ./start.psql
+\elif :d_step_is_r1
+ \ir ./sql/r1_create_user_with_random_password.sql
+ \prompt 'Press to continue…' d_dummy
+ \ir ./start.psql
+\elif :d_step_is_r2
+ \ir ./sql/r2_alter_user_with_random_password.sql
+ \prompt 'Press to continue…' d_dummy
+ \ir ./start.psql
\elif :d_step_is_s1
\ir ./sql/s1_pg_stat_statements_top_total.sql
\prompt 'Press to continue…' d_dummy
diff --git a/warmup.psql b/warmup.psql
index 4105053..a964275 100644
--- a/warmup.psql
+++ b/warmup.psql
@@ -4,6 +4,10 @@
select 1/0;
\endif
+select current_setting('server_version_num')::integer >= 170000 as postgres_dba_pgvers_17plus \gset
+
+select current_setting('server_version_num')::integer >= 130000 as postgres_dba_pgvers_13plus \gset
+
select current_setting('server_version_num')::integer >= 100000 as postgres_dba_pgvers_10plus \gset
\if :postgres_dba_pgvers_10plus
\set postgres_dba_last_wal_receive_lsn pg_last_wal_receive_lsn