Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions doc/src/sgml/backup.sgml
Original file line number Diff line number Diff line change
Expand Up @@ -1520,11 +1520,11 @@ tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/
If archive storage size is a concern, you can use
<application>gzip</application> to compress the archive files:
<programlisting>
archive_command = 'gzip &lt; %p &gt; /var/lib/pgsql/archive/%f'
archive_command = 'gzip &lt; %p &gt; /mnt/server/archivedir/%f.gz'
</programlisting>
You will then need to use <application>gunzip</application> during recovery:
<programlisting>
restore_command = 'gunzip &lt; /mnt/server/archivedir/%f &gt; %p'
restore_command = 'gunzip &lt; /mnt/server/archivedir/%f.gz &gt; %p'
</programlisting>
</para>
</sect3>
Expand Down
8 changes: 4 additions & 4 deletions doc/src/sgml/func.sgml
Original file line number Diff line number Diff line change
Expand Up @@ -26238,14 +26238,14 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm>
<primary>pg_collation_current_version</primary>
<primary>pg_collation_actual_version</primary>
</indexterm>
<function>pg_collation_current_version</function> ( <type>oid</type> )
<function>pg_collation_actual_version</function> ( <type>oid</type> )
<returnvalue>text</returnvalue>
</para>
<para>
Returns the version of the collation object as reported by the ICU
library or operating system. <literal>null</literal> is returned
Returns the actual version of the collation object as it is currently
installed in the operating system. <literal>null</literal> is returned
on operating systems where <productname>PostgreSQL</productname>
doesn't have support for versions.
</para></entry>
Expand Down
1 change: 1 addition & 0 deletions src/backend/access/gin/ginvacuum.c
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn

END_CRIT_SECTION();

gvs->result->pages_newly_deleted++;
gvs->result->pages_deleted++;
}

Expand Down
19 changes: 16 additions & 3 deletions src/backend/access/gist/gistvacuum.c
Original file line number Diff line number Diff line change
Expand Up @@ -133,9 +133,21 @@ gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
MemoryContext oldctx;

/*
* Reset counts that will be incremented during the scan; needed in case
* of multiple scans during a single VACUUM command.
* Reset fields that track information about the entire index now. This
* avoids double-counting in the case where a single VACUUM command
* requires multiple scans of the index.
*
* Avoid resetting the tuples_removed and pages_newly_deleted fields here,
* since they track information about the VACUUM command, and so must last
* across each call to gistvacuumscan().
*
* (Note that pages_free is treated as state about the whole index, not
* the current VACUUM. This is appropriate because RecordFreeIndexPage()
* calls are idempotent, and get repeated for the same deleted pages in
* some scenarios. The point for us is to track the number of recyclable
* pages in the index at the end of the VACUUM command.)
*/
stats->num_pages = 0;
stats->estimated_count = false;
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
Expand Down Expand Up @@ -281,8 +293,8 @@ gistvacuumpage(GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno)
{
/* Okay to recycle this page */
RecordFreeIndexPage(rel, blkno);
vstate->stats->pages_free++;
vstate->stats->pages_deleted++;
vstate->stats->pages_free++;
}
else if (GistPageIsDeleted(page))
{
Expand Down Expand Up @@ -636,6 +648,7 @@ gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* mark the page as deleted */
MarkBufferDirty(leafBuffer);
GistPageSetDeleted(leafPage, txid);
stats->pages_newly_deleted++;
stats->pages_deleted++;

/* remove the downlink from the parent */
Expand Down
4 changes: 3 additions & 1 deletion src/backend/access/heap/vacuumlazy.c
Original file line number Diff line number Diff line change
Expand Up @@ -2521,9 +2521,11 @@ lazy_cleanup_index(Relation indrel,
(*stats)->num_index_tuples,
(*stats)->num_pages),
errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
"%u index pages were newly deleted.\n"
"%u index pages are currently deleted, of which %u are currently reusable.\n"
"%s.",
(*stats)->tuples_removed,
(*stats)->pages_newly_deleted,
(*stats)->pages_deleted, (*stats)->pages_free,
pg_rusage_show(&ru0))));
}
Expand Down
54 changes: 30 additions & 24 deletions src/backend/access/nbtree/nbtpage.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ static bool _bt_mark_page_halfdead(Relation rel, Buffer leafbuf,
static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
BlockNumber scanblkno,
bool *rightsib_empty,
uint32 *ndeleted);
BTVacState *vstate);
static bool _bt_lock_subtree_parent(Relation rel, BlockNumber child,
BTStack stack,
Buffer *subtreeparent,
Expand Down Expand Up @@ -1760,28 +1760,31 @@ _bt_rightsib_halfdeadflag(Relation rel, BlockNumber leafrightsib)
* should never pass a buffer containing an existing deleted page here. The
* lock and pin on caller's buffer will be dropped before we return.
*
* Returns the number of pages successfully deleted (zero if page cannot
* be deleted now; could be more than one if parent or right sibling pages
* were deleted too). Note that this does not include pages that we delete
* that the btvacuumscan scan has yet to reach; they'll get counted later
* instead.
* Maintains bulk delete stats for caller, which are taken from vstate. We
* need to cooperate closely with caller here so that whole VACUUM operation
* reliably avoids any double counting of subsidiary-to-leafbuf pages that we
* delete in passing. If such pages happen to be from a block number that is
* ahead of the current scanblkno position, then caller is expected to count
* them directly later on. It's simpler for us to understand caller's
* requirements than it would be for caller to understand when or how a
* deleted page became deleted after the fact.
*
* NOTE: this leaks memory. Rather than trying to clean up everything
* carefully, it's better to run it in a temp context that can be reset
* frequently.
*/
uint32
_bt_pagedel(Relation rel, Buffer leafbuf)
void
_bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate)
{
uint32 ndeleted = 0;
BlockNumber rightsib;
bool rightsib_empty;
Page page;
BTPageOpaque opaque;

/*
* Save original leafbuf block number from caller. Only deleted blocks
* that are <= scanblkno get counted in ndeleted return value.
that are <= scanblkno are added to the bulk delete stats' pages_deleted
* count.
*/
BlockNumber scanblkno = BufferGetBlockNumber(leafbuf);

Expand Down Expand Up @@ -1843,7 +1846,7 @@ _bt_pagedel(Relation rel, Buffer leafbuf)
RelationGetRelationName(rel))));

_bt_relbuf(rel, leafbuf);
return ndeleted;
return;
}

/*
Expand Down Expand Up @@ -1873,7 +1876,7 @@ _bt_pagedel(Relation rel, Buffer leafbuf)
Assert(!P_ISHALFDEAD(opaque));

_bt_relbuf(rel, leafbuf);
return ndeleted;
return;
}

/*
Expand Down Expand Up @@ -1922,8 +1925,7 @@ _bt_pagedel(Relation rel, Buffer leafbuf)
if (_bt_leftsib_splitflag(rel, leftsib, leafblkno))
{
ReleaseBuffer(leafbuf);
Assert(ndeleted == 0);
return ndeleted;
return;
}

/* we need an insertion scan key for the search, so build one */
Expand Down Expand Up @@ -1964,7 +1966,7 @@ _bt_pagedel(Relation rel, Buffer leafbuf)
if (!_bt_mark_page_halfdead(rel, leafbuf, stack))
{
_bt_relbuf(rel, leafbuf);
return ndeleted;
return;
}
}

Expand All @@ -1979,7 +1981,7 @@ _bt_pagedel(Relation rel, Buffer leafbuf)
{
/* Check for interrupts in _bt_unlink_halfdead_page */
if (!_bt_unlink_halfdead_page(rel, leafbuf, scanblkno,
&rightsib_empty, &ndeleted))
&rightsib_empty, vstate))
{
/*
* _bt_unlink_halfdead_page should never fail, since we
Expand All @@ -1990,7 +1992,7 @@ _bt_pagedel(Relation rel, Buffer leafbuf)
* lock and pin on leafbuf for us.
*/
Assert(false);
return ndeleted;
return;
}
}

Expand Down Expand Up @@ -2026,8 +2028,6 @@ _bt_pagedel(Relation rel, Buffer leafbuf)

leafbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
}

return ndeleted;
}

/*
Expand Down Expand Up @@ -2262,9 +2262,10 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
*/
static bool
_bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
bool *rightsib_empty, uint32 *ndeleted)
bool *rightsib_empty, BTVacState *vstate)
{
BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
IndexBulkDeleteResult *stats = vstate->stats;
BlockNumber leafleftsib;
BlockNumber leafrightsib;
BlockNumber target;
Expand Down Expand Up @@ -2674,12 +2675,17 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
_bt_relbuf(rel, buf);

/*
* If btvacuumscan won't revisit this page in a future btvacuumpage call
* and count it as deleted then, we count it as deleted by current
* btvacuumpage call
* Maintain pages_newly_deleted, which is simply the number of pages
* deleted by the ongoing VACUUM operation.
*
* Maintain pages_deleted in a way that takes into account how
* btvacuumpage() will count deleted pages that have yet to become
* scanblkno -- only count page when it's not going to get that treatment
* later on.
*/
stats->pages_newly_deleted++;
if (target <= scanblkno)
(*ndeleted)++;
stats->pages_deleted++;

return true;
}
Expand Down
33 changes: 12 additions & 21 deletions src/backend/access/nbtree/nbtree.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,17 +38,6 @@
#include "utils/memutils.h"


/* Working state needed by btvacuumpage */
typedef struct
{
IndexVacuumInfo *info;
IndexBulkDeleteResult *stats;
IndexBulkDeleteCallback callback;
void *callback_state;
BTCycleId cycleid;
MemoryContext pagedelcontext;
} BTVacState;

/*
* BTPARALLEL_NOT_INITIALIZED indicates that the scan has not started.
*
Expand Down Expand Up @@ -1016,9 +1005,9 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
* avoids double-counting in the case where a single VACUUM command
* requires multiple scans of the index.
*
* Avoid resetting the tuples_removed field here, since it tracks
* information about the VACUUM command, and so must last across each call
* to btvacuumscan().
* Avoid resetting the tuples_removed and pages_newly_deleted fields here,
* since they track information about the VACUUM command, and so must last
* across each call to btvacuumscan().
*
* (Note that pages_free is treated as state about the whole index, not
* the current VACUUM. This is appropriate because RecordFreeIndexPage()
Expand Down Expand Up @@ -1237,11 +1226,13 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno)
}
else if (P_ISHALFDEAD(opaque))
{
/* Half-dead leaf page (from interrupted VACUUM) -- finish deleting */
attempt_pagedel = true;

/*
* Half-dead leaf page. Try to delete now. Might update
* pages_deleted below.
* _bt_pagedel() will increment both pages_newly_deleted and
pages_deleted stats in all cases (barring corruption).
*/
attempt_pagedel = true;
}
else if (P_ISLEAF(opaque))
{
Expand Down Expand Up @@ -1451,12 +1442,12 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno)
oldcontext = MemoryContextSwitchTo(vstate->pagedelcontext);

/*
* We trust the _bt_pagedel return value because it does not include
* any page that a future call here from btvacuumscan is expected to
* count. There will be no double-counting.
* _bt_pagedel maintains the bulk delete stats on our behalf;
* pages_newly_deleted and pages_deleted are likely to be incremented
during the call.
*/
Assert(blkno == scanblkno);
stats->pages_deleted += _bt_pagedel(rel, buf);
_bt_pagedel(rel, buf, vstate);

MemoryContextSwitchTo(oldcontext);
/* pagedel released buffer, so we shouldn't */
Expand Down
1 change: 1 addition & 0 deletions src/backend/access/spgist/spgvacuum.c
Original file line number Diff line number Diff line change
Expand Up @@ -891,6 +891,7 @@ spgvacuumscan(spgBulkDeleteState *bds)

/* Report final stats */
bds->stats->num_pages = num_pages;
bds->stats->pages_newly_deleted = bds->stats->pages_deleted;
bds->stats->pages_free = bds->stats->pages_deleted;
}

Expand Down
2 changes: 1 addition & 1 deletion src/backend/commands/collationcmds.c
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ IsThereCollationInNamespace(const char *collname, Oid nspOid)
}

Datum
pg_collation_current_version(PG_FUNCTION_ARGS)
pg_collation_actual_version(PG_FUNCTION_ARGS)
{
Oid collid = PG_GETARG_OID(0);
char *version;
Expand Down
12 changes: 6 additions & 6 deletions src/backend/parser/parse_cte.c
Original file line number Diff line number Diff line change
Expand Up @@ -730,15 +730,15 @@ makeDependencyGraphWalker(Node *node, CteState *cstate)
* In the non-RECURSIVE case, query names are visible to the
* WITH items after them and to the main query.
*/
ListCell *cell1;

cstate->innerwiths = lcons(NIL, cstate->innerwiths);
cell1 = list_head(cstate->innerwiths);
foreach(lc, stmt->withClause->ctes)
{
CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
ListCell *cell1;

(void) makeDependencyGraphWalker(cte->ctequery, cstate);
/* note that recursion could mutate innerwiths list */
cell1 = list_head(cstate->innerwiths);
lfirst(cell1) = lappend((List *) lfirst(cell1), cte);
}
(void) raw_expression_tree_walker(node,
Expand Down Expand Up @@ -1006,15 +1006,15 @@ checkWellFormedRecursionWalker(Node *node, CteState *cstate)
* In the non-RECURSIVE case, query names are visible to the
* WITH items after them and to the main query.
*/
ListCell *cell1;

cstate->innerwiths = lcons(NIL, cstate->innerwiths);
cell1 = list_head(cstate->innerwiths);
foreach(lc, stmt->withClause->ctes)
{
CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
ListCell *cell1;

(void) checkWellFormedRecursionWalker(cte->ctequery, cstate);
/* note that recursion could mutate innerwiths list */
cell1 = list_head(cstate->innerwiths);
lfirst(cell1) = lappend((List *) lfirst(cell1), cte);
}
checkWellFormedSelectStmt(stmt, cstate);
Expand Down
Loading