From 1a3690be16be340f288c069c452e8428f1cc48ad Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 20 Mar 2019 20:24:44 +0200 Subject: [PATCH 2/2] Delete empty pages during GiST VACUUM This commit teaches GiST to actually delete pages during VACUUM. To do this we scan GiST two times. At first pass we make note of empty pages and internal pages. At second pass we scan through internal pages looking for references to empty leaf pages. Heikki's CHANGES since v22: * Only scan the empty pages after the last scan, in a multi-pass vacuum. (I think that's what we want...) We could actually be smarter, and do this as part of the second pass's scan, in a multi-pass vacuum. * Call ReadNewTransactionId() while holding lock. I think that's needed for correctness. * Use new IntegerSet implementation. Author: Andrey Borodin Discussion: /message-id/B1E4DF12-6CD3-4706-BDBD-BF3283328F60@yandex-team.ru --- src/backend/access/gist/README | 48 ++++ src/backend/access/gist/gist.c | 15 ++ src/backend/access/gist/gistutil.c | 15 +- src/backend/access/gist/gistvacuum.c | 350 +++++++++++++++++++++++-- src/backend/access/gist/gistxlog.c | 65 +++++ src/backend/access/rmgrdesc/gistdesc.c | 3 + src/include/access/gist.h | 4 + src/include/access/gist_private.h | 7 +- src/include/access/gistxlog.h | 12 +- src/test/regress/expected/gist.out | 6 +- src/test/regress/sql/gist.sql | 6 +- 11 files changed, 489 insertions(+), 42 deletions(-) diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README index 02228662b81..501b1c1a77a 100644 --- a/src/backend/access/gist/README +++ b/src/backend/access/gist/README @@ -413,6 +413,54 @@ emptied yet; tuples never move upwards in the tree. The final emptying loops through buffers at a given level until all buffers at that level have been emptied, and then moves down to the next level. 
+Bulk delete algorithm (VACUUM) +------------------------------ + +VACUUM works in two stages: + +In the first stage, we scan the whole index in physical order. To make sure +that we don't miss any dead tuples because a concurrent page split moved them, +we check the F_FOLLOW_RIGHT flags and NSN on each page, to detect if the +page has been concurrently split. If a concurrent page split is detected, and +one half of the page was moved to a position that we already scanned, we "jump" +to scan the page again. This is the same mechanism that B-tree VACUUM uses, +but because we already have NSNs on pages, to detect page splits during +searches, we don't need a "vacuum cycle ID" concept for that like B-tree does. + +While we scan all the pages, we also make note of any completely empty leaf +pages. We will try to unlink them from the tree in the second stage. We also +record the block numbers of all internal pages, in an IntegerSet. They are +needed in the second stage, to locate parents of empty pages. + +In the second stage, we try to unlink any empty leaf pages from the tree, so +that their space can be reused. If we didn't see any empty pages in the first +stage, the second stage is skipped. In order to delete an empty page, its +downlink must be removed from the parent. We scan all the internal pages, +whose block numbers we +memorized in the first stage, and look for downlinks to pages that we have +memorized as being empty. Whenever we find one, we acquire a lock on the +parent and child page, re-check that the child page is still empty. Then, we +remove the downlink and mark the child as deleted, and release the locks. + +The insertion algorithm would get confused, if an internal page was completely +empty. So we never delete the last child of an internal page, even if it's +empty. Currently, we only support deleting leaf pages. + +This page deletion algorithm works on a best-effort basis. 
It might fail to +find a downlink, if a concurrent page split moved it after the first stage. +In that case, we won't be able to remove all empty pages. That's OK, it's +not expected to happen very often, and hopefully the next VACUUM will clean +it up, instead. + +When we have deleted a page, it's possible that an in-progress search will +still descend on the page, if it saw the downlink before we removed it. The +search will see that it is deleted, and ignore it, but as long as that can +happen, we cannot reuse the page. To "wait out" any in-progress searches, when +the page is deleted, it's labeled with the current next-transaction counter +value. The page is not recycled, until that XID is no longer visible to +anyone. That's much more conservative than necessary, but let's keep it +simple. + Authors: Teodor Sigaev diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 2ce5425ef98..a746e911f37 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -704,6 +704,9 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTInsertStack *item; OffsetNumber downlinkoffnum; + /* currently, internal pages are never deleted */ + Assert(!GistPageIsDeleted(stack->page)); + downlinkoffnum = gistchoose(state.r, stack->page, itup, giststate); iid = PageGetItemId(stack->page, downlinkoffnum); idxtuple = (IndexTuple) PageGetItem(stack->page, iid); @@ -838,6 +841,18 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, } } + /* + * The page might have been deleted after we scanned the parent + * and saw the downlink. 
+ */ + if (GistPageIsDeleted(stack->page)) + { + UnlockReleaseBuffer(stack->buffer); + xlocked = false; + state.stack = stack = stack->parent; + continue; + } + /* now state.stack->(page, buffer and blkno) points to leaf page */ gistinserttuple(&state, stack, giststate, itup, diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index f32e16eed58..4e511dfb8c2 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -23,6 +23,7 @@ #include "storage/lmgr.h" #include "utils/float.h" #include "utils/syscache.h" +#include "utils/snapmgr.h" #include "utils/lsyscache.h" @@ -829,13 +830,21 @@ gistNewBuffer(Relation r) { Page page = BufferGetPage(buffer); + /* + * If the page was never initialized, it's OK to use. + */ if (PageIsNew(page)) - return buffer; /* OK to use, if never initialized */ + return buffer; gistcheckpage(r, buffer); - if (GistPageIsDeleted(page)) - return buffer; /* OK to use */ + /* + * Otherwise, recycle it if deleted, and too old to have any processes + * interested in it. + */ + if (GistPageIsDeleted(page) && + TransactionIdPrecedes(GistPageGetDeleteXid(page), RecentGlobalXmin)) + return buffer; LockBuffer(buffer, GIST_UNLOCK); } diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 3c1d75691e8..531b4b73a45 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -16,26 +16,49 @@ #include "access/genam.h" #include "access/gist_private.h" +#include "access/transam.h" #include "commands/vacuum.h" +#include "lib/integerset.h" #include "miscadmin.h" #include "storage/indexfsm.h" #include "storage/lmgr.h" -/* Working state needed by gistbulkdelete */ +/* + * State kept across vacuum stages. 
+ */ typedef struct { + IndexBulkDeleteResult stats; /* must be first */ + IndexVacuumInfo *info; - IndexBulkDeleteResult *stats; + + /* + * These are used to memorize all internal and empty leaf pages in the 1st + * vacuum phase. They are used in the 2nd phase, to delete all the empty + * pages. + */ + IntegerSet *internalPagesSet; + IntegerSet *emptyLeafPagesSet; +} GistBulkDeleteResult; + +/* Working state needed by gistbulkdelete */ +typedef struct +{ + GistBulkDeleteResult *stats; IndexBulkDeleteCallback callback; void *callback_state; GistNSN startNSN; BlockNumber totFreePages; /* true total # of free pages */ } GistVacState; -static void gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, +static void gistvacuumscan(IndexVacuumInfo *info, GistBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state); static void gistvacuumpage(GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno); +static void gistvacuum_recycle_pages(GistBulkDeleteResult *stats); +static bool gistdeletepage(GistBulkDeleteResult *stats, + Buffer buffer, OffsetNumber downlink, + Buffer leafBuffer); /* * VACUUM bulkdelete stage: remove index entries. 
@@ -44,13 +67,15 @@ IndexBulkDeleteResult * gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state) { + GistBulkDeleteResult *gist_stats = (GistBulkDeleteResult *) stats; + /* allocate stats if first time through, else re-use existing struct */ - if (stats == NULL) - stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); + if (gist_stats == NULL) + gist_stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult)); - gistvacuumscan(info, stats, callback, callback_state); + gistvacuumscan(info, gist_stats, callback, callback_state); - return stats; + return (IndexBulkDeleteResult *) gist_stats; } /* @@ -59,6 +84,8 @@ gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexBulkDeleteResult * gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) { + GistBulkDeleteResult *gist_stats = (GistBulkDeleteResult *) stats; + /* No-op in ANALYZE ONLY mode */ if (info->analyze_only) return stats; @@ -68,10 +95,26 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) * stats from the latest gistbulkdelete call. If it wasn't called, we * still need to do a pass over the index, to obtain index statistics. */ - if (stats == NULL) { - stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); - gistvacuumscan(info, stats, NULL, NULL); + if (gist_stats == NULL) { + gist_stats = (GistBulkDeleteResult *) palloc0(sizeof(GistBulkDeleteResult)); + gistvacuumscan(info, gist_stats, NULL, NULL); + } + + /* + * If we saw any empty pages that could be recycled, try to unlink them from + * the tree so that they can be reused. 
+ */ + if (gist_stats->emptyLeafPagesSet) + { + gistvacuum_recycle_pages(gist_stats); + intset_free(gist_stats->emptyLeafPagesSet); + gist_stats->emptyLeafPagesSet = NULL; + } + if (gist_stats->internalPagesSet) + { + intset_free(gist_stats->internalPagesSet); + gist_stats->internalPagesSet = NULL; } /* @@ -82,11 +125,11 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) */ if (!info->estimated_count) { - if (stats->num_index_tuples > info->num_heap_tuples) - stats->num_index_tuples = info->num_heap_tuples; + if (gist_stats->stats.num_index_tuples > info->num_heap_tuples) + gist_stats->stats.num_index_tuples = info->num_heap_tuples; } - return stats; + return (IndexBulkDeleteResult *) gist_stats; } /* @@ -97,15 +140,16 @@ gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) * btvacuumcleanup invoke this (the latter only if no btbulkdelete call * occurred). * - * This also adds unused/delete pages to the free space map, although that - * is currently not very useful. There is currently no support for deleting - * empty pages, so recycleable pages can only be found if an error occurs - * while the index is being expanded, leaving an all-zeros page behind. + * This also makes note of any empty leaf pages, as well as all internal + * pages. gistvacuum_recycle_pages() needs that information. Any deleted + * pages are added directly to the free space map. (They should've been + * added there when they were originally deleted, already, but it's possible + * that the FSM was lost at a crash, for example.) * * The caller is responsible for initially allocating/zeroing a stats struct. 
*/ static void -gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, +gistvacuumscan(IndexVacuumInfo *info, GistBulkDeleteResult *stats, IndexBulkDeleteCallback callback, void *callback_state) { Relation rel = info->index; @@ -118,12 +162,19 @@ gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, * Reset counts that will be incremented during the scan; needed in case * of multiple scans during a single VACUUM command. */ - stats->estimated_count = false; - stats->num_index_tuples = 0; - stats->pages_deleted = 0; + stats->stats.estimated_count = false; + stats->stats.num_index_tuples = 0; + stats->stats.pages_deleted = 0; + + if (stats->internalPagesSet != NULL) + intset_free(stats->internalPagesSet); + stats->internalPagesSet = intset_create(); + if (stats->emptyLeafPagesSet != NULL) + intset_free(stats->emptyLeafPagesSet); + stats->emptyLeafPagesSet = intset_create(); /* Set up info to pass down to gistvacuumpage */ - vstate.info = info; + stats->info = info; vstate.stats = stats; vstate.callback = callback; vstate.callback_state = callback_state; @@ -171,6 +222,7 @@ gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* Quit if we've scanned the whole relation */ if (blkno >= num_pages) break; + /* Iterate over pages, then loop back to recheck length */ for (; blkno < num_pages; blkno++) gistvacuumpage(&vstate, blkno, blkno); @@ -192,8 +244,8 @@ gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, IndexFreeSpaceMapVacuum(rel); /* update statistics */ - stats->num_pages = num_pages; - stats->pages_free = vstate.totFreePages; + stats->stats.num_pages = num_pages; + stats->stats.pages_free = vstate.totFreePages; } /* @@ -210,8 +262,8 @@ gistvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, static void gistvacuumpage(GistVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno) { - IndexVacuumInfo *info = vstate->info; - IndexBulkDeleteResult *stats = vstate->stats; + GistBulkDeleteResult 
*stats = vstate->stats; + IndexVacuumInfo *info = stats->info; IndexBulkDeleteCallback callback = vstate->callback; void *callback_state = vstate->callback_state; Relation rel = info->index; @@ -240,12 +292,13 @@ restart: /* Okay to recycle this page */ RecordFreeIndexPage(rel, blkno); vstate->totFreePages++; - stats->pages_deleted++; + stats->stats.pages_deleted++; } else if (GistPageIsLeaf(page)) { OffsetNumber todelete[MaxOffsetNumber]; int ntodelete = 0; + int nremain; GISTPageOpaque opaque = GistPageGetOpaque(page); OffsetNumber maxoff = PageGetMaxOffsetNumber(page); @@ -314,12 +367,28 @@ restart: END_CRIT_SECTION(); - stats->tuples_removed += ntodelete; + stats->stats.tuples_removed += ntodelete; /* must recompute maxoff */ maxoff = PageGetMaxOffsetNumber(page); } - stats->num_index_tuples += maxoff - FirstOffsetNumber + 1; + nremain = maxoff - FirstOffsetNumber + 1; + if (nremain == 0) + { + /* + * The page is now completely empty. Remember its block number, + * we will try to delete the page in the second stage, in + * gistvacuum_recycle_pages(). + * + * Skip this when recursing, because IntegerSet requires that the + * values are added in ascending order. The next VACUUM will pick + * it up... + */ + if (blkno == orig_blkno) + intset_add_member(stats->emptyLeafPagesSet, blkno); + } + else + stats->stats.num_index_tuples += nremain; } else { @@ -347,6 +416,14 @@ restart: errdetail("This is caused by an incomplete page split at crash recovery before upgrading to PostgreSQL 9.1."), errhint("Please REINDEX it."))); } + + /* + * Remember the block number of this page, so that we can revisit + * it later in gistvacuum_recycle_pages(), when we search for parents + * of empty children. + */ + if (blkno == orig_blkno) + intset_add_member(stats->internalPagesSet, blkno); } UnlockReleaseBuffer(buffer); @@ -364,3 +441,218 @@ restart: goto restart; } } + +/* + * Scan all internal pages, and try to delete their empty child pages. 
+ */ +static void +gistvacuum_recycle_pages(GistBulkDeleteResult *stats) +{ + IndexVacuumInfo *info = stats->info; + Relation rel = info->index; + BlockNumber empty_pages_remaining; + + empty_pages_remaining = intset_num_entries(stats->emptyLeafPagesSet); + + /* + * Rescan all inner pages to find those that have empty child pages. + */ + intset_begin_iterate(stats->internalPagesSet); + while (empty_pages_remaining > 0) + { + BlockNumber blkno; + bool found; + Buffer buffer; + Page page; + OffsetNumber off, + maxoff; + OffsetNumber todelete[MaxOffsetNumber]; + BlockNumber leafs_to_delete[MaxOffsetNumber]; + int ntodelete; + int deleted; + + blkno = intset_iterate_next(stats->internalPagesSet, &found); + if (!found) + break; + + buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, + info->strategy); + + LockBuffer(buffer, GIST_SHARE); + page = (Page) BufferGetPage(buffer); + + if (PageIsNew(page) || GistPageIsDeleted(page) || GistPageIsLeaf(page)) + { + /* + * This page was an internal page earlier, but now it's something + * else. Shouldn't happen... + */ + Assert(false); + UnlockReleaseBuffer(buffer); + continue; + } + + /* + * Scan all the downlinks, and see if any of them point to empty leaf + * pages. + */ + maxoff = PageGetMaxOffsetNumber(page); + ntodelete = 0; + for (off = FirstOffsetNumber; + off <= maxoff && ntodelete < maxoff - 1; + off = OffsetNumberNext(off)) + { + ItemId iid = PageGetItemId(page, off); + IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid); + BlockNumber leafblk; + + leafblk = ItemPointerGetBlockNumber(&(idxtuple->t_tid)); + if (intset_is_member(stats->emptyLeafPagesSet, leafblk)) + { + leafs_to_delete[ntodelete] = leafblk; + todelete[ntodelete++] = off; + } + } + + /* + * In order to avoid deadlock, child page must be locked before + * parent, so we must release the lock on the parent, lock the child, + * and then re-acquire the lock on the parent. (And we wouldn't want + * to do I/O, while holding a lock, anyway.) 
+ * + * At the instant that we're not holding a lock on the parent, the + * downlink might get moved by a concurrent insert, so we must re-check that + * it still points to the same child page after we have acquired both + * locks. Also, another backend might have inserted a tuple to the + * page, so that it is no longer empty. gistdeletepage() re-checks all + * these conditions. + */ + LockBuffer(buffer, GIST_UNLOCK); + + deleted = 0; + for (int i = 0; i < ntodelete; i++) + { + Buffer leafbuf; + + /* + * Don't remove the last downlink from the parent. That would + * confuse the insertion code. + */ + if (PageGetMaxOffsetNumber(page) == FirstOffsetNumber) + break; + + leafbuf = ReadBufferExtended(rel, MAIN_FORKNUM, leafs_to_delete[i], + RBM_NORMAL, info->strategy); + LockBuffer(leafbuf, GIST_EXCLUSIVE); + gistcheckpage(rel, leafbuf); + + LockBuffer(buffer, GIST_EXCLUSIVE); + if (gistdeletepage(stats, buffer, todelete[i] - deleted, leafbuf)) + deleted++; + LockBuffer(buffer, GIST_UNLOCK); + + UnlockReleaseBuffer(leafbuf); + } + empty_pages_remaining -= deleted; + + ReleaseBuffer(buffer); + } +} + + +/* + * gistdeletepage takes parent page and leaf page and tries to delete leaf. + * Both pages must be locked. Returns true if delete actually happened. + * Does not remove last downlink. + */ +static bool +gistdeletepage(GistBulkDeleteResult *stats, + Buffer parentBuffer, OffsetNumber downlink, + Buffer leafBuffer) +{ + Page parentPage = BufferGetPage(parentBuffer); + Page leafPage = BufferGetPage(leafBuffer); + ItemId iid; + IndexTuple idxtuple; + XLogRecPtr recptr; + TransactionId txid; + + /* Check that the leaf is still empty */ + if (!GistPageIsLeaf(leafPage)) + { + Assert(false); + return false; + } + if (PageGetMaxOffsetNumber(leafPage) != InvalidOffsetNumber) + return false; /* no longer empty */ + + if (GistFollowRight(leafPage) + || GistPageGetNSN(parentPage) > GistPageGetNSN(leafPage)) + { + /* Don't mess with a concurrent page split. 
*/ + return false; + } + + /* + * Check that the parent page still looks valid. + */ + if (PageIsNew(parentPage) || + GistPageIsDeleted(parentPage) || + GistPageIsLeaf(parentPage)) + { + Assert(false); + return false; + } + + /* + * Check that old downlink is still pointing to leafBuffer. + * + * It might have moved by a concurrent insert. We could try to relocate + * it, by scanning the page again, or perhaps even by moving right if + * the page was split, but let's keep it simple and just give up. + * The next VACUUM will pick this up again. + */ + if (PageGetMaxOffsetNumber(parentPage) < downlink + || PageGetMaxOffsetNumber(parentPage) <= FirstOffsetNumber) + return false; + + iid = PageGetItemId(parentPage, downlink); + idxtuple = (IndexTuple) PageGetItem(parentPage, iid); + if (BufferGetBlockNumber(leafBuffer) != + ItemPointerGetBlockNumber(&(idxtuple->t_tid))) + return false; + + /* + * All good. Proceed with the deletion. + * + * Like in _bt_unlink_halfdead_page we need an upper bound on xid + * that could hold downlinks to this page. We use + * ReadNewTransactionId() instead of GetCurrentTransactionId() + * since we are in a VACUUM. 
+ */ + txid = ReadNewTransactionId(); + + /* Mark page as deleted dropping references from internal pages */ + START_CRIT_SECTION(); + + /* Remember xid of last transaction that could see this page */ + GistPageSetDeleteXid(leafPage,txid); + GistPageSetDeleted(leafPage); + MarkBufferDirty(leafBuffer); + stats->stats.pages_deleted++; + + MarkBufferDirty(parentBuffer); + /* Offsets are changed as long as we delete tuples from internal page */ + PageIndexTupleDelete(parentPage, downlink); + + if (RelationNeedsWAL(stats->info->index)) + recptr = gistXLogPageDelete(leafBuffer, txid, parentBuffer, downlink); + else + recptr = gistGetFakeLSN(stats->info->index); + PageSetLSN(parentPage, recptr); + PageSetLSN(leafPage, recptr); + + END_CRIT_SECTION(); + + return true; +} diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index 408bd5390af..4dbca41bab1 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -508,6 +508,43 @@ gistRedoCreateIndex(XLogReaderState *record) UnlockReleaseBuffer(buffer); } +/* redo page deletion */ +static void +gistRedoPageDelete(XLogReaderState *record) +{ + XLogRecPtr lsn = record->EndRecPtr; + gistxlogPageDelete *xldata = (gistxlogPageDelete *) XLogRecGetData(record); + Buffer buffer; + Page page; + + /* FIXME: Are we locking the pages in correct order, for hot standby? 
*/ + + if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) + { + page = (Page) BufferGetPage(buffer); + + GistPageSetDeleteXid(page, xldata->deleteXid); + GistPageSetDeleted(page); + + PageSetLSN(page, lsn); + MarkBufferDirty(buffer); + } + if (BufferIsValid(buffer)) + UnlockReleaseBuffer(buffer); + + if (XLogReadBufferForRedo(record, 1, &buffer) == BLK_NEEDS_REDO) + { + page = (Page) BufferGetPage(buffer); + + PageIndexTupleDelete(page, xldata->downlinkOffset); + + PageSetLSN(page, lsn); + MarkBufferDirty(buffer); + } + if (BufferIsValid(buffer)) + UnlockReleaseBuffer(buffer); +} + void gist_redo(XLogReaderState *record) { @@ -535,6 +572,9 @@ gist_redo(XLogReaderState *record) case XLOG_GIST_CREATE_INDEX: gistRedoCreateIndex(record); break; + case XLOG_GIST_PAGE_DELETE: + gistRedoPageDelete(record); + break; default: elog(PANIC, "gist_redo: unknown op code %u", info); } @@ -653,6 +693,31 @@ gistXLogSplit(bool page_is_leaf, return recptr; } +/* + * Write XLOG record describing a page deletion. This also includes removal of + * downlink from the parent page. + */ +XLogRecPtr +gistXLogPageDelete(Buffer buffer, TransactionId xid, + Buffer parentBuffer, OffsetNumber downlinkOffset) +{ + gistxlogPageDelete xlrec; + XLogRecPtr recptr; + + xlrec.deleteXid = xid; + xlrec.downlinkOffset = downlinkOffset; + + XLogBeginInsert(); + XLogRegisterData((char *) &xlrec, sizeof(gistxlogPageDelete)); + + XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); + XLogRegisterBuffer(1, parentBuffer, REGBUF_STANDARD); + + recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_DELETE); + + return recptr; +} + /* * Write XLOG record describing a page update. The update can include any * number of deletions and/or insertions of tuples on a single index page. 
diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c index e468c9e15aa..0861f829922 100644 --- a/src/backend/access/rmgrdesc/gistdesc.c +++ b/src/backend/access/rmgrdesc/gistdesc.c @@ -76,6 +76,9 @@ gist_identify(uint8 info) case XLOG_GIST_CREATE_INDEX: id = "CREATE_INDEX"; break; + case XLOG_GIST_PAGE_DELETE: + id = "PAGE_DELETE"; + break; } return id; diff --git a/src/include/access/gist.h b/src/include/access/gist.h index 3234f241560..ce8bfd83ea4 100644 --- a/src/include/access/gist.h +++ b/src/include/access/gist.h @@ -151,6 +151,10 @@ typedef struct GISTENTRY #define GistPageGetNSN(page) ( PageXLogRecPtrGet(GistPageGetOpaque(page)->nsn)) #define GistPageSetNSN(page, val) ( PageXLogRecPtrSet(GistPageGetOpaque(page)->nsn, val)) +/* For deleted pages we store last xid which could see the page in scan */ +#define GistPageGetDeleteXid(page) ( ((PageHeader) (page))->pd_prune_xid ) +#define GistPageSetDeleteXid(page, val) ( ((PageHeader) (page))->pd_prune_xid = val) + /* * Vector of GISTENTRY structs; user-defined methods union and picksplit * take it as one of their arguments diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h index 463d2bfc7b9..c77d0b4dd81 100644 --- a/src/include/access/gist_private.h +++ b/src/include/access/gist_private.h @@ -414,12 +414,17 @@ extern bool gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, extern SplitedPageLayout *gistSplit(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate); +/* gistxlog.c */ +extern XLogRecPtr gistXLogPageDelete(Buffer buffer, + TransactionId xid, Buffer parentBuffer, + OffsetNumber downlinkOffset); + extern XLogRecPtr gistXLogUpdate(Buffer buffer, OffsetNumber *todelete, int ntodelete, IndexTuple *itup, int ntup, Buffer leftchild); -XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, +extern XLogRecPtr gistXLogDelete(Buffer buffer, OffsetNumber *todelete, int ntodelete, RelFileNode hnode); 
extern XLogRecPtr gistXLogSplit(bool page_is_leaf, diff --git a/src/include/access/gistxlog.h b/src/include/access/gistxlog.h index 5117aabf1af..939a1ea7559 100644 --- a/src/include/access/gistxlog.h +++ b/src/include/access/gistxlog.h @@ -23,7 +23,7 @@ #define XLOG_GIST_PAGE_SPLIT 0x30 /* #define XLOG_GIST_INSERT_COMPLETE 0x40 */ /* not used anymore */ #define XLOG_GIST_CREATE_INDEX 0x50 - /* #define XLOG_GIST_PAGE_DELETE 0x60 */ /* not used anymore */ +#define XLOG_GIST_PAGE_DELETE 0x60 /* * Backup Blk 0: updated page. @@ -76,6 +76,16 @@ typedef struct gistxlogPageSplit */ } gistxlogPageSplit; +/* + * Backup Blk 0: page that was deleted. + * Backup Blk 1: parent page, containing the downlink to the deleted page. + */ +typedef struct gistxlogPageDelete +{ + TransactionId deleteXid; /* last Xid which could see page in scan */ + OffsetNumber downlinkOffset; /* Offset of downlink referencing this page */ +} gistxlogPageDelete; + extern void gist_redo(XLogReaderState *record); extern void gist_desc(StringInfo buf, XLogReaderState *record); extern const char *gist_identify(uint8 info); diff --git a/src/test/regress/expected/gist.out b/src/test/regress/expected/gist.out index f5a2993aaf2..0a43449f003 100644 --- a/src/test/regress/expected/gist.out +++ b/src/test/regress/expected/gist.out @@ -27,10 +27,8 @@ insert into gist_point_tbl (id, p) select g+100000, point(g*10+1, g*10+1) from generate_series(1, 10000) g; -- To test vacuum, delete some entries from all over the index. delete from gist_point_tbl where id % 2 = 1; --- And also delete some concentration of values. (GiST doesn't currently --- attempt to delete pages even when they become empty, but if it did, this --- would exercise it) -delete from gist_point_tbl where id < 10000; +-- And also delete some concentration of values. 
+delete from gist_point_tbl where id > 5000; vacuum analyze gist_point_tbl; -- rebuild the index with a different fillfactor alter index gist_pointidx SET (fillfactor = 40); diff --git a/src/test/regress/sql/gist.sql b/src/test/regress/sql/gist.sql index bae722fe13c..657b1954847 100644 --- a/src/test/regress/sql/gist.sql +++ b/src/test/regress/sql/gist.sql @@ -28,10 +28,8 @@ select g+100000, point(g*10+1, g*10+1) from generate_series(1, 10000) g; -- To test vacuum, delete some entries from all over the index. delete from gist_point_tbl where id % 2 = 1; --- And also delete some concentration of values. (GiST doesn't currently --- attempt to delete pages even when they become empty, but if it did, this --- would exercise it) -delete from gist_point_tbl where id < 10000; +-- And also delete some concentration of values. +delete from gist_point_tbl where id > 5000; vacuum analyze gist_point_tbl; -- 2.20.1