From f49e1bb31f4ad56c3f42b9ed4e9fcb5d99198ed4 Mon Sep 17 00:00:00 2001
From: Matthias van de Meent
Date: Tue, 9 Mar 2021 14:42:52 +0100
Subject: [PATCH v3] Truncate a page's line pointer array when it has trailing
 unused ItemIds.

This allows reuse of what is effectively free space for data as well as for
new line pointers, instead of keeping it reserved for line pointers only.

An additional benefit is that the HasFreeLinePointers hint-bit optimization
no longer hints for free line pointers at the end of the array, slightly
increasing the specificity of where the free line pointers are, and saving
us from searching to the end of the array when all other entries are already
filled.
---
 src/backend/access/heap/heapam.c    |  9 ++++++++-
 src/backend/access/heap/pruneheap.c |  1 +
 src/backend/storage/page/bufpage.c  | 13 ++++++++++++-
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 3b435c107d..ae218cfac6 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -635,8 +635,15 @@ heapgettup(HeapScanDesc scan,
 		}
 		else
 		{
+			/*
+			 * The previously returned tuple may have been vacuumed away
+			 * since our previous fetch when we use a non-MVCC snapshot, so
+			 * we must re-establish the invariant
+			 * lineoff <= PageGetMaxOffsetNumber(dp).
+			 */
 			lineoff =			/* previous offnum */
-				OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
+				Min(lines,
+					OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self))));
 		}
 		/* page and lineoff now reference the physically previous tid */
 
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 8bb38d6406..5903cdca82 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -946,6 +946,7 @@ heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
 		 */
 		for (;;)
 		{
+			Assert(OffsetNumberIsValid(nextoffnum) && nextoffnum <= maxoff);
 			lp = PageGetItemId(page, nextoffnum);
 
 			/* Check for broken chains */
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 9ac556b4ae..10d8f26ad0 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -672,7 +672,11 @@ compactify_tuples(itemIdCompact itemidbase, int nitems, Page page, bool presorte
  * PageRepairFragmentation
  *
  * Frees fragmented space on a page.
- * It doesn't remove unused line pointers! Please don't change this.
+ * It doesn't remove intermediate unused line pointers (that would require
+ * moving ItemIds, which would invalidate the TIDs that indexes use to
+ * reference those tuples), but it does truncate the page->pd_linp array to
+ * the last used line pointer, so that the trailing space may also be reused
+ * for data instead of only for line pointers.
  *
  * This routine is usable for heap pages only, but see PageIndexMultiDelete.
 *
@@ -691,6 +695,7 @@ PageRepairFragmentation(Page page)
 	int			nline,
 				nstorage,
 				nunused;
+	OffsetNumber lastUsed = InvalidOffsetNumber;
 	int			i;
 	Size		totallen;
 	bool		presorted = true;	/* For now */
@@ -724,6 +729,7 @@
 		lp = PageGetItemId(page, i);
 		if (ItemIdIsUsed(lp))
 		{
+			lastUsed = i;
 			if (ItemIdHasStorage(lp))
 			{
 				itemidptr->offsetindex = i - 1;
@@ -771,6 +777,11 @@
 		compactify_tuples(itemidbase, nstorage, page, presorted);
 	}
 
+	if (lastUsed != nline) {
+		((PageHeader) page)->pd_lower = SizeOfPageHeaderData + (sizeof(ItemIdData) * lastUsed);
+		nunused = nunused - (nline - lastUsed);
+	}
+
 	/* Set hint bit for PageAddItem */
 	if (nunused > 0)
 		PageSetHasFreeLinePointers(page);
-- 
2.20.1
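
Below is a minimal, standalone C sketch (not part of the patch) of the pd_lower
truncation that the bufpage.c hunk performs: it scans a toy line pointer array,
remembers the last used entry, and pulls pd_lower back so that trailing unused
ItemIds become ordinary free space. ItemIdData, ItemIdIsUsed, LP_UNUSED,
LP_NORMAL and SizeOfPageHeaderData here are simplified stand-ins for the real
definitions in itemid.h and bufpage.h, and the array contents are invented for
illustration only.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-in for PostgreSQL's ItemIdData (itemid.h) */
    typedef struct
    {
        unsigned    lp_off:15,      /* offset of tuple within the page */
                    lp_flags:2,     /* state of the line pointer */
                    lp_len:15;      /* byte length of the tuple */
    } ItemIdData;

    #define LP_UNUSED               0
    #define LP_NORMAL               1
    #define ItemIdIsUsed(itemId)    ((itemId)->lp_flags != LP_UNUSED)

    /* Stand-in for the real page header size from bufpage.h */
    #define SizeOfPageHeaderData    24

    int
    main(void)
    {
        /*
         * Toy line pointer array: offsets 1..6; entries 5 and 6 are unused,
         * so the array can be truncated to end just after entry 4.
         */
        ItemIdData  linp[] = {
            {8032, LP_NORMAL, 40}, {0, LP_UNUSED, 0}, {7992, LP_NORMAL, 40},
            {7952, LP_NORMAL, 40}, {0, LP_UNUSED, 0}, {0, LP_UNUSED, 0}
        };
        int         nline = (int) (sizeof(linp) / sizeof(linp[0]));
        int         lastUsed = 0;   /* 0 plays the role of InvalidOffsetNumber */
        size_t      pd_lower = SizeOfPageHeaderData + sizeof(ItemIdData) * nline;

        /* Remember the highest offset that is still used, as the patch does */
        for (int i = 1; i <= nline; i++)
        {
            if (ItemIdIsUsed(&linp[i - 1]))
                lastUsed = i;
        }

        /* Truncate trailing unused entries: pd_lower ends at the last used one */
        if (lastUsed != nline)
            pd_lower = SizeOfPageHeaderData + sizeof(ItemIdData) * lastUsed;

        printf("nline=%d lastUsed=%d pd_lower=%zu (was %zu)\n",
               nline, lastUsed, pd_lower,
               (size_t) (SizeOfPageHeaderData + sizeof(ItemIdData) * nline));
        return 0;
    }

On a typical build where the bit-fields pack into 4 bytes this prints
nline=6 lastUsed=4 pd_lower=40 (was 48), i.e. the 8 bytes that held the two
trailing unused ItemIds are returned to the page's free space, usable for data
as well as for new line pointers.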