summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r-- src/backend/access/common/printtup.c   | 18
-rw-r--r-- src/backend/storage/page/bufpage.c     | 15
-rw-r--r-- src/backend/tcop/postgres.c            |  5
-rw-r--r-- src/backend/utils/mmgr/aset.c          | 170
-rw-r--r-- src/backend/utils/mmgr/mcxt.c          | 13
-rw-r--r-- src/include/pg_config_manual.h         | 19
-rw-r--r-- src/include/utils/memdebug.h           | 34
-rw-r--r-- src/tools/valgrind.supp                | 94
8 files changed, 361 insertions(+), 7 deletions(-)
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index e87e6752b67..8daac9e4f5a 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -20,6 +20,7 @@
#include "libpq/pqformat.h"
#include "tcop/pquery.h"
#include "utils/lsyscache.h"
+#include "utils/memdebug.h"
static void printtup_startup(DestReceiver *self, int operation,
@@ -324,9 +325,26 @@ printtup(TupleTableSlot *slot, DestReceiver *self)
/*
* If we have a toasted datum, forcibly detoast it here to avoid
* memory leakage inside the type's output routine.
+ *
+ * Here we catch undefined bytes in tuples that are returned to the
+ * client without hitting disk; see comments at the related check in
+ * PageAddItem(). Whether to test before or after detoast is somewhat
+ * arbitrary, as is whether to test external/compressed data at all.
+ * Undefined bytes in the pre-toast datum will have triggered Valgrind
+ * errors in the compressor or toaster; any error detected here for
+ * such datums would indicate an (unlikely) bug in a type-independent
+ * facility. Therefore, this test is most useful for uncompressed,
+ * non-external datums.
+ *
+ * We don't presently bother checking non-varlena datums for undefined
+ * data. PageAddItem() does check them.
*/
if (thisState->typisvarlena)
+ {
+ VALGRIND_CHECK_MEM_IS_DEFINED(origattr, VARSIZE_ANY(origattr));
+
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
+ }
else
attr = origattr;
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 36b88c5729b..f426a6b9041 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -17,6 +17,7 @@
#include "access/htup_details.h"
#include "access/xlog.h"
#include "storage/checksum.h"
+#include "utils/memdebug.h"
#include "utils/memutils.h"
@@ -297,6 +298,20 @@ PageAddItem(Page page,
/* set the item pointer */
ItemIdSetNormal(itemId, upper, size);
+ /*
+ * Items normally contain no uninitialized bytes. Core bufpage consumers
+ * conform, but this is not a necessary coding rule; a new index AM could
+ * opt to depart from it. However, data type input functions and other
+ * C-language functions that synthesize datums should initialize all
+ * bytes; datumIsEqual() relies on this. Testing here, along with the
+ * similar check in printtup(), helps to catch such mistakes.
+ *
+ * Values of the "name" type retrieved via index-only scans may contain
+ * uninitialized bytes; see comment in btrescan(). Valgrind will report
+ * this as an error, but it is safe to ignore.
+ */
+ VALGRIND_CHECK_MEM_IS_DEFINED(item, size);
+
/* copy the item's data onto the page */
memcpy((char *) page + upper, item, size);
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 31ea31304b4..ad258b74fd9 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -69,6 +69,7 @@
#include "tcop/tcopprot.h"
#include "tcop/utility.h"
#include "utils/lsyscache.h"
+#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/snapmgr.h"
@@ -846,6 +847,10 @@ exec_simple_query(const char *query_string)
TRACE_POSTGRESQL_QUERY_START(query_string);
+#ifdef USE_VALGRIND
+ VALGRIND_PRINTF("statement: %s\n", query_string);
+#endif
+
/*
* We use save_log_statement_stats so ShowUsage doesn't report incorrect
* results because ResetUsage wasn't called.
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index de643779f06..ab93620ae26 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -59,11 +59,34 @@
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
*
+ *
+ * About USE_VALGRIND and Valgrind client requests:
+ *
+ * Valgrind provides "client request" macros that exchange information with
+ * the host Valgrind (if any). Under !USE_VALGRIND, memdebug.h stubs out
+ * currently-used macros.
+ *
+ * When running under Valgrind, we want a NOACCESS memory region both before
+ * and after the allocation. The chunk header is tempting as the preceding
+ * region, but mcxt.c expects to be able to examine the standard chunk header
+ * fields. Therefore, we use, when available, the requested_size field and
+ * any subsequent padding. requested_size is made NOACCESS before returning
+ * a chunk pointer to a caller. However, to reduce client request traffic,
+ * it is kept DEFINED in chunks on the free list.
+ *
+ * The rounded-up capacity of the chunk usually acts as a post-allocation
+ * NOACCESS region. If the request consumes precisely the entire chunk,
+ * there is no such region; another chunk header may immediately follow. In
+ * that case, Valgrind will not detect access beyond the end of the chunk.
+ *
+ * See also the cooperating Valgrind client requests in mcxt.c.
+ *
*-------------------------------------------------------------------------
*/
#include "postgres.h"
+#include "utils/memdebug.h"
#include "utils/memutils.h"
/* Define this to detail debug alloc information */
@@ -116,6 +139,19 @@
#define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ MAXALIGN(sizeof(AllocChunkData))
+/* Portion of ALLOC_CHUNKHDRSZ examined outside aset.c. */
+#define ALLOC_CHUNK_PUBLIC \
+ (offsetof(AllocChunkData, size) + sizeof(Size))
+
+/* Portion of ALLOC_CHUNKHDRSZ excluding trailing padding. */
+#ifdef MEMORY_CONTEXT_CHECKING
+#define ALLOC_CHUNK_USED \
+ (offsetof(AllocChunkData, requested_size) + sizeof(Size))
+#else
+#define ALLOC_CHUNK_USED \
+ (offsetof(AllocChunkData, size) + sizeof(Size))
+#endif
+
typedef struct AllocBlockData *AllocBlock; /* forward reference */
typedef struct AllocChunkData *AllocChunk;
@@ -314,7 +350,9 @@ AllocSetFreeIndex(Size size)
static void
wipe_mem(void *ptr, size_t size)
{
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
memset(ptr, 0x7F, size);
+ VALGRIND_MAKE_MEM_NOACCESS(ptr, size);
}
#endif
@@ -324,7 +362,9 @@ set_sentinel(void *base, Size offset)
{
char *ptr = (char *) base + offset;
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr, 1);
*ptr = 0x7E;
+ VALGRIND_MAKE_MEM_NOACCESS(ptr, 1);
}
static bool
@@ -333,7 +373,9 @@ sentinel_ok(const void *base, Size offset)
const char *ptr = (const char *) base + offset;
bool ret;
+ VALGRIND_MAKE_MEM_DEFINED(ptr, 1);
ret = *ptr == 0x7E;
+ VALGRIND_MAKE_MEM_NOACCESS(ptr, 1);
return ret;
}
@@ -346,20 +388,29 @@ sentinel_ok(const void *base, Size offset)
* very random, just a repeating sequence with a length that's prime. What
* we mainly want out of it is to have a good probability that two palloc's
* of the same number of bytes start out containing different data.
+ *
+ * The region may be NOACCESS, so make it UNDEFINED first to avoid errors as
+ * we fill it. Filling the region makes it DEFINED, so make it UNDEFINED
+ * again afterward. Whether to finally make it UNDEFINED or NOACCESS is
+ * fairly arbitrary. UNDEFINED is more convenient for AllocSetRealloc(), and
+ * other callers have no preference.
*/
static void
randomize_mem(char *ptr, size_t size)
{
static int save_ctr = 1;
+ size_t remaining = size;
int ctr;
ctr = save_ctr;
- while (size-- > 0)
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr, size);
+ while (remaining-- > 0)
{
*ptr++ = ctr;
if (++ctr > 251)
ctr = 1;
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr - size, size);
save_ctr = ctr;
}
#endif /* RANDOMIZE_ALLOCATED_MEMORY */
@@ -455,6 +506,10 @@ AllocSetContextCreate(MemoryContext parent,
context->blocks = block;
/* Mark block as not to be released at reset time */
context->keeper = block;
+
+ /* Mark unallocated space NOACCESS; leave the block header alone. */
+ VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
+ blksize - ALLOC_BLOCKHDRSZ);
}
return (MemoryContext) context;
@@ -524,6 +579,9 @@ AllocSetReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(datastart, block->freeptr - datastart);
+#else
+ /* wipe_mem() would have done this */
+ VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
block->freeptr = datastart;
block->next = NULL;
@@ -623,6 +681,7 @@ AllocSetAlloc(MemoryContext context, Size size)
chunk->aset = set;
chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
+ /* Valgrind: Will be made NOACCESS below. */
chunk->requested_size = size;
/* set mark to catch clobber of "unused" space */
if (size < chunk_size)
@@ -649,6 +708,16 @@ AllocSetAlloc(MemoryContext context, Size size)
}
AllocAllocInfo(set, chunk);
+
+ /*
+ * Chunk header public fields remain DEFINED. The requested
+ * allocation itself can be NOACCESS or UNDEFINED; our caller will
+ * soon make it UNDEFINED. Make extra space at the end of the chunk,
+ * if any, NOACCESS.
+ */
+ VALGRIND_MAKE_MEM_NOACCESS((char *) chunk + ALLOC_CHUNK_PUBLIC,
+ chunk_size + ALLOC_CHUNKHDRSZ - ALLOC_CHUNK_PUBLIC);
+
return AllocChunkGetPointer(chunk);
}
@@ -669,7 +738,10 @@ AllocSetAlloc(MemoryContext context, Size size)
chunk->aset = (void *) set;
#ifdef MEMORY_CONTEXT_CHECKING
+ /* Valgrind: Free list requested_size should be DEFINED. */
chunk->requested_size = size;
+ VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
+ sizeof(chunk->requested_size));
/* set mark to catch clobber of "unused" space */
if (size < chunk->size)
set_sentinel(AllocChunkGetPointer(chunk), size);
@@ -730,6 +802,9 @@ AllocSetAlloc(MemoryContext context, Size size)
chunk = (AllocChunk) (block->freeptr);
+ /* Prepare to initialize the chunk header. */
+ VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNK_USED);
+
block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
@@ -811,6 +886,10 @@ AllocSetAlloc(MemoryContext context, Size size)
if (set->keeper == NULL && blksize == set->initBlockSize)
set->keeper = block;
+ /* Mark unallocated space NOACCESS. */
+ VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
+ blksize - ALLOC_BLOCKHDRSZ);
+
block->next = set->blocks;
set->blocks = block;
}
@@ -820,6 +899,9 @@ AllocSetAlloc(MemoryContext context, Size size)
*/
chunk = (AllocChunk) (block->freeptr);
+ /* Prepare to initialize the chunk header. */
+ VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNK_USED);
+
block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
Assert(block->freeptr <= block->endptr);
@@ -827,6 +909,8 @@ AllocSetAlloc(MemoryContext context, Size size)
chunk->size = chunk_size;
#ifdef MEMORY_CONTEXT_CHECKING
chunk->requested_size = size;
+ VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
+ sizeof(chunk->requested_size));
/* set mark to catch clobber of "unused" space */
if (size < chunk->size)
set_sentinel(AllocChunkGetPointer(chunk), size);
@@ -853,6 +937,8 @@ AllocSetFree(MemoryContext context, void *pointer)
AllocFreeInfo(set, chunk);
#ifdef MEMORY_CONTEXT_CHECKING
+ VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
+ sizeof(chunk->requested_size));
/* Test for someone scribbling on unused space in chunk */
if (chunk->requested_size < chunk->size)
if (!sentinel_ok(pointer, chunk->requested_size))
@@ -916,6 +1002,11 @@ AllocSetFree(MemoryContext context, void *pointer)
* Returns new pointer to allocated memory of given size; this memory
* is added to the set. Memory associated with given pointer is copied
* into the new memory, and the old memory is freed.
+ *
+ * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
+ * makes our Valgrind client requests less-precise, hazarding false negatives.
+ * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
+ * request size.)
*/
static void *
AllocSetRealloc(MemoryContext context, void *pointer, Size size)
@@ -925,6 +1016,8 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
Size oldsize = chunk->size;
#ifdef MEMORY_CONTEXT_CHECKING
+ VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
+ sizeof(chunk->requested_size));
/* Test for someone scribbling on unused space in chunk */
if (chunk->requested_size < oldsize)
if (!sentinel_ok(pointer, chunk->requested_size))
@@ -940,18 +1033,44 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize >= size)
{
#ifdef MEMORY_CONTEXT_CHECKING
+ Size oldrequest = chunk->requested_size;
+
#ifdef RANDOMIZE_ALLOCATED_MEMORY
/* We can only fill the extra space if we know the prior request */
- if (size > chunk->requested_size)
- randomize_mem((char *) AllocChunkGetPointer(chunk) + chunk->requested_size,
- size - chunk->requested_size);
+ if (size > oldrequest)
+ randomize_mem((char *) pointer + oldrequest,
+ size - oldrequest);
#endif
chunk->requested_size = size;
+ VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
+ sizeof(chunk->requested_size));
+
+ /*
+ * If this is an increase, mark any newly-available part UNDEFINED.
+ * Otherwise, mark the obsolete part NOACCESS.
+ */
+ if (size > oldrequest)
+ VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
+ size - oldrequest);
+ else
+ VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
+ oldsize - size);
+
/* set mark to catch clobber of "unused" space */
if (size < oldsize)
set_sentinel(pointer, size);
+#else /* !MEMORY_CONTEXT_CHECKING */
+
+ /*
+ * We don't have the information to determine whether we're growing
+ * the old request or shrinking it, so we conservatively mark the
+ * entire new allocation DEFINED.
+ */
+ VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize);
+ VALGRIND_MAKE_MEM_DEFINED(pointer, size);
#endif
+
return pointer;
}
@@ -997,6 +1116,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
/* Update pointers since block has likely been moved */
chunk = (AllocChunk) (((char *) block) + ALLOC_BLOCKHDRSZ);
+ pointer = AllocChunkGetPointer(chunk);
if (prevblock == NULL)
set->blocks = block;
else
@@ -1006,16 +1126,37 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
#ifdef MEMORY_CONTEXT_CHECKING
#ifdef RANDOMIZE_ALLOCATED_MEMORY
/* We can only fill the extra space if we know the prior request */
- randomize_mem((char *) AllocChunkGetPointer(chunk) + chunk->requested_size,
+ randomize_mem((char *) pointer + chunk->requested_size,
size - chunk->requested_size);
#endif
+ /*
+ * realloc() (or randomize_mem()) will have left the newly-allocated
+ * part UNDEFINED, but we may need to adjust trailing bytes from the
+ * old allocation.
+ */
+ VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
+ oldsize - chunk->requested_size);
+
chunk->requested_size = size;
+ VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
+ sizeof(chunk->requested_size));
+
/* set mark to catch clobber of "unused" space */
if (size < chunk->size)
set_sentinel(AllocChunkGetPointer(chunk), size);
+#else /* !MEMORY_CONTEXT_CHECKING */
+
+ /*
+ * We don't know how much of the old chunk size was the actual
+ * allocation; it could have been as small as one byte. We have to be
+ * conservative and just mark the entire old portion DEFINED.
+ */
+ VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
#endif
+ /* Make any trailing alignment padding NOACCESS. */
+ VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
return AllocChunkGetPointer(chunk);
}
else
@@ -1036,6 +1177,20 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
/* allocate new chunk */
newPointer = AllocSetAlloc((MemoryContext) set, size);
+ /*
+ * AllocSetAlloc() just made the region NOACCESS. Change it to
+ * UNDEFINED for the moment; memcpy() will then transfer definedness
+ * from the old allocation to the new. If we know the old allocation,
+ * copy just that much. Otherwise, make the entire old chunk defined
+ * to avoid errors as we copy the currently-NOACCESS trailing bytes.
+ */
+ VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
+#ifdef MEMORY_CONTEXT_CHECKING
+ oldsize = chunk->requested_size;
+#else
+ VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
+#endif
+
/* transfer existing data (certain to fit) */
memcpy(newPointer, pointer, oldsize);
@@ -1164,7 +1319,12 @@ AllocSetCheck(MemoryContext context)
dsize;
chsize = chunk->size; /* aligned chunk size */
+ VALGRIND_MAKE_MEM_DEFINED(&chunk->requested_size,
+ sizeof(chunk->requested_size));
dsize = chunk->requested_size; /* real data */
+ if (dsize > 0) /* not on a free list */
+ VALGRIND_MAKE_MEM_NOACCESS(&chunk->requested_size,
+ sizeof(chunk->requested_size));
/*
* Check chunk size
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index c72e3470004..46961e9ee99 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -24,6 +24,7 @@
#include "postgres.h"
+#include "utils/memdebug.h"
#include "utils/memutils.h"
@@ -135,6 +136,8 @@ MemoryContextReset(MemoryContext context)
{
(*context->methods->reset) (context);
context->isReset = true;
+ VALGRIND_DESTROY_MEMPOOL(context);
+ VALGRIND_CREATE_MEMPOOL(context, 0, false);
}
}
@@ -184,6 +187,7 @@ MemoryContextDelete(MemoryContext context)
MemoryContextSetParent(context, NULL);
(*context->methods->delete_context) (context);
+ VALGRIND_DESTROY_MEMPOOL(context);
pfree(context);
}
@@ -555,6 +559,8 @@ MemoryContextCreate(NodeTag tag, Size size,
parent->firstchild = node;
}
+ VALGRIND_CREATE_MEMPOOL(node, 0, false);
+
/* Return to type-specific creation routine to finish up */
return node;
}
@@ -580,6 +586,7 @@ MemoryContextAlloc(MemoryContext context, Size size)
context->isReset = false;
ret = (*context->methods->alloc) (context, size);
+ VALGRIND_MEMPOOL_ALLOC(context, ret, size);
return ret;
}
@@ -605,6 +612,7 @@ MemoryContextAllocZero(MemoryContext context, Size size)
context->isReset = false;
ret = (*context->methods->alloc) (context, size);
+ VALGRIND_MEMPOOL_ALLOC(context, ret, size);
MemSetAligned(ret, 0, size);
@@ -632,6 +640,7 @@ MemoryContextAllocZeroAligned(MemoryContext context, Size size)
context->isReset = false;
ret = (*context->methods->alloc) (context, size);
+ VALGRIND_MEMPOOL_ALLOC(context, ret, size);
MemSetLoop(ret, 0, size);
@@ -653,6 +662,7 @@ palloc(Size size)
CurrentMemoryContext->isReset = false;
ret = (*CurrentMemoryContext->methods->alloc) (CurrentMemoryContext, size);
+ VALGRIND_MEMPOOL_ALLOC(CurrentMemoryContext, ret, size);
return ret;
}
@@ -672,6 +682,7 @@ palloc0(Size size)
CurrentMemoryContext->isReset = false;
ret = (*CurrentMemoryContext->methods->alloc) (CurrentMemoryContext, size);
+ VALGRIND_MEMPOOL_ALLOC(CurrentMemoryContext, ret, size);
MemSetAligned(ret, 0, size);
@@ -704,6 +715,7 @@ pfree(void *pointer)
AssertArg(MemoryContextIsValid(context));
(*context->methods->free_p) (context, pointer);
+ VALGRIND_MEMPOOL_FREE(context, pointer);
}
/*
@@ -740,6 +752,7 @@ repalloc(void *pointer, Size size)
Assert(!context->isReset);
ret = (*context->methods->realloc) (context, pointer, size);
+ VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
return ret;
}
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index 02bcd9255d4..1d60be2c477 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -207,6 +207,21 @@
*/
/*
+ * Include Valgrind "client requests", mostly in the memory allocator, so
+ * Valgrind understands PostgreSQL memory contexts. This permits detecting
+ * memory errors that Valgrind would not detect on a vanilla build. See also
+ * src/tools/valgrind.supp. "make installcheck" runs 20-30x longer under
+ * Valgrind. Note that USE_VALGRIND slowed older versions of Valgrind by an
+ * additional order of magnitude; Valgrind 3.8.1 does not have this problem.
+ * The client requests fall in hot code paths, so USE_VALGRIND also slows
+ * native execution by a few percentage points.
+ *
+ * You should normally use MEMORY_CONTEXT_CHECKING with USE_VALGRIND;
+ * instrumentation of repalloc() is inferior without it.
+ */
+/* #define USE_VALGRIND */
+
+/*
* Define this to cause pfree()'d memory to be cleared immediately, to
* facilitate catching bugs that refer to already-freed values.
* Right now, this gets defined automatically if --enable-cassert.
@@ -218,9 +233,9 @@
/*
* Define this to check memory allocation errors (scribbling on more
* bytes than were allocated). Right now, this gets defined
- * automatically if --enable-cassert.
+ * automatically if --enable-cassert or USE_VALGRIND.
*/
-#ifdef USE_ASSERT_CHECKING
+#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
#define MEMORY_CONTEXT_CHECKING
#endif
diff --git a/src/include/utils/memdebug.h b/src/include/utils/memdebug.h
new file mode 100644
index 00000000000..0b955693e61
--- /dev/null
+++ b/src/include/utils/memdebug.h
@@ -0,0 +1,34 @@
+/*-------------------------------------------------------------------------
+ *
+ * memdebug.h
+ * Memory debugging support.
+ *
+ * Currently, this file either wraps <valgrind/memcheck.h> or substitutes
+ * empty definitions for Valgrind client request macros we use.
+ *
+ *
+ * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/utils/memdebug.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef MEMDEBUG_H
+#define MEMDEBUG_H
+
+#ifdef USE_VALGRIND
+#include <valgrind/memcheck.h>
+#else
+#define VALGRIND_CHECK_MEM_IS_DEFINED(addr, size) do {} while (0)
+#define VALGRIND_CREATE_MEMPOOL(context, redzones, zeroed) do {} while (0)
+#define VALGRIND_DESTROY_MEMPOOL(context) do {} while (0)
+#define VALGRIND_MAKE_MEM_DEFINED(addr, size) do {} while (0)
+#define VALGRIND_MAKE_MEM_NOACCESS(addr, size) do {} while (0)
+#define VALGRIND_MAKE_MEM_UNDEFINED(addr, size) do {} while (0)
+#define VALGRIND_MEMPOOL_ALLOC(context, addr, size) do {} while (0)
+#define VALGRIND_MEMPOOL_FREE(context, addr) do {} while (0)
+#define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size) do {} while (0)
+#endif
+
+#endif /* MEMDEBUG_H */
diff --git a/src/tools/valgrind.supp b/src/tools/valgrind.supp
new file mode 100644
index 00000000000..8c14d6962e8
--- /dev/null
+++ b/src/tools/valgrind.supp
@@ -0,0 +1,94 @@
+# This is a suppression file for use with Valgrind tools. File format
+# documentation:
+# http://valgrind.org/docs/manual/mc-manual.html#mc-manual.suppfiles
+
+# The libc symbol that implements a particular standard interface is
+# implementation-dependent. For example, strncpy() shows up as "__GI_strncpy"
+# on some platforms. Use wildcards to avoid mentioning such specific names.
+
+
+# We have occasion to write raw binary structures to disk or to the network.
+# These may contain uninitialized padding bytes. Since recipients also ignore
+# those bytes as padding, this is harmless.
+
+{
+ padding_pgstat_send
+ Memcheck:Param
+ socketcall.send(msg)
+
+ fun:*send*
+ fun:pgstat_send
+}
+
+{
+ padding_pgstat_sendto
+ Memcheck:Param
+ socketcall.sendto(msg)
+
+ fun:*send*
+ fun:pgstat_send
+}
+
+{
+ padding_pgstat_write
+ Memcheck:Param
+ write(buf)
+
+ ...
+ fun:pgstat_write_statsfiles
+}
+
+{
+ padding_XLogRecData_CRC
+ Memcheck:Value8
+
+ fun:XLogInsert
+}
+
+{
+ padding_XLogRecData_write
+ Memcheck:Param
+ write(buf)
+
+ ...
+ fun:XLogWrite
+}
+
+{
+ padding_relcache
+ Memcheck:Param
+ write(buf)
+
+ ...
+ fun:write_relcache_init_file
+}
+
+
+# resolve_polymorphic_tupdesc(), a subroutine of internal_get_result_type(),
+# can instigate a memcpy() call where the two pointer arguments are exactly
+# equal. The behavior thereof is formally undefined, but implementations
+# where it's anything other than a no-op are thought unlikely.
+{
+ noopmemcpy_internal_get_result_type
+ Memcheck:Overlap
+
+ fun:*strncpy*
+ fun:namestrcpy
+ fun:TupleDescInitEntry
+ ...
+ fun:internal_get_result_type
+}
+
+
+# gcc on ppc64 can generate a four-byte read to fetch the final "char" fields
+# of a FormData_pg_cast. This is valid compiler behavior, because a proper
+# FormData_pg_cast has trailing padding. Tuples we treat as structures omit
+# that padding, so Valgrind reports an invalid read. Practical trouble would
+# entail the missing pad bytes falling in a different memory page. So long as
+# the structure is aligned, that will not happen.
+{
+ overread_tuplestruct_pg_cast
+ Memcheck:Addr4
+
+ fun:IsBinaryCoercible
+}