<listitem>
<para>
If on, emit information about resource usage during sort operations.
- This parameter is only available if the <symbol>TRACE_SORT</symbol> macro
- was defined when <productname>PostgreSQL</productname> was compiled.
- (However, <symbol>TRACE_SORT</symbol> is currently defined by default.)
</para>
</listitem>
</varlistentry>
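The hunks below all make the same mechanical change: the compile-time #ifdef TRACE_SORT guards go away and only the run-time GUC test remains. A minimal sketch of the surviving pattern (the message text here is illustrative, not from the patch):

/* Sketch only: after this patch, tracing is gated at run time alone. */
if (trace_sort)
	elog(LOG, "trace message: %s", pg_rusage_show(&state->ru_start));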
*/
if (abbr_card > 100000.0)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"macaddr_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count, memtupcount);
-#endif
uss->estimating = false;
return false;
}
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"macaddr_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
-#endif
return true;
}
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"macaddr_abbrev: cardinality %f after " INT64_FORMAT
" values (%d rows)", abbr_card, uss->input_count, memtupcount);
-#endif
return false;
}
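The function just closed above (macaddr_abbrev) and the three callbacks patched below (network_abbrev, numeric_abbrev, uuid_abbrev) share the same three-branch heuristic. A self-contained sketch of that shared shape, with hypothetical names (sketch_abbrev_abort, AbbrevState); note that numeric divides by 10000.0 where the others divide by 2000.0:

#include <stdbool.h>

/* Hypothetical stand-in for the per-type sortsupport state. */
typedef struct AbbrevState
{
	long		input_count;	/* non-null inputs seen so far */
	bool		estimating;		/* still estimating cardinality? */
} AbbrevState;

/* Returns true to abort abbreviation, per the heuristic in these hunks. */
static bool
sketch_abbrev_abort(int memtupcount, double abbr_card, AbbrevState *uss)
{
	/* Past ~100k distinct abbreviated keys, abbreviation surely pays off. */
	if (abbr_card > 100000.0)
	{
		uss->estimating = false;
		return false;
	}

	/* Under ~1 distinct value per 2000 inputs, give up on abbreviation. */
	if (abbr_card < uss->input_count / 2000.0 + 0.5)
		return true;

	/* Otherwise keep abbreviating and keep estimating. */
	return false;
}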
*/
if (abbr_card > 100000.0)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"network_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count, memtupcount);
-#endif
uss->estimating = false;
return false;
}
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"network_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
-#endif
return true;
}
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"network_abbrev: cardinality %f after " INT64_FORMAT
" values (%d rows)", abbr_card, uss->input_count, memtupcount);
-#endif
return false;
}
*/
if (abbr_card > 100000.0)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"numeric_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count, memtupcount);
-#endif
nss->estimating = false;
return false;
}
*/
if (abbr_card < nss->input_count / 10000.0 + 0.5)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"numeric_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count / 10000.0 + 0.5,
nss->input_count, memtupcount);
-#endif
return true;
}
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"numeric_abbrev: cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count, memtupcount);
-#endif
return false;
}
*/
if (abbr_card > 100000.0)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"uuid_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count, memtupcount);
-#endif
uss->estimating = false;
return false;
}
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"uuid_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
-#endif
return true;
}
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"uuid_abbrev: cardinality %f after " INT64_FORMAT
" values (%d rows)", abbr_card, uss->input_count, memtupcount);
-#endif
return false;
}
* time there are differences within full key strings not captured in
* abbreviations.
*/
-#ifdef TRACE_SORT
if (trace_sort)
{
double norm_abbrev_card = abbrev_distinct / (double) memtupcount;

elog(LOG, "varstr_abbrev: abbrev_distinct after %d: %f "
"(key_distinct: %f, norm_abbrev_card: %f, prop_card: %f)",
memtupcount, abbrev_distinct, key_distinct, norm_abbrev_card,
sss->prop_card);
}
-#endif
/*
* If the number of distinct abbreviated keys approximately matches the
* number of distinct original keys, that's reason enough to proceed; it is
* generally possible to win on the basis of moderately high to high
* abbreviated cardinality. There is little to lose but much to gain, which
* our strategy reflects.
*/
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "varstr_abbrev: aborted abbreviation at %d "
"(abbrev_distinct: %f, key_distinct: %f, prop_card: %f)",
memtupcount, abbrev_distinct, key_distinct, sss->prop_card);
-#endif
return true;
}
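varstr_abbrev, patched above, differs from the fixed-threshold callbacks: per its trace messages it weighs abbreviated-key cardinality (abbrev_distinct) against full-key cardinality (key_distinct) scaled by prop_card. A hedged sketch of that comparison only; the decay of prop_card over time in the real function is omitted, and the exact branching is an assumption:

#include <stdbool.h>

/* Sketch: abort when abbreviated keys collide far more than full keys do. */
static bool
sketch_varstr_abort(double abbrev_distinct, double key_distinct,
					double prop_card)
{
	/* Abbreviated cardinality keeps pace with the full keys: keep going. */
	if (abbrev_distinct > key_distinct * prop_card)
		return false;

	/* Too many abbreviation collisions: abort. */
	return true;
}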
NULL, NULL, NULL
},
-#ifdef TRACE_SORT
{
{"trace_sort", PGC_USERSET, DEVELOPER_OPTIONS,
gettext_noop("Emit information about resource usage in sorting."),
NULL
},
&trace_sort,
false,
NULL, NULL, NULL
},
-#endif
#ifdef TRACE_SYNCSCAN
/* this is undocumented because not exposed in a standard build */
ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
/* GUC variables */
-#ifdef TRACE_SORT
bool trace_sort = false;
-#endif
#ifdef DEBUG_BOUNDED_SORT
bool optimize_bounded_sort = true;
/*
* Resource snapshot for time of sort start.
*/
-#ifdef TRACE_SORT
PGRUsage ru_start;
-#endif
};
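ru_start above pairs with the pg_rusage_* helpers used throughout the later hunks: snapshot resource usage when the sort begins, then print deltas in each trace line. A minimal sketch of that lifecycle (the wrapping function is hypothetical):

#include "postgres.h"
#include "utils/pg_rusage.h"

static void
sketch_traced_sort(void)
{
	PGRUsage	ru_start;

	pg_rusage_init(&ru_start);	/* as tuplesort_begin_common does */

	/* ... run the sort ... */

	elog(LOG, "sort ended: %s", pg_rusage_show(&ru_start));
}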
/*
state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
-#ifdef TRACE_SORT
if (trace_sort)
pg_rusage_init(&state->ru_start);
-#endif
state->base.sortopt = sortopt;
state->base.tuples = true;
{
/* context swap probably not needed, but let's be safe */
MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
-
-#ifdef TRACE_SORT
int64 spaceUsed;
if (state->tapeset)
spaceUsed = LogicalTapeSetBlocks(state->tapeset);
else
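/* (+ 1023) / 1024 rounds the in-memory byte count up to whole kilobytes */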
spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
-#endif
/*
* Delete temporary "tape" files, if any.
*
- * Note: want to include this in reported total cost of sort, hence need
- * for two #ifdef TRACE_SORT sections.
- *
* We don't bother to destroy the individual tapes here. They will go away
* with the sortcontext. (In TSS_FINALMERGE state, we have closed
* finished tapes already.)
*/
if (state->tapeset)
LogicalTapeSetClose(state->tapeset);
-#ifdef TRACE_SORT
if (trace_sort)
{
if (state->tapeset)
elog(LOG, "%s of worker %d ended, " INT64_FORMAT " disk blocks used: %s",
SERIAL(state) ? "external sort" : "parallel sort",
state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
else
elog(LOG, "%s of worker %d ended, " INT64_FORMAT " KB used: %s",
SERIAL(state) ? "internal sort" : "unperformed sort",
state->worker, spaceUsed, pg_rusage_show(&state->ru_start));
}
TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
-#else
-
- /*
- * If you disabled TRACE_SORT, you can still probe sort__done, but you
- * ain't getting space-used stats.
- */
- TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
-#endif
FREESTATE(state);
MemoryContextSwitchTo(oldcontext);
(state->memtupcount > state->bound * 2 ||
(state->memtupcount > state->bound && LACKMEM(state))))
{
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "switching to bounded heapsort at %d tuples: %s",
state->memtupcount,
pg_rusage_show(&state->ru_start));
-#endif
make_bounded_heap(state);
MemoryContextSwitchTo(oldcontext);
return;
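/*
 * Illustration of the condition above: with a bound of 100 (e.g. LIMIT
 * 100), the switch to a bounded heap fires past 200 accumulated tuples,
 * or past 100 as soon as memory runs low.
 */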
{
MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "performsort of worker %d starting: %s",
state->worker, pg_rusage_show(&state->ru_start));
-#endif
switch (state->status)
{
break;
}
-#ifdef TRACE_SORT
if (trace_sort)
{
if (state->status == TSS_FINALMERGE)
elog(LOG, "performsort of worker %d done (except %d-way final merge): %s",
state->worker, state->nInputTapes,
pg_rusage_show(&state->ru_start));
else
elog(LOG, "performsort of worker %d done: %s",
state->worker, pg_rusage_show(&state->ru_start));
}
-#endif
MemoryContextSwitchTo(oldcontext);
}
state->maxTapes = MINORDER;
}
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d switching to external sort with %d tapes: %s",
state->worker, state->maxTapes, pg_rusage_show(&state->ru_start));
-#endif
/* Create the tape set */
inittapestate(state, state->maxTapes);
*/
state->tape_buffer_mem = state->availMem;
USEMEM(state, state->tape_buffer_mem);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d using %zu KB of memory for tape buffers",
state->worker, state->tape_buffer_mem / 1024);
-#endif
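/*
 * Accounting note for the hunk above: all memory still available when the
 * merge starts is handed to the logical tapes as buffer space and charged
 * via USEMEM(), which is what the KB figure in the message reports.
 */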
for (;;)
{
state->nInputRuns,
state->maxTapes);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,
pg_rusage_show(&state->ru_start));
-#endif
/* Prepare the new input tapes for merge pass. */
for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
state->currentRun++;
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d starting quicksort of run %d: %s",
state->worker, state->currentRun,
pg_rusage_show(&state->ru_start));
-#endif
/*
* Sort all tuples accumulated within the allowed amount of memory for
* this run using quicksort
*/
tuplesort_sort_memtuples(state);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d finished quicksort of run %d: %s",
state->worker, state->currentRun,
pg_rusage_show(&state->ru_start));
-#endif
memtupwrite = state->memtupcount;
for (i = 0; i < memtupwrite; i++)
markrunend(state->destTape);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d finished writing run %d to tape %d: %s",
state->worker, state->currentRun, (state->currentRun - 1) % state->nOutputTapes + 1,
pg_rusage_show(&state->ru_start));
-#endif
}
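/*
 * Note on the tape number in the message above: completed runs cycle over
 * the output tapes, so run N is reported on tape ((N - 1) % nOutputTapes) + 1.
 */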
/*
Assert(nkeys > 0);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = nkeys;
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortClusterArg *) palloc0(sizeof(TuplesortClusterArg));
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
RelationGetNumberOfAttributes(indexRel),
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: unique = %c, workMem = %d, randomAccess = %c",
enforceUnique ? 't' : 'f',
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortIndexHashArg *) palloc(sizeof(TuplesortIndexHashArg));
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
max_buckets,
workMem,
sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = 1; /* Only one sort column, the hash code */
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: workMem = %d, randomAccess = %c",
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
sortopt);
TuplesortPublic *base = TuplesortstateGetPublic(state);
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: workMem = %d, randomAccess = %c",
workMem,
sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = 1; /* Only one sort column, the block number */
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortDatumArg *) palloc(sizeof(TuplesortDatumArg));
-#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin datum sort: workMem = %d, randomAccess = %c",
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif
base->nKeys = 1; /* always a one-column sort */
*/
/* #define WAL_DEBUG */
-/*
- * Enable tracing of resource consumption during sort operations;
- * see also the trace_sort GUC var. For 8.1 this is enabled by default.
- */
-#define TRACE_SORT 1
-
/*
* Enable tracing of syncscan operations (see also the trace_syncscan GUC var).
*/
extern PGDLLIMPORT char *role_string;
extern PGDLLIMPORT bool in_hot_standby_guc;
-
-#ifdef TRACE_SORT
extern PGDLLIMPORT bool trace_sort;
-#endif
#ifdef DEBUG_BOUNDED_SORT
extern PGDLLIMPORT bool optimize_bounded_sort;