author      Cédric Villemain    2011-05-13 20:55:39 +0000
committer   Cédric Villemain    2011-05-13 20:55:39 +0000
commit      e0c3b474d5436c7874aef36988f2646bdb890249 (patch)
tree        49c41d2b8abbd9bae4096643d840859f3a02a08c /src
parent      40cefa392974c73ec20deb3c15fb5111ed7fad17 (diff)
parent      9bb6d9795253bb521f81c626fea49a704a369ca9 (diff)
Merge branch 'master' into analyze_cache
Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/transam/clog.c | 4
-rw-r--r--  src/backend/access/transam/varsup.c | 5
-rw-r--r--  src/backend/access/transam/xact.c | 7
-rw-r--r--  src/backend/access/transam/xlog.c | 9
-rw-r--r--  src/backend/catalog/information_schema.sql | 4
-rw-r--r--  src/backend/commands/cluster.c | 19
-rw-r--r--  src/backend/commands/indexcmds.c | 20
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 5
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 41
-rw-r--r--  src/backend/parser/gram.y | 31
-rw-r--r--  src/backend/postmaster/autovacuum.c | 1
-rw-r--r--  src/backend/storage/lmgr/predicate.c | 3
-rw-r--r--  src/backend/tsearch/spell.c | 2
-rw-r--r--  src/backend/utils/adt/datetime.c | 31
-rw-r--r--  src/backend/utils/init/postinit.c | 2
-rw-r--r--  src/backend/utils/misc/guc-file.l | 41
-rw-r--r--  src/backend/utils/misc/guc.c | 33
-rw-r--r--  src/backend/utils/misc/postgresql.conf.sample | 6
-rw-r--r--  src/backend/utils/mmgr/aset.c | 25
-rw-r--r--  src/bin/initdb/initdb.c | 25
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 68
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 3
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 2
-rw-r--r--  src/bin/psql/tab-complete.c | 3
-rw-r--r--  src/bin/scripts/createdb.c | 11
-rw-r--r--  src/bin/scripts/dropdb.c | 5
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/utils/guc.h | 3
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt_common.c | 31
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 20
-rw-r--r--  src/makefiles/pgxs.mk | 35
-rw-r--r--  src/pl/plperl/GNUmakefile | 5
-rw-r--r--  src/pl/plpgsql/src/Makefile | 5
-rw-r--r--  src/pl/plpython/Makefile | 7
-rw-r--r--  src/pl/tcl/Makefile | 5
-rw-r--r--  src/test/isolation/Makefile | 13
-rw-r--r--  src/test/isolation/isolation_main.c | 2
-rw-r--r--  src/test/isolation/isolationtester.c | 6
-rw-r--r--  src/test/isolation/specparse.y | 5
-rw-r--r--  src/test/regress/expected/foreign_data.out | 2
-rw-r--r--  src/timezone/pgtz.c | 37
41 files changed, 343 insertions, 241 deletions
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index df0f15679f..d3de8934ee 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -431,8 +431,8 @@ CLOGShmemInit(void)
/*
* This func must be called ONCE on system install. It creates
* the initial CLOG segment. (The CLOG directory is assumed to
- * have been created by the initdb shell script, and CLOGShmemInit
- * must have been called already.)
+ * have been created by initdb, and CLOGShmemInit must have been
+ * called already.)
*/
void
BootStrapCLOG(void)
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 500335bd6f..555bb134f5 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -21,7 +21,6 @@
#include "miscadmin.h"
#include "postmaster/autovacuum.h"
#include "storage/pmsignal.h"
-#include "storage/predicate.h"
#include "storage/proc.h"
#include "utils/builtins.h"
#include "utils/syscache.h"
@@ -162,10 +161,6 @@ GetNewTransactionId(bool isSubXact)
ExtendCLOG(xid);
ExtendSUBTRANS(xid);
- /* If it's top level, the predicate locking system also needs to know. */
- if (!isSubXact)
- RegisterPredicateLockingXid(xid);
-
/*
* Now advance the nextXid counter. This must not happen until after we
* have successfully completed ExtendCLOG() --- if that routine fails, we
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 8a4c4eccd7..2ca1c14549 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -455,6 +455,13 @@ AssignTransactionId(TransactionState s)
SubTransSetParent(s->transactionId, s->parent->transactionId, false);
/*
+ * If it's a top-level transaction, the predicate locking system needs to
+ * be told about it too.
+ */
+ if (!isSubXact)
+ RegisterPredicateLockingXid(s->transactionId);
+
+ /*
* Acquire lock on the transaction XID. (We assume this cannot block.) We
* have to ensure that the lock is assigned to the transaction's own
* ResourceOwner.
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b0e4c41d6f..e71090f71b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -6611,12 +6611,11 @@ StartupXLOG(void)
}
/*
- * If we launched a WAL receiver, it should be gone by now. It will trump
- * over the startup checkpoint and subsequent records if it's still alive,
- * so be extra sure that it's gone.
+ * Kill WAL receiver, if it's still running, before we continue to write
+ * the startup checkpoint record. It will trump over the checkpoint and
+ * subsequent records if it's still alive when we start writing WAL.
*/
- if (WalRcvInProgress())
- elog(PANIC, "wal receiver still active");
+ ShutdownWalRcv();
/*
* We don't need the latch anymore. It's not strictly necessary to disown
diff --git a/src/backend/catalog/information_schema.sql b/src/backend/catalog/information_schema.sql
index c623fb7e75..452a0ead44 100644
--- a/src/backend/catalog/information_schema.sql
+++ b/src/backend/catalog/information_schema.sql
@@ -2557,8 +2557,8 @@ CREATE VIEW _pg_foreign_tables AS
WHERE w.oid = s.srvfdw
AND u.oid = c.relowner
AND (pg_has_role(c.relowner, 'USAGE')
- OR has_table_privilege(c.oid, 'SELECT')
- OR has_any_column_privilege(c.oid, 'SELECT'))
+ OR has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER')
+ OR has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES'))
AND n.oid = c.relnamespace
AND c.oid = t.ftrelid
AND c.relkind = 'f'
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 191ef543cd..dc0f6059b0 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -38,6 +38,7 @@
#include "miscadmin.h"
#include "optimizer/planner.h"
#include "storage/bufmgr.h"
+#include "storage/lmgr.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/acl.h"
@@ -751,8 +752,24 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
isnull = (bool *) palloc(natts * sizeof(bool));
/*
+ * If the OldHeap has a toast table, get lock on the toast table to keep
+ * it from being vacuumed. This is needed because autovacuum processes
+ * toast tables independently of their main tables, with no lock on the
+ * latter. If an autovacuum were to start on the toast table after we
+ * compute our OldestXmin below, it would use a later OldestXmin, and then
+ * possibly remove as DEAD toast tuples belonging to main tuples we think
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
+ * tuples.
+ *
+ * We don't need to open the toast relation here, just lock it. The lock
+ * will be held till end of transaction.
+ */
+ if (OldHeap->rd_rel->reltoastrelid)
+ LockRelationOid(OldHeap->rd_rel->reltoastrelid, AccessExclusiveLock);
+
+ /*
* We need to log the copied data in WAL iff WAL archiving/streaming is
- * enabled AND it's not a WAL-logged rel.
+ * enabled AND it's a WAL-logged rel.
*/
use_wal = XLogIsNeeded() && RelationNeedsWAL(NewHeap);
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index ff84045d4f..b91e4a4bd2 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -183,10 +183,22 @@ DefineIndex(RangeVar *heapRelation,
/* Note: during bootstrap may see uncataloged relation */
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_UNCATALOGED)
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table",
- heapRelation->relname)));
+ {
+ if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ /*
+ * Custom error message for FOREIGN TABLE since the term is
+ * close to a regular table and can confuse the user.
+ */
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("cannot create index on foreign table \"%s\"",
+ heapRelation->relname)));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is not a table",
+ heapRelation->relname)));
+ }
/*
* Don't try to CREATE INDEX on temp tables of other backends.
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 493bc86299..45eaa03fda 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -1065,6 +1065,11 @@ SS_process_ctes(PlannerInfo *root)
* (Notionally, we replace the SubLink with a constant TRUE, then elide the
* redundant constant from the qual.)
*
+ * On success, the caller is also responsible for recursively applying
+ * pull_up_sublinks processing to the rarg and quals of the returned JoinExpr.
+ * (On failure, there is no need to do anything, since pull_up_sublinks will
+ * be applied when we recursively plan the sub-select.)
+ *
* Side effects of a successful conversion include adding the SubLink's
* subselect to the query's rangetable, so that it can be referenced in
* the JoinExpr's rarg.
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index a70439cc67..5d163292c5 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -318,6 +318,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
{
SubLink *sublink = (SubLink *) node;
JoinExpr *j;
+ Relids child_rels;
/* Is it a convertible ANY or EXISTS clause? */
if (sublink->subLinkType == ANY_SUBLINK)
@@ -326,7 +327,18 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
available_rels);
if (j)
{
- /* Yes, insert the new join node into the join tree */
+ /* Yes; recursively process what we pulled up */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+ /* Pulled-up ANY/EXISTS quals can use those rels too */
+ child_rels = bms_add_members(child_rels, available_rels);
+ /* ... and any inserted joins get stacked onto j->rarg */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ child_rels,
+ &j->rarg);
+ /* Now insert the new join node into the join tree */
j->larg = *jtlink;
*jtlink = (Node *) j;
/* and return NULL representing constant TRUE */
@@ -339,7 +351,18 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
available_rels);
if (j)
{
- /* Yes, insert the new join node into the join tree */
+ /* Yes; recursively process what we pulled up */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+ /* Pulled-up ANY/EXISTS quals can use those rels too */
+ child_rels = bms_add_members(child_rels, available_rels);
+ /* ... and any inserted joins get stacked onto j->rarg */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ child_rels,
+ &j->rarg);
+ /* Now insert the new join node into the join tree */
j->larg = *jtlink;
*jtlink = (Node *) j;
/* and return NULL representing constant TRUE */
@@ -354,6 +377,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
/* If the immediate argument of NOT is EXISTS, try to convert */
SubLink *sublink = (SubLink *) get_notclausearg((Expr *) node);
JoinExpr *j;
+ Relids child_rels;
if (sublink && IsA(sublink, SubLink))
{
@@ -363,7 +387,18 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
available_rels);
if (j)
{
- /* Yes, insert the new join node into the join tree */
+ /* Yes; recursively process what we pulled up */
+ j->rarg = pull_up_sublinks_jointree_recurse(root,
+ j->rarg,
+ &child_rels);
+ /* Pulled-up ANY/EXISTS quals can use those rels too */
+ child_rels = bms_add_members(child_rels, available_rels);
+ /* ... and any inserted joins get stacked onto j->rarg */
+ j->quals = pull_up_sublinks_qual_recurse(root,
+ j->quals,
+ child_rels,
+ &j->rarg);
+ /* Now insert the new join node into the join tree */
j->larg = *jtlink;
*jtlink = (Node *) j;
/* and return NULL representing constant TRUE */
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 34ecd292cb..cc0d6f5557 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -597,7 +597,8 @@ static void SplitColQualList(List *qualList,
* have any bad effects since obviously the keywords will still behave the
* same as if they weren't keywords). We need to do this for PARTITION,
* RANGE, ROWS to support opt_existing_window_name; and for RANGE, ROWS
- * so that they can follow a_expr without creating
+ * so that they can follow a_expr without creating postfix-operator problems;
+ * and for NULL so that it can follow b_expr in ColQualList without creating
* postfix-operator problems.
*
* The frame_bound productions UNBOUNDED PRECEDING and UNBOUNDED FOLLOWING
@@ -610,16 +611,16 @@ static void SplitColQualList(List *qualList,
* blame any funny behavior of UNBOUNDED on the SQL standard, though.
*/
%nonassoc UNBOUNDED /* ideally should have same precedence as IDENT */
-%nonassoc IDENT PARTITION RANGE ROWS PRECEDING FOLLOWING
+%nonassoc IDENT NULL_P PARTITION RANGE ROWS PRECEDING FOLLOWING
%left Op OPERATOR /* multi-character ops and user-defined operators */
%nonassoc NOTNULL
%nonassoc ISNULL
-%nonassoc IS NULL_P TRUE_P FALSE_P UNKNOWN /* sets precedence for IS NULL, etc */
+%nonassoc IS /* sets precedence for IS NULL, etc */
%left '+' '-'
%left '*' '/' '%'
%left '^'
/* Unary Operators */
-%left AT ZONE /* sets precedence for AT TIME ZONE */
+%left AT /* sets precedence for AT TIME ZONE */
%left COLLATE
%right UMINUS
%left '[' ']'
@@ -4125,9 +4126,7 @@ TriggerFuncArg:
}
| FCONST { $$ = makeString($1); }
| Sconst { $$ = makeString($1); }
- | BCONST { $$ = makeString($1); }
- | XCONST { $$ = makeString($1); }
- | ColId { $$ = makeString($1); }
+ | ColLabel { $$ = makeString($1); }
;
OptConstrFromTable:
@@ -9714,7 +9713,7 @@ a_expr: c_expr { $$ = $1; }
n->location = @2;
$$ = (Node *) n;
}
- | a_expr AT TIME ZONE a_expr
+ | a_expr AT TIME ZONE a_expr %prec AT
{
FuncCall *n = makeNode(FuncCall);
n->funcname = SystemFuncName("timezone");
@@ -9896,7 +9895,7 @@ a_expr: c_expr { $$ = $1; }
* a ISNULL
* a NOTNULL
*/
- | a_expr IS NULL_P
+ | a_expr IS NULL_P %prec IS
{
NullTest *n = makeNode(NullTest);
n->arg = (Expr *) $1;
@@ -9910,7 +9909,7 @@ a_expr: c_expr { $$ = $1; }
n->nulltesttype = IS_NULL;
$$ = (Node *)n;
}
- | a_expr IS NOT NULL_P
+ | a_expr IS NOT NULL_P %prec IS
{
NullTest *n = makeNode(NullTest);
n->arg = (Expr *) $1;
@@ -9928,42 +9927,42 @@ a_expr: c_expr { $$ = $1; }
{
$$ = (Node *)makeOverlaps($1, $3, @2, yyscanner);
}
- | a_expr IS TRUE_P
+ | a_expr IS TRUE_P %prec IS
{
BooleanTest *b = makeNode(BooleanTest);
b->arg = (Expr *) $1;
b->booltesttype = IS_TRUE;
$$ = (Node *)b;
}
- | a_expr IS NOT TRUE_P
+ | a_expr IS NOT TRUE_P %prec IS
{
BooleanTest *b = makeNode(BooleanTest);
b->arg = (Expr *) $1;
b->booltesttype = IS_NOT_TRUE;
$$ = (Node *)b;
}
- | a_expr IS FALSE_P
+ | a_expr IS FALSE_P %prec IS
{
BooleanTest *b = makeNode(BooleanTest);
b->arg = (Expr *) $1;
b->booltesttype = IS_FALSE;
$$ = (Node *)b;
}
- | a_expr IS NOT FALSE_P
+ | a_expr IS NOT FALSE_P %prec IS
{
BooleanTest *b = makeNode(BooleanTest);
b->arg = (Expr *) $1;
b->booltesttype = IS_NOT_FALSE;
$$ = (Node *)b;
}
- | a_expr IS UNKNOWN
+ | a_expr IS UNKNOWN %prec IS
{
BooleanTest *b = makeNode(BooleanTest);
b->arg = (Expr *) $1;
b->booltesttype = IS_UNKNOWN;
$$ = (Node *)b;
}
- | a_expr IS NOT UNKNOWN
+ | a_expr IS NOT UNKNOWN %prec IS
{
BooleanTest *b = makeNode(BooleanTest);
b->arg = (Expr *) $1;
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 9fa63a4e6e..fcc912f8e3 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -1108,6 +1108,7 @@ do_start_worker(void)
recentXid = ReadNewTransactionId();
xidForceLimit = recentXid - autovacuum_freeze_max_age;
/* ensure it's a "normal" XID, else TransactionIdPrecedes misbehaves */
+ /* this can cause the limit to go backwards by 3, but that's OK */
if (xidForceLimit < FirstNormalTransactionId)
xidForceLimit -= FirstNormalTransactionId;
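
The comment added above points out that the clamp can move the limit back past the exact arithmetic value by 3. A minimal standalone sketch of that wraparound-safe clamp, assuming 32-bit XIDs with 0-2 reserved (the helper name and main() driver are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;
    #define FirstNormalTransactionId ((TransactionId) 3)    /* XIDs 0-2 are reserved */

    /* Clamp a computed force limit back into "normal" XID space. */
    static TransactionId
    clamp_force_limit(TransactionId recentXid, uint32_t freeze_max_age)
    {
        TransactionId limit = recentXid - freeze_max_age;   /* may wrap into 0-2 */

        /*
         * If the subtraction landed on a reserved XID, shift back by 3 so that
         * TransactionIdPrecedes-style comparisons stay well-defined; the limit
         * ends up slightly older than the exact arithmetic value, which is OK.
         */
        if (limit < FirstNormalTransactionId)
            limit -= FirstNormalTransactionId;
        return limit;
    }

    int
    main(void)
    {
        printf("%u\n", clamp_force_limit(10, 9));   /* 10 - 9 = 1 -> 4294967294 */
        return 0;
    }
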
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 48ff9cc151..3b3158efe5 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -2283,6 +2283,9 @@ PredicateLockTupleRowVersionLink(const Relation relation,
* locks. Even if a serializable transaction starts concurrently,
* we know it can't take any SIREAD locks on the modified tuple
* because the caller is holding the associated buffer page lock.
+ * Memory reordering isn't an issue; the memory barrier in the
+ * LWLock acquisition guarantees that this read occurs while the
+ * buffer page lock is held.
*/
if (!TransactionIdIsValid(PredXact->SxactGlobalXmin))
return;
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index 8c0eaa78a7..be1663cd88 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -75,7 +75,7 @@ NIFinishBuild(IspellDict *Conf)
* doesn't need that. The cpalloc and cpalloc0 macros are just documentation
* to indicate which allocations actually require zeroing.
*/
-#define COMPACT_ALLOC_CHUNK 8192 /* must be > aset.c's allocChunkLimit */
+#define COMPACT_ALLOC_CHUNK 8192 /* amount to get from palloc at once */
#define COMPACT_MAX_REQ 1024 /* must be < COMPACT_ALLOC_CHUNK */
static void *
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index db0a6487ac..0a12a9b2e1 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -3569,24 +3569,27 @@ DateTimeParseError(int dterr, const char *str, const char *datatype)
static const datetkn *
datebsearch(const char *key, const datetkn *base, int nel)
{
- const datetkn *last = base + nel - 1,
- *position;
- int result;
-
- while (last >= base)
+ if (nel > 0)
{
- position = base + ((last - base) >> 1);
- result = key[0] - position->token[0];
- if (result == 0)
+ const datetkn *last = base + nel - 1,
+ *position;
+ int result;
+
+ while (last >= base)
{
- result = strncmp(key, position->token, TOKMAXLEN);
+ position = base + ((last - base) >> 1);
+ result = key[0] - position->token[0];
if (result == 0)
- return position;
+ {
+ result = strncmp(key, position->token, TOKMAXLEN);
+ if (result == 0)
+ return position;
+ }
+ if (result < 0)
+ last = position - 1;
+ else
+ base = position + 1;
}
- if (result < 0)
- last = position - 1;
- else
- base = position + 1;
}
return NULL;
}
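
Both datebsearch() copies touched by this commit (here and in ecpg's dt_common.c below) gain an "if (nel > 0)" guard so that an empty token table never computes base + nel - 1, a pointer before the array. A self-contained sketch of the patched search, using a stand-in token type (tok_t and the TOKMAXLEN value are illustrative):

    #include <string.h>

    #define TOKMAXLEN 10                 /* illustrative fixed token width */

    typedef struct { char token[TOKMAXLEN]; int value; } tok_t;

    /*
     * Binary search over a sorted token table.  Returns NULL if the key is not
     * found or the table is empty; the nel > 0 guard keeps us from forming a
     * pointer before the start of an empty array.
     */
    static const tok_t *
    tok_bsearch(const char *key, const tok_t *base, int nel)
    {
        if (nel > 0)
        {
            const tok_t *last = base + nel - 1;

            while (last >= base)
            {
                const tok_t *position = base + ((last - base) >> 1);
                int result = key[0] - position->token[0];   /* cheap first-byte test */

                if (result == 0)
                {
                    result = strncmp(key, position->token, TOKMAXLEN);
                    if (result == 0)
                        return position;
                }
                if (result < 0)
                    last = position - 1;
                else
                    base = position + 1;
            }
        }
        return NULL;
    }
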
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 1f6fba5f75..3ac3254afb 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -324,7 +324,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
PGC_INTERNAL, PGC_S_OVERRIDE);
/* If we have no other source of client_encoding, use server encoding */
SetConfigOption("client_encoding", GetDatabaseEncodingName(),
- PGC_BACKEND, PGC_S_DEFAULT);
+ PGC_BACKEND, PGC_S_DYNAMIC_DEFAULT);
/* assign locale variables */
collate = NameStr(dbform->datcollate);
diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l
index 10ef12eb24..78907b939d 100644
--- a/src/backend/utils/misc/guc-file.l
+++ b/src/backend/utils/misc/guc-file.l
@@ -14,6 +14,7 @@
#include <ctype.h>
#include <unistd.h>
+#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "storage/fd.h"
#include "utils/guc.h"
@@ -109,7 +110,6 @@ ProcessConfigFile(GucContext context)
*tail;
char *cvc = NULL;
struct config_string *cvc_struct;
- const char *envvar;
int i;
Assert(context == PGC_POSTMASTER || context == PGC_SIGHUP);
@@ -265,7 +265,7 @@ ProcessConfigFile(GucContext context)
stack->source = PGC_S_DEFAULT;
}
- /* Now we can re-apply the wired-in default */
+ /* Now we can re-apply the wired-in default (i.e., the boot_val) */
set_config_option(gconf->name, NULL, context, PGC_S_DEFAULT,
GUC_ACTION_SET, true);
if (context == PGC_SIGHUP)
@@ -275,25 +275,28 @@ ProcessConfigFile(GucContext context)
}
/*
- * Restore any variables determined by environment variables. This
- * is a no-op except in the case where one of these had been in the
- * config file and is now removed. PGC_S_ENV_VAR will override the
- * wired-in default we just applied, but cannot override any other source.
+ * Restore any variables determined by environment variables or
+ * dynamically-computed defaults. This is a no-op except in the case
+ * where one of these had been in the config file and is now removed.
*
- * Keep this list in sync with InitializeGUCOptions()!
- * PGPORT can be ignored, because it cannot be changed without restart.
- * We assume rlimit hasn't changed, either.
+ * In particular, we *must not* do this during the postmaster's
+ * initial loading of the file, since the timezone functions in
+ * particular should be run only after initialization is complete.
+ *
+ * XXX this is an unmaintainable crock, because we have to know how
+ * to set (or at least what to call to set) every variable that could
+ * potentially have PGC_S_DYNAMIC_DEFAULT or PGC_S_ENV_VAR source.
+ * However, there's no time to redesign it for 9.1.
*/
- envvar = getenv("PGDATESTYLE");
- if (envvar != NULL)
- set_config_option("datestyle", envvar, PGC_POSTMASTER,
- PGC_S_ENV_VAR, GUC_ACTION_SET, true);
-
- envvar = getenv("PGCLIENTENCODING");
- if (envvar != NULL)
- set_config_option("client_encoding", envvar, PGC_POSTMASTER,
- PGC_S_ENV_VAR, GUC_ACTION_SET, true);
-
+ if (context == PGC_SIGHUP)
+ {
+ InitializeGUCOptionsFromEnvironment();
+ pg_timezone_initialize();
+ pg_timezone_abbrev_initialize();
+ /* this selects SQL_ASCII in processes not connected to a database */
+ SetConfigOption("client_encoding", GetDatabaseEncodingName(),
+ PGC_BACKEND, PGC_S_DYNAMIC_DEFAULT);
+ }
/* If we got here all the options checked out okay, so apply them. */
for (item = head; item; item = item->next)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 738e2152ba..92391eda2f 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -502,6 +502,7 @@ const char *const GucContext_Names[] =
const char *const GucSource_Names[] =
{
/* PGC_S_DEFAULT */ "default",
+ /* PGC_S_DYNAMIC_DEFAULT */ "default",
/* PGC_S_ENV_VAR */ "environment variable",
/* PGC_S_FILE */ "configuration file",
/* PGC_S_ARGV */ "command line",
@@ -3269,6 +3270,7 @@ static int GUCNestLevel = 0; /* 1 when in main transaction */
static int guc_var_compare(const void *a, const void *b);
static int guc_name_compare(const char *namea, const char *nameb);
+static void InitializeGUCOptionsFromEnvironment(void);
static void InitializeOneGUCOption(struct config_generic * gconf);
static void push_old_value(struct config_generic * gconf, GucAction action);
static void ReportGUCOption(struct config_generic * record);
@@ -3812,8 +3814,6 @@ void
InitializeGUCOptions(void)
{
int i;
- char *env;
- long stack_rlimit;
/*
* Before log_line_prefix could possibly receive a nonempty setting, make
@@ -3852,9 +3852,25 @@ InitializeGUCOptions(void)
/*
* For historical reasons, some GUC parameters can receive defaults from
- * environment variables. Process those settings. NB: if you add or
- * remove anything here, see also ProcessConfigFile().
+ * environment variables. Process those settings.
*/
+ InitializeGUCOptionsFromEnvironment();
+}
+
+/*
+ * Assign any GUC values that can come from the server's environment.
+ *
+ * This is called from InitializeGUCOptions, and also from ProcessConfigFile
+ * to deal with the possibility that a setting has been removed from
+ * postgresql.conf and should now get a value from the environment.
+ * (The latter is a kludge that should probably go away someday; if so,
+ * fold this back into InitializeGUCOptions.)
+ */
+static void
+InitializeGUCOptionsFromEnvironment(void)
+{
+ char *env;
+ long stack_rlimit;
env = getenv("PGPORT");
if (env != NULL)
@@ -6334,6 +6350,7 @@ define_custom_variable(struct config_generic * variable)
switch (pHolder->gen.source)
{
case PGC_S_DEFAULT:
+ case PGC_S_DYNAMIC_DEFAULT:
case PGC_S_ENV_VAR:
case PGC_S_FILE:
case PGC_S_ARGV:
@@ -8420,15 +8437,13 @@ assign_timezone_abbreviations(const char *newval, void *extra)
*
* This is called after initial loading of postgresql.conf. If no
* timezone_abbreviations setting was found therein, select default.
+ * If a non-default value is already installed, nothing will happen.
*/
void
pg_timezone_abbrev_initialize(void)
{
- if (timezone_abbreviations_string == NULL)
- {
- SetConfigOption("timezone_abbreviations", "Default",
- PGC_POSTMASTER, PGC_S_DEFAULT);
- }
+ SetConfigOption("timezone_abbreviations", "Default",
+ PGC_POSTMASTER, PGC_S_DYNAMIC_DEFAULT);
}
static const char *
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index b8a1582eaa..655dad42c7 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -390,8 +390,7 @@
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
-#log_timezone = unknown # actually, defaults to TZ environment
- # setting
+#log_timezone = '(defaults to server environment setting)'
#------------------------------------------------------------------------------
@@ -471,8 +470,7 @@
#datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
-#timezone = unknown # actually, defaults to TZ environment
- # setting
+#timezone = '(defaults to server environment setting)'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index e95dcb6b7c..140b0c74d9 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -89,7 +89,9 @@
*
* With the current parameters, request sizes up to 8K are treated as chunks,
* larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
- * to adjust the boundary point.
+ * to adjust the boundary point. (But in contexts with small maxBlockSize,
+ * we may set the allocChunkLimit to less than 8K, so as to avoid space
+ * wastage.)
*--------------------
*/
@@ -97,6 +99,8 @@
#define ALLOCSET_NUM_FREELISTS 11
#define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
+#define ALLOC_CHUNK_FRACTION 4
+/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
/*--------------------
* The first block allocated for an allocset has size initBlockSize.
@@ -380,15 +384,20 @@ AllocSetContextCreate(MemoryContext parent,
/*
* Compute the allocation chunk size limit for this context. It can't be
* more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
- * If maxBlockSize is small then requests exceeding the maxBlockSize
- * should be treated as large chunks, too. We have to have
- * allocChunkLimit a power of two, because the requested and
- * actually-allocated sizes of any chunk must be on the same side of the
- * limit, else we get confused about whether the chunk is "big".
+ * If maxBlockSize is small then requests exceeding the maxBlockSize, or
+ * even a significant fraction of it, should be treated as large chunks
+ * too. For the typical case of maxBlockSize a power of 2, the chunk size
+ * limit will be at most 1/8th maxBlockSize, so that given a stream of
+ * requests that are all the maximum chunk size we will waste at most
+ * 1/8th of the allocated space.
+ *
+ * We have to have allocChunkLimit a power of two, because the requested
+ * and actually-allocated sizes of any chunk must be on the same side of
+ * the limit, else we get confused about whether the chunk is "big".
*/
context->allocChunkLimit = ALLOC_CHUNK_LIMIT;
- while (context->allocChunkLimit >
- (Size) (maxBlockSize - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ))
+ while ((Size) (context->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
+ (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
context->allocChunkLimit >>= 1;
/*
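
The new loop above picks allocChunkLimit as the largest power of two such that a maximal chunk plus its header fits in 1/4 of a block's usable space. A small sketch of that computation; the ALLOC_BLOCKHDRSZ and ALLOC_CHUNKHDRSZ values below are placeholders for the sketch, not the real header sizes:

    #include <stddef.h>

    #define ALLOC_MINBITS           3
    #define ALLOCSET_NUM_FREELISTS  11
    #define ALLOC_CHUNK_LIMIT       (1 << (ALLOCSET_NUM_FREELISTS - 1 + ALLOC_MINBITS))  /* 8192 */
    #define ALLOC_CHUNK_FRACTION    4
    #define ALLOC_BLOCKHDRSZ        32   /* placeholder header sizes */
    #define ALLOC_CHUNKHDRSZ        16

    /*
     * Halve the chunk limit until a maximal chunk plus its header fits within
     * 1/ALLOC_CHUNK_FRACTION of the block's usable space; the limit stays a
     * power of two so requested and allocated sizes fall on the same side of it.
     */
    static size_t
    compute_chunk_limit(size_t maxBlockSize)
    {
        size_t limit = ALLOC_CHUNK_LIMIT;

        while (limit + ALLOC_CHUNKHDRSZ >
               (maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION)
            limit >>= 1;
        return limit;
    }
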
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 3b321494e4..56a396bddd 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1595,6 +1595,7 @@ setup_collation(void)
size_t len;
int enc;
bool skip;
+ char *quoted_locale;
char alias[NAMEDATALEN];
len = strlen(localebuf);
@@ -1645,8 +1646,10 @@ setup_collation(void)
count++;
- PG_CMD_PRINTF2("INSERT INTO tmp_pg_collation (locale, encoding) VALUES ('%s', %d);\n",
- escape_quotes(localebuf), enc);
+ quoted_locale = escape_quotes(localebuf);
+
+ PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation VALUES (E'%s', E'%s', %d);\n",
+ quoted_locale, quoted_locale, enc);
/*
* Generate aliases such as "en_US" in addition to "en_US.utf8" for
@@ -1654,29 +1657,33 @@ setup_collation(void)
* only, so this doesn't clash with "en_US" for LATIN1, say.
*/
if (normalize_locale_name(alias, localebuf))
- PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation (collname, locale, encoding) VALUES ('%s', '%s', %d);\n",
- escape_quotes(alias), escape_quotes(localebuf), enc);
+ PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation VALUES (E'%s', E'%s', %d);\n",
+ escape_quotes(alias), quoted_locale, enc);
}
/* Add an SQL-standard name */
- PG_CMD_PRINTF1("INSERT INTO tmp_pg_collation (collname, locale, encoding) VALUES ('ucs_basic', 'C', %d);\n", PG_UTF8);
+ PG_CMD_PRINTF1("INSERT INTO tmp_pg_collation VALUES ('ucs_basic', 'C', %d);\n", PG_UTF8);
/*
* When copying collations to the final location, eliminate aliases that
* conflict with an existing locale name for the same encoding. For
* example, "br_FR.iso88591" is normalized to "br_FR", both for encoding
* LATIN1. But the unnormalized locale "br_FR" already exists for LATIN1.
- * Prefer the collation that matches the OS locale name, else the first
+ * Prefer the alias that matches the OS locale name, else the first locale
* name by sort order (arbitrary choice to be deterministic).
+ *
+ * Also, eliminate any aliases that conflict with pg_collation's
+ * hard-wired entries for "C" etc.
*/
PG_CMD_PUTS("INSERT INTO pg_collation (collname, collnamespace, collowner, collencoding, collcollate, collctype) "
- " SELECT DISTINCT ON (final_collname, collnamespace, encoding)"
- " COALESCE(collname, locale) AS final_collname, "
+ " SELECT DISTINCT ON (collname, encoding)"
+ " collname, "
" (SELECT oid FROM pg_namespace WHERE nspname = 'pg_catalog') AS collnamespace, "
" (SELECT relowner FROM pg_class WHERE relname = 'pg_collation') AS collowner, "
" encoding, locale, locale "
" FROM tmp_pg_collation"
- " ORDER BY final_collname, collnamespace, encoding, (collname = locale) DESC, locale;\n");
+ " WHERE NOT EXISTS (SELECT 1 FROM pg_collation WHERE collname = tmp_pg_collation.collname)"
+ " ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
pclose(locale_a_handle);
PG_CMD_CLOSE;
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index a5d9c2e652..3feb3ee548 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -118,23 +118,23 @@ xmalloc0(int size)
static void
usage(void)
{
- printf(_("%s takes base backups of running PostgreSQL servers\n\n"),
+ printf(_("%s takes a base backup of a running PostgreSQL server.\n\n"),
progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nOptions controlling the output:\n"));
- printf(_(" -D, --pgdata=directory receive base backup into directory\n"));
- printf(_(" -F, --format=p|t output format (plain, tar)\n"));
- printf(_(" -x, --xlog include required WAL files in backup\n"));
- printf(_(" -Z, --compress=0-9 compress tar output\n"));
+ printf(_(" -D, --pgdata=DIRECTORY receive base backup into directory\n"));
+ printf(_(" -F, --format=p|t output format (plain, tar)\n"));
+ printf(_(" -x, --xlog include required WAL files in backup\n"));
+ printf(_(" -Z, --compress=0-9 compress tar output\n"));
printf(_("\nGeneral options:\n"));
printf(_(" -c, --checkpoint=fast|spread\n"
- " set fast or spread checkpointing\n"));
- printf(_(" -l, --label=label set backup label\n"));
- printf(_(" -P, --progress show progress information\n"));
- printf(_(" -v, --verbose output verbose messages\n"));
- printf(_(" -?, --help show this help, then exit\n"));
- printf(_(" -V, --version output version information, then exit\n"));
+ " set fast or spread checkpointing\n"));
+ printf(_(" -l, --label=LABEL set backup label\n"));
+ printf(_(" -P, --progress show progress information\n"));
+ printf(_(" -v, --verbose output verbose messages\n"));
+ printf(_(" --help show this help, then exit\n"));
+ printf(_(" --version output version information, then exit\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
printf(_(" -p, --port=PORT database server port number\n"));
@@ -337,7 +337,7 @@ ReceiveTarFile(PGconn *conn, PGresult *res, int rownum)
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COPY_OUT)
{
- fprintf(stderr, _("%s: could not get COPY data stream: %s\n"),
+ fprintf(stderr, _("%s: could not get COPY data stream: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
@@ -398,7 +398,7 @@ ReceiveTarFile(PGconn *conn, PGresult *res, int rownum)
}
else if (r == -2)
{
- fprintf(stderr, _("%s: could not read COPY data: %s\n"),
+ fprintf(stderr, _("%s: could not read COPY data: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
@@ -467,7 +467,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COPY_OUT)
{
- fprintf(stderr, _("%s: could not get COPY data stream: %s\n"),
+ fprintf(stderr, _("%s: could not get COPY data stream: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
@@ -496,7 +496,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
}
else if (r == -2)
{
- fprintf(stderr, _("%s: could not read COPY data: %s\n"),
+ fprintf(stderr, _("%s: could not read COPY data: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
@@ -510,7 +510,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
*/
if (r != 512)
{
- fprintf(stderr, _("%s: Invalid tar block header size: %i\n"),
+ fprintf(stderr, _("%s: invalid tar block header size: %i\n"),
progname, r);
disconnect_and_exit(1);
}
@@ -518,7 +518,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
if (sscanf(copybuf + 124, "%11o", &current_len_left) != 1)
{
- fprintf(stderr, _("%s: could not parse file size!\n"),
+ fprintf(stderr, _("%s: could not parse file size\n"),
progname);
disconnect_and_exit(1);
}
@@ -526,7 +526,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
/* Set permissions on the file */
if (sscanf(&copybuf[100], "%07o ", &filemode) != 1)
{
- fprintf(stderr, _("%s: could not parse file mode!\n"),
+ fprintf(stderr, _("%s: could not parse file mode\n"),
progname);
disconnect_and_exit(1);
}
@@ -581,7 +581,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
}
else
{
- fprintf(stderr, _("%s: unknown link indicator \"%c\"\n"),
+ fprintf(stderr, _("%s: unrecognized link indicator \"%c\"\n"),
progname, copybuf[156]);
disconnect_and_exit(1);
}
@@ -659,7 +659,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
if (file != NULL)
{
- fprintf(stderr, _("%s: last file was never finished!\n"), progname);
+ fprintf(stderr, _("%s: last file was never finished\n"), progname);
disconnect_and_exit(1);
}
@@ -740,7 +740,7 @@ GetConnection(void)
if (PQstatus(tmpconn) != CONNECTION_OK)
{
- fprintf(stderr, _("%s: could not connect to server: %s\n"),
+ fprintf(stderr, _("%s: could not connect to server: %s"),
progname, PQerrorMessage(tmpconn));
exit(1);
}
@@ -780,7 +780,7 @@ BaseBackup(void)
if (PQsendQuery(conn, current_path) == 0)
{
- fprintf(stderr, _("%s: could not start base backup: %s\n"),
+ fprintf(stderr, _("%s: could not start base backup: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
@@ -791,13 +791,13 @@ BaseBackup(void)
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, _("%s: could not initiate base backup: %s\n"),
+ fprintf(stderr, _("%s: could not initiate base backup: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
if (PQntuples(res) != 1)
{
- fprintf(stderr, _("%s: no start point returned from server.\n"),
+ fprintf(stderr, _("%s: no start point returned from server\n"),
progname);
disconnect_and_exit(1);
}
@@ -813,13 +813,13 @@ BaseBackup(void)
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, _("%s: could not get backup header: %s\n"),
+ fprintf(stderr, _("%s: could not get backup header: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
if (PQntuples(res) < 1)
{
- fprintf(stderr, _("%s: no data returned from server.\n"), progname);
+ fprintf(stderr, _("%s: no data returned from server\n"), progname);
disconnect_and_exit(1);
}
@@ -847,7 +847,7 @@ BaseBackup(void)
*/
if (format == 't' && strcmp(basedir, "-") == 0 && PQntuples(res) > 1)
{
- fprintf(stderr, _("%s: can only write single tablespace to stdout, database has %i.\n"),
+ fprintf(stderr, _("%s: can only write single tablespace to stdout, database has %i\n"),
progname, PQntuples(res));
disconnect_and_exit(1);
}
@@ -876,13 +876,13 @@ BaseBackup(void)
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
{
- fprintf(stderr, _("%s: could not get end xlog position from server.\n"),
+ fprintf(stderr, _("%s: could not get end xlog position from server\n"),
progname);
disconnect_and_exit(1);
}
if (PQntuples(res) != 1)
{
- fprintf(stderr, _("%s: no end point returned from server.\n"),
+ fprintf(stderr, _("%s: no end point returned from server\n"),
progname);
disconnect_and_exit(1);
}
@@ -894,7 +894,7 @@ BaseBackup(void)
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, _("%s: final receive failed: %s\n"),
+ fprintf(stderr, _("%s: final receive failed: %s"),
progname, PQerrorMessage(conn));
disconnect_and_exit(1);
}
@@ -905,7 +905,7 @@ BaseBackup(void)
PQfinish(conn);
if (verbose)
- fprintf(stderr, "%s: base backup completed.\n", progname);
+ fprintf(stderr, "%s: base backup completed\n", progname);
}
@@ -1003,12 +1003,6 @@ main(int argc, char **argv)
dbhost = xstrdup(optarg);
break;
case 'p':
- if (atoi(optarg) <= 0)
- {
- fprintf(stderr, _("%s: invalid port number \"%s\"\n"),
- progname, optarg);
- exit(1);
- }
dbport = xstrdup(optarg);
break;
case 'U':
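
The header-parsing hunks earlier in this file read fields straight out of 512-byte ustar header blocks; a minimal sketch of that layout (the helper name is hypothetical, not from the patch):

    #include <stdio.h>

    /*
     * ustar header layout (offsets within the 512-byte block): name[100] at 0,
     * mode as octal ASCII at 100, size as octal ASCII at 124, type/link
     * indicator byte at 156 ('0' regular file, '2' symlink, '5' directory).
     */
    static int
    parse_tar_header(const char *hdr, unsigned int *mode, unsigned int *size,
                     char *typeflag)
    {
        if (sscanf(hdr + 100, "%07o", mode) != 1)
            return -1;                      /* could not parse file mode */
        if (sscanf(hdr + 124, "%11o", size) != 1)
            return -1;                      /* could not parse file size */
        *typeflag = hdr[156];
        return 0;
    }
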
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index afc7fd7032..e474a6980d 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -863,7 +863,7 @@ help(const char *progname)
printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
printf(_(" --no-security-label do not dump security label assignments\n"));
- printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
+ printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
@@ -11804,7 +11804,6 @@ dumpTable(Archive *fout, TableInfo *tbinfo)
namecopy = strdup(fmtId(tbinfo->dobj.name));
dumpACL(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
(tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" :
- (tbinfo->relkind == RELKIND_FOREIGN_TABLE) ? "FOREIGN TABLE" :
"TABLE",
namecopy, NULL, tbinfo->dobj.name,
tbinfo->dobj.namespace->dobj.name, tbinfo->rolname,
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 963ae54aea..41a330763f 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -554,7 +554,7 @@ help(void)
printf(_(" --quote-all-identifiers quote all identifiers, even if not keywords\n"));
printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
printf(_(" --no-security-label do not dump security label assignments\n"));
- printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
+ printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 3ef2fa421d..9a7eca0766 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -2234,7 +2234,6 @@ psql_completion(char *text, int start, int end)
" UNION SELECT 'DATABASE'"
" UNION SELECT 'FOREIGN DATA WRAPPER'"
" UNION SELECT 'FOREIGN SERVER'"
- " UNION SELECT 'FOREIGN TABLE'"
" UNION SELECT 'FUNCTION'"
" UNION SELECT 'LANGUAGE'"
" UNION SELECT 'LARGE OBJECT'"
@@ -2246,7 +2245,7 @@ psql_completion(char *text, int start, int end)
pg_strcasecmp(prev_wd, "FOREIGN") == 0)
{
static const char *const list_privilege_foreign[] =
- {"DATA WRAPPER", "SERVER", "TABLE", NULL};
+ {"DATA WRAPPER", "SERVER", NULL};
COMPLETE_WITH_LIST(list_privilege_foreign);
}
diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c
index 9b72eac79b..544f2f64b3 100644
--- a/src/bin/scripts/createdb.c
+++ b/src/bin/scripts/createdb.c
@@ -192,6 +192,11 @@ main(int argc, char *argv[])
appendPQExpBuffer(&sql, ";\n");
+ /*
+ * Connect to the 'postgres' database by default, except have
+ * the 'postgres' user use 'template1' so he can create the
+ * 'postgres' database.
+ */
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);
@@ -208,12 +213,9 @@ main(int argc, char *argv[])
}
PQclear(result);
- PQfinish(conn);
if (comment)
{
- conn = connectDatabase(dbname, host, port, username, prompt_password, progname);
-
printfPQExpBuffer(&sql, "COMMENT ON DATABASE %s IS ", fmtId(dbname));
appendStringLiteralConn(&sql, comment, conn);
appendPQExpBuffer(&sql, ";\n");
@@ -231,9 +233,10 @@ main(int argc, char *argv[])
}
PQclear(result);
- PQfinish(conn);
}
+ PQfinish(conn);
+
exit(0);
}
diff --git a/src/bin/scripts/dropdb.c b/src/bin/scripts/dropdb.c
index 1cf18fd5d8..48f73ae25e 100644
--- a/src/bin/scripts/dropdb.c
+++ b/src/bin/scripts/dropdb.c
@@ -113,6 +113,11 @@ main(int argc, char *argv[])
appendPQExpBuffer(&sql, "DROP DATABASE %s;\n",
fmtId(dbname));
+ /*
+ * Connect to the 'postgres' database by default, except have
+ * the 'postgres' user use 'template1' so he can drop the
+ * 'postgres' database.
+ */
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 2df489f32e..49d1e7db6f 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 201104251
+#define CATALOG_VERSION_NO 201105131
#endif
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 5a42d8cec3..ee52cd735e 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -82,7 +82,8 @@ typedef enum
*/
typedef enum
{
- PGC_S_DEFAULT, /* wired-in default */
+ PGC_S_DEFAULT, /* hard-wired default ("boot_val") */
+ PGC_S_DYNAMIC_DEFAULT, /* default computed during initialization */
PGC_S_ENV_VAR, /* postmaster environment variable */
PGC_S_FILE, /* postgresql.conf */
PGC_S_ARGV, /* postmaster command line */
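
A simplified model of why the position of the new PGC_S_DYNAMIC_DEFAULT entry matters: sources later in the enum take priority over earlier ones, so a default computed at initialization overrides the boot value but still loses to the environment, the config file, and the command line. This only illustrates the ordering rule, not the real set_config_option() logic:

    typedef enum GucSource
    {
        PGC_S_DEFAULT,              /* hard-wired default ("boot_val") */
        PGC_S_DYNAMIC_DEFAULT,      /* default computed during initialization */
        PGC_S_ENV_VAR,              /* postmaster environment variable */
        PGC_S_FILE,                 /* postgresql.conf */
        PGC_S_ARGV                  /* postmaster command line; later sources elided */
    } GucSource;

    typedef struct { int value; GucSource source; } IntSetting;

    /* Apply a new value only if its source is at least as authoritative. */
    static void
    set_setting(IntSetting *s, int newval, GucSource source)
    {
        if (source >= s->source)
        {
            s->value = newval;
            s->source = source;
        }
        /* else: a lower-priority source never overrides a higher-priority one */
    }
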
diff --git a/src/interfaces/ecpg/pgtypeslib/dt_common.c b/src/interfaces/ecpg/pgtypeslib/dt_common.c
index da3224aae3..45f1f8affd 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt_common.c
+++ b/src/interfaces/ecpg/pgtypeslib/dt_common.c
@@ -512,24 +512,27 @@ char *pgtypes_date_months[] = {"January", "February", "March", "April", "May"
static datetkn *
datebsearch(char *key, datetkn *base, unsigned int nel)
{
- datetkn *last = base + nel - 1,
- *position;
- int result;
-
- while (last >= base)
+ if (nel > 0)
{
- position = base + ((last - base) >> 1);
- result = key[0] - position->token[0];
- if (result == 0)
+ datetkn *last = base + nel - 1,
+ *position;
+ int result;
+
+ while (last >= base)
{
- result = strncmp(key, position->token, TOKMAXLEN);
+ position = base + ((last - base) >> 1);
+ result = key[0] - position->token[0];
if (result == 0)
- return position;
+ {
+ result = strncmp(key, position->token, TOKMAXLEN);
+ if (result == 0)
+ return position;
+ }
+ if (result < 0)
+ last = position - 1;
+ else
+ base = position + 1;
}
- if (result < 0)
- last = position - 1;
- else
- base = position + 1;
}
return NULL;
}
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 1b409d1e44..6648753da0 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -3596,10 +3596,11 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
return 1;
}
- /* concatenate values to a single string */
- for (size = 0, i = 0; values[i] != NULL; ++i)
+ /* concatenate values into a single string with newline terminators */
+ size = 1; /* for the trailing null */
+ for (i = 0; values[i] != NULL; i++)
size += values[i]->bv_len + 1;
- if ((result = malloc(size + 1)) == NULL)
+ if ((result = malloc(size)) == NULL)
{
printfPQExpBuffer(errorMessage,
libpq_gettext("out of memory\n"));
@@ -3607,14 +3608,14 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
ldap_unbind(ld);
return 3;
}
- for (p = result, i = 0; values[i] != NULL; ++i)
+ p = result;
+ for (i = 0; values[i] != NULL; i++)
{
- strncpy(p, values[i]->bv_val, values[i]->bv_len);
+ memcpy(p, values[i]->bv_val, values[i]->bv_len);
p += values[i]->bv_len;
*(p++) = '\n';
- if (values[i + 1] == NULL)
- *(p + 1) = '\0';
}
+ *p = '\0';
ldap_value_free_len(values);
ldap_unbind(ld);
@@ -3643,6 +3644,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
printfPQExpBuffer(errorMessage, libpq_gettext(
"missing \"=\" after \"%s\" in connection info string\n"),
optname);
+ free(result);
return 3;
}
else if (*p == '=')
@@ -3661,6 +3663,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
printfPQExpBuffer(errorMessage, libpq_gettext(
"missing \"=\" after \"%s\" in connection info string\n"),
optname);
+ free(result);
return 3;
}
break;
@@ -3724,6 +3727,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
printfPQExpBuffer(errorMessage,
libpq_gettext("invalid connection option \"%s\"\n"),
optname);
+ free(result);
return 1;
}
optname = NULL;
@@ -3732,6 +3736,8 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
oldstate = state;
}
+ free(result);
+
if (state == 5 || state == 6)
{
printfPQExpBuffer(errorMessage, libpq_gettext(
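
The sizing fix above counts the trailing null up front (one byte plus bv_len + 1 per value) and switches strncpy to memcpy, since LDAP values are length-counted and need not be null-terminated. A self-contained sketch of that concatenation, using a stand-in berval struct rather than the real <lber.h> definition:

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for LDAP's length-counted value type. */
    struct berval { unsigned long bv_len; char *bv_val; };

    /*
     * Join a NULL-terminated array of values into one malloc'd buffer, each
     * value followed by '\n' and the whole string ending in a single NUL.
     */
    static char *
    join_values(struct berval **values)
    {
        size_t  size = 1;               /* for the trailing null */
        size_t  i;
        char   *result, *p;

        for (i = 0; values[i] != NULL; i++)
            size += values[i]->bv_len + 1;
        if ((result = malloc(size)) == NULL)
            return NULL;
        p = result;
        for (i = 0; values[i] != NULL; i++)
        {
            memcpy(p, values[i]->bv_val, values[i]->bv_len);   /* value may lack a NUL */
            p += values[i]->bv_len;
            *(p++) = '\n';
        }
        *p = '\0';
        return result;
    }
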
diff --git a/src/makefiles/pgxs.mk b/src/makefiles/pgxs.mk
index 7fb007fb1c..05ed8416a9 100644
--- a/src/makefiles/pgxs.mk
+++ b/src/makefiles/pgxs.mk
@@ -103,51 +103,30 @@ endif # MODULE_big
install: all installdirs
ifneq (,$(EXTENSION))
- @for file in $(addprefix $(srcdir)/, $(addsuffix .control, $(EXTENSION))); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(addsuffix .control, $(EXTENSION))) '$(DESTDIR)$(datadir)/extension/'
endif # EXTENSION
ifneq (,$(DATA)$(DATA_built))
- @for file in $(addprefix $(srcdir)/, $(DATA)) $(DATA_built); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/$(datamoduledir)'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/$(datamoduledir)'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) $(DATA_built) '$(DESTDIR)$(datadir)/$(datamoduledir)/'
endif # DATA
ifneq (,$(DATA_TSEARCH))
- @for file in $(addprefix $(srcdir)/, $(DATA_TSEARCH)); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/tsearch_data'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/tsearch_data'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA_TSEARCH)) '$(DESTDIR)$(datadir)/tsearch_data/'
endif # DATA_TSEARCH
ifdef MODULES
- @for file in $(addsuffix $(DLSUFFIX), $(MODULES)); do \
- echo "$(INSTALL_SHLIB) $$file '$(DESTDIR)$(pkglibdir)'"; \
- $(INSTALL_SHLIB) $$file '$(DESTDIR)$(pkglibdir)'; \
- done
+ $(INSTALL_SHLIB) $(addsuffix $(DLSUFFIX), $(MODULES)) '$(DESTDIR)$(pkglibdir)/'
endif # MODULES
ifdef DOCS
ifdef docdir
- @for file in $(addprefix $(srcdir)/, $(DOCS)); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(docdir)/$(docmoduledir)'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(docdir)/$(docmoduledir)'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DOCS)) '$(DESTDIR)$(docdir)/$(docmoduledir)/'
endif # docdir
endif # DOCS
ifdef PROGRAM
$(INSTALL_PROGRAM) $(PROGRAM)$(X) '$(DESTDIR)$(bindir)'
endif # PROGRAM
ifdef SCRIPTS
- @for file in $(addprefix $(srcdir)/, $(SCRIPTS)); do \
- echo "$(INSTALL_SCRIPT) $$file '$(DESTDIR)$(bindir)'"; \
- $(INSTALL_SCRIPT) $$file '$(DESTDIR)$(bindir)'; \
- done
+ $(INSTALL_SCRIPT) $(addprefix $(srcdir)/, $(SCRIPTS)) '$(DESTDIR)$(bindir)/'
endif # SCRIPTS
ifdef SCRIPTS_built
- @for file in $(SCRIPTS_built); do \
- echo "$(INSTALL_SCRIPT) $$file '$(DESTDIR)$(bindir)'"; \
- $(INSTALL_SCRIPT) $$file '$(DESTDIR)$(bindir)'; \
- done
+ $(INSTALL_SCRIPT) $(SCRIPTS_built) '$(DESTDIR)$(bindir)/'
endif # SCRIPTS_built
ifdef MODULE_big
diff --git a/src/pl/plperl/GNUmakefile b/src/pl/plperl/GNUmakefile
index 155b60f43f..79a8d5d3fa 100644
--- a/src/pl/plperl/GNUmakefile
+++ b/src/pl/plperl/GNUmakefile
@@ -82,10 +82,7 @@ installdirs: installdirs-lib
uninstall: uninstall-lib uninstall-data
install-data: installdirs
- @for file in $(addprefix $(srcdir)/, $(DATA)); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/'
uninstall-data:
rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA)))
diff --git a/src/pl/plpgsql/src/Makefile b/src/pl/plpgsql/src/Makefile
index 52fbc1c41a..751a98dbb0 100644
--- a/src/pl/plpgsql/src/Makefile
+++ b/src/pl/plpgsql/src/Makefile
@@ -35,10 +35,7 @@ installdirs: installdirs-lib
uninstall: uninstall-lib uninstall-data
install-data: installdirs
- @for file in $(addprefix $(srcdir)/, $(DATA)); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/'
uninstall-data:
rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA)))
diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile
index 86d8741e28..2c0575501a 100644
--- a/src/pl/plpython/Makefile
+++ b/src/pl/plpython/Makefile
@@ -114,10 +114,7 @@ installdirs: installdirs-lib
uninstall: uninstall-lib uninstall-data
install-data: installdirs
- @for file in $(addprefix $(srcdir)/, $(DATA)); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/'
uninstall-data:
rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA)))
@@ -141,7 +138,7 @@ prep3:
-e "s/LANGUAGE plpython2u/LANGUAGE plpython3u/g" \
-e "s/EXTENSION plpythonu/EXTENSION plpython3u/g" \
-e "s/EXTENSION plpython2u/EXTENSION plpython3u/g" \
- $$file >`echo $$file | sed 's,$(srcdir),python3,'`; \
+ $$file >`echo $$file | sed 's,$(srcdir),python3,'` || exit; \
done
clean3:
diff --git a/src/pl/tcl/Makefile b/src/pl/tcl/Makefile
index faffd09fd3..77c173bcd8 100644
--- a/src/pl/tcl/Makefile
+++ b/src/pl/tcl/Makefile
@@ -65,10 +65,7 @@ uninstall: uninstall-lib uninstall-data
$(MAKE) -C modules $@
install-data: installdirs
- @for file in $(addprefix $(srcdir)/, $(DATA)); do \
- echo "$(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'"; \
- $(INSTALL_DATA) $$file '$(DESTDIR)$(datadir)/extension'; \
- done
+ $(INSTALL_DATA) $(addprefix $(srcdir)/, $(DATA)) '$(DESTDIR)$(datadir)/extension/'
uninstall-data:
rm -f $(addprefix '$(DESTDIR)$(datadir)/extension'/, $(notdir $(DATA)))
diff --git a/src/test/isolation/Makefile b/src/test/isolation/Makefile
index 0f709a1e3c..6513b5f24e 100644
--- a/src/test/isolation/Makefile
+++ b/src/test/isolation/Makefile
@@ -10,7 +10,7 @@ ifeq ($(PORTNAME), win32)
LDLIBS += -lws2_32
endif
-override CPPFLAGS := -I$(srcdir) -I$(libpq_srcdir) $(CPPFLAGS)
+override CPPFLAGS := -I$(srcdir) -I$(libpq_srcdir) -I$(srcdir)/../regress $(CPPFLAGS)
override LDLIBS := $(libpq_pgport) $(LDLIBS)
OBJS = specparse.o isolationtester.o
@@ -64,7 +64,7 @@ endif
clean distclean:
rm -f isolationtester$(X) pg_isolation_regress$(X) $(OBJS) isolation_main.o
rm -f pg_regress.o
- rm -rf results
+ rm -rf $(pg_regress_clean_files)
maintainer-clean: distclean
rm -f specparse.c specscanner.c
@@ -72,5 +72,10 @@ maintainer-clean: distclean
installcheck: all
./pg_isolation_regress --inputdir=$(srcdir) --schedule=$(srcdir)/isolation_schedule
-check: all
- ./pg_isolation_regress --temp-install=./tmp_check --inputdir=$(srcdir) --top-builddir=$(top_builddir) --schedule=$(srcdir)/isolation_schedule
+# We can't support "make check" because isolationtester requires libpq, and
+# in fact (on typical platforms using shared libraries) requires libpq to
+# already be installed. You could run "make install" and then run a check
+# using a temp installation, but there seems little point in that.
+check:
+ @echo "'make check' is not supported."
+ @echo "Install PostgreSQL, then 'make installcheck' instead."
diff --git a/src/test/isolation/isolation_main.c b/src/test/isolation/isolation_main.c
index 2df12f879e..18cd8ef7fd 100644
--- a/src/test/isolation/isolation_main.c
+++ b/src/test/isolation/isolation_main.c
@@ -10,7 +10,7 @@
*-------------------------------------------------------------------------
*/
-#include "../regress/pg_regress.h"
+#include "pg_regress.h"
/*
* start an isolation tester process for specified file (including
diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index 44a4858c96..0f77917fb5 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -5,14 +5,12 @@
* Runs an isolation test specified by a spec file.
*/
+#include "postgres_fe.h"
+
#ifdef WIN32
#include <windows.h>
#endif
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
#include "libpq-fe.h"
#include "isolationtester.h"
diff --git a/src/test/isolation/specparse.y b/src/test/isolation/specparse.y
index c684780216..47bfbc4f39 100644
--- a/src/test/isolation/specparse.y
+++ b/src/test/isolation/specparse.y
@@ -10,10 +10,7 @@
*-------------------------------------------------------------------------
*/
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
+#include "postgres_fe.h"
#include "isolationtester.h"
diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out
index 0dc7d045c7..e18eed8c1c 100644
--- a/src/test/regress/expected/foreign_data.out
+++ b/src/test/regress/expected/foreign_data.out
@@ -670,7 +670,7 @@ Has OIDs: no
(1 row)
CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR
-ERROR: "ft1" is not a table
+ERROR: cannot create index on foreign table "ft1"
SELECT * FROM ft1; -- ERROR
ERROR: foreign-data wrapper "dummy" has no handler
EXPLAIN SELECT * FROM ft1; -- ERROR
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index cb66f6380b..622cf94c5a 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -1438,6 +1438,10 @@ pg_timezone_pre_initialize(void)
* This is called after initial loading of postgresql.conf. If no TimeZone
* setting was found therein, we try to derive one from the environment.
* Likewise for log_timezone.
+ *
+ * Note: this is also called from ProcessConfigFile, to re-establish valid
+ * GUC settings if the GUCs have been reset to default following their
+ * removal from postgresql.conf.
*/
void
pg_timezone_initialize(void)
@@ -1463,21 +1467,34 @@ pg_timezone_initialize(void)
log_timezone = def_tz;
}
- /* Now, set the timezone GUC if it's not already set */
- if (GetConfigOption("timezone", false) == NULL)
- {
- /* Tell GUC about the value. Will redundantly call pg_tzset() */
+ /*
+ * Now, set the timezone and log_timezone GUCs if they're still default.
+ * (This will redundantly call pg_tzset().)
+ *
+ * We choose to label these values PGC_S_ENV_VAR, rather than
+ * PGC_S_DYNAMIC_DEFAULT which would be functionally equivalent, because
+ * they came either from getenv("TZ") or from libc behavior that's
+ * determined by process environment of some kind.
+ *
+ * Note: in the case where a setting has just been removed from
+ * postgresql.conf, this code will not do what you might expect, namely
+ * call select_default_timezone() and install that value as the setting.
+ * Rather, the previously active setting --- typically the one from
+ * postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR.
+ * If we did try to install the "correct" default value, the effect would
+ * be that each postmaster child would independently run an extremely
+ * expensive search of the timezone database, bringing the database to its
+ * knees for possibly multiple seconds. This is so unpleasant, and could
+ * so easily be triggered quite unintentionally, that it seems better to
+ * violate the principle of least astonishment.
+ */
+ if (GetConfigOptionResetString("timezone") == NULL)
SetConfigOption("timezone", pg_get_timezone_name(session_timezone),
PGC_POSTMASTER, PGC_S_ENV_VAR);
- }
- /* Likewise for log timezone */
- if (GetConfigOption("log_timezone", false) == NULL)
- {
- /* Tell GUC about the value. Will redundantly call pg_tzset() */
+ if (GetConfigOptionResetString("log_timezone") == NULL)
SetConfigOption("log_timezone", pg_get_timezone_name(log_timezone),
PGC_POSTMASTER, PGC_S_ENV_VAR);
- }
}