You can subscribe to this list here.
2010 |
Jan
|
Feb
|
Mar
|
Apr
(4) |
May
(28) |
Jun
(12) |
Jul
(11) |
Aug
(12) |
Sep
(5) |
Oct
(19) |
Nov
(14) |
Dec
(12) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2011 |
Jan
(18) |
Feb
(30) |
Mar
(115) |
Apr
(89) |
May
(50) |
Jun
(44) |
Jul
(22) |
Aug
(13) |
Sep
(11) |
Oct
(30) |
Nov
(28) |
Dec
(39) |
2012 |
Jan
(38) |
Feb
(18) |
Mar
(43) |
Apr
(91) |
May
(108) |
Jun
(46) |
Jul
(37) |
Aug
(44) |
Sep
(33) |
Oct
(29) |
Nov
(36) |
Dec
(15) |
2013 |
Jan
(35) |
Feb
(611) |
Mar
(5) |
Apr
(55) |
May
(30) |
Jun
(28) |
Jul
(458) |
Aug
(34) |
Sep
(9) |
Oct
(39) |
Nov
(22) |
Dec
(32) |
2014 |
Jan
(16) |
Feb
(16) |
Mar
(42) |
Apr
(179) |
May
(7) |
Jun
(6) |
Jul
(9) |
Aug
|
Sep
(4) |
Oct
|
Nov
(3) |
Dec
|
2015 |
Jan
|
Feb
|
Mar
|
Apr
(2) |
May
(4) |
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
S | M | T | W | T | F | S |
---|---|---|---|---|---|---|
|
|
|
|
|
1
(2) |
2
(3) |
3
|
4
(2) |
5
(3) |
6
(2) |
7
(8) |
8
(12) |
9
|
10
|
11
(17) |
12
(16) |
13
(4) |
14
(3) |
15
(5) |
16
|
17
|
18
(1) |
19
(3) |
20
(2) |
21
(1) |
22
(1) |
23
|
24
|
25
(3) |
26
(1) |
27
|
28
|
29
|
30
|
From: Abbas B. <ga...@us...> - 2011-04-13 16:17:23
|
Project "Postgres-XC". The branch, master has been updated via 0fd714273601a16501bb3599c60ad0aa46e1f839 (commit) from cac1e04b3eeedbe4d6240dbb00c1a326e65f7f97 (commit) - Log ----------------------------------------------------------------- commit 0fd714273601a16501bb3599c60ad0aa46e1f839 Author: Abbas <abb...@en...> Date: Wed Apr 13 21:16:56 2011 +0500 The output is correct as long as we do not add support for INSERT SELECT diff --git a/src/test/regress/expected/vacuum_1.out b/src/test/regress/expected/vacuum_1.out new file mode 100644 index 0000000..776c258 --- /dev/null +++ b/src/test/regress/expected/vacuum_1.out @@ -0,0 +1,73 @@ +-- +-- VACUUM +-- +CREATE TABLE vactst (i INT); +INSERT INTO vactst VALUES (1); +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst VALUES (0); +SELECT count(*) FROM vactst; + count +------- + 2 +(1 row) + +DELETE FROM vactst WHERE i != 0; +SELECT * FROM vactst; + i +--- + 0 +(1 row) + +VACUUM FULL vactst; +UPDATE vactst SET i = i + 1; +ERROR: Partition column can't be updated in current version +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst VALUES (0); +SELECT count(*) FROM vactst; + count +------- + 2 +(1 row) + +DELETE FROM vactst WHERE i != 0; +VACUUM (FULL) vactst; +DELETE FROM vactst; +SELECT * FROM vactst; + i +--- +(0 rows) + +VACUUM (FULL, FREEZE) vactst; +VACUUM (ANALYZE, FULL) vactst; +CREATE TABLE vaccluster (i INT PRIMARY KEY); +NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "vaccluster_pkey" for table "vaccluster" +ALTER TABLE vaccluster CLUSTER ON vaccluster_pkey; +INSERT INTO vaccluster SELECT * FROM vactst; +CLUSTER vaccluster; +VACUUM FULL pg_am; +VACUUM FULL pg_class; +VACUUM FULL pg_database; +VACUUM FULL vaccluster; +VACUUM FULL vactst; +DROP TABLE vaccluster; +DROP TABLE vactst; ----------------------------------------------------------------------- Summary of changes: .../regress/expected/{vacuum.out => vacuum_1.out} | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) copy src/test/regress/expected/{vacuum.out => vacuum_1.out} (96%) hooks/post-receive -- Postgres-XC |
From: Abbas B. <ga...@us...> - 2011-04-13 15:58:17
|
Project "Postgres-XC". The branch, master has been updated via cac1e04b3eeedbe4d6240dbb00c1a326e65f7f97 (commit) from dfa58f3a944a7b1fe4c3f97281c9b1516f0b69a7 (commit) - Log ----------------------------------------------------------------- commit cac1e04b3eeedbe4d6240dbb00c1a326e65f7f97 Author: Abbas <abb...@en...> Date: Wed Apr 13 20:48:00 2011 +0500 This patch fixes the test case tsearch. Some changes were done in SQL to ensure ORDER etc. The original expected output file was wrong. An alternate expected output file was checked in which is not entirely correct. It contains ZERO row results of count(*) for which the bug ID is 3286054 diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out index f17c4bb..e1d7646 100644 --- a/src/test/regress/expected/tsearch.out +++ b/src/test/regress/expected/tsearch.out @@ -46,7 +46,7 @@ WHERE mapcfg = 0 OR mapdict = 0; -- Look for pg_ts_config_map entries that aren't one of parser's token types SELECT * FROM ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt + FROM pg_ts_config ) AS tt RIGHT JOIN pg_ts_config_map AS m ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) WHERE @@ -188,7 +188,6 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; 494 (1 row) - RESET enable_seqscan; INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; @@ -225,7 +224,7 @@ SELECT ts_lexize('english_stem', 'identity'); {ident} (1 row) -SELECT * FROM ts_token_type('default') ORDER BY tokid; +SELECT * FROM ts_token_type('default'); tokid | alias | description -------+-----------------+------------------------------------------ 1 | asciiword | Word, all ASCII @@ -255,143 +254,145 @@ SELECT * FROM ts_token_type('default') ORDER BY tokid; SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 te...@st... qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>"> /usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 -<i <b> wow < jqw <> qwerty') ORDER BY tokid,token; +<i <b> wow < jqw <> qwerty'); tokid | token -------+-------------------------------------- - 1 | ad - 1 | asdf - 1 | dw - 1 | hjwer - 1 | i - 1 | jf - 1 | jqw - 1 | qwe - 1 | qwe - 1 | qwe + 22 | 345 + 12 | 1 | qwe - 1 | qwer - 1 | qwerty - 1 | qwqwe - 1 | readline - 1 | readline - 1 | readline - 1 | sdjk - 1 | we - 1 | wefjn - 1 | wow - 3 | ewr1 - 3 | ewri2 - 4 | te...@st... - 5 | 1aew.werc.ewr/?ad=qwe&dw - 5 | 3aew.werc.ewr/?ad=qwe&dw - 5 | 6aew.werc.ewr:8100/?ad=qwe&dw - 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 + 12 | @ + 19 | efd.r + 12 | ' + 14 | http:// + 6 | www.com + 12 | / + 14 | http:// 5 | aew.werc.ewr/?ad=qwe&dw - 6 | 1aew.werc.ewr - 6 | 2aew.werc.ewr - 6 | 3aew.werc.ewr - 6 | 4aew.werc.ewr - 6 | 5aew.werc.ewr:8100 - 6 | 6aew.werc.ewr:8100 - 6 | 7aew.werc.ewr:8100 6 | aew.werc.ewr - 6 | www.com - 7 | +4.0e-10 - 11 | qwe - 11 | wer - 12 | - : - 12 | - : - 12 | - 12 | + 18 | /?ad=qwe&dw 12 | + 5 | 1aew.werc.ewr/?ad=qwe&dw + 6 | 1aew.werc.ewr + 18 | /?ad=qwe&dw 12 | + 6 | 2aew.werc.ewr 12 | + 14 | http:// + 5 | 3aew.werc.ewr/?ad=qwe&dw + 6 | 3aew.werc.ewr + 18 | /?ad=qwe&dw 12 | + 14 | http:// + 6 | 4aew.werc.ewr 12 | + 14 | http:// + 5 | 5aew.werc.ewr:8100/? + 6 | 5aew.werc.ewr:8100 + 18 | /? + 12 | + 1 | ad + 12 | = + 1 | qwe + 12 | & + 1 | dw 12 | + 5 | 6aew.werc.ewr:8100/?ad=qwe&dw + 6 | 6aew.werc.ewr:8100 + 18 | /?ad=qwe&dw 12 | + 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 + 6 | 7aew.werc.ewr:8100 + 18 | /?ad=qwe&dw=%20%32 12 | + 7 | +4.0e-10 12 | + 1 | qwe 12 | + 1 | qwe 12 | + 1 | qwqwe 12 | + 20 | 234.435 12 | + 22 | 455 12 | + 20 | 5.005 12 | + 4 | te...@st... 12 | + 16 | qwe-wer + 11 | qwe + 12 | - + 11 | wer 12 | + 1 | asdf 12 | + 13 | <fr> + 1 | qwer 12 | + 1 | jf 12 | + 1 | sdjk + 12 | < + 1 | we 12 | + 1 | hjwer 12 | + 13 | <werrwe> 12 | + 3 | ewr1 + 12 | > + 3 | ewri2 12 | + 13 | <a href="qwe<qwe>"> + 12 | + + | + 19 | /usr/local/fff 12 | + 19 | /awdf/dwqe/4325 12 | + 19 | rewt/ewr 12 | + 1 | wefjn 12 | + 19 | /wqe-324/ewr 12 | + 19 | gist.h 12 | + 19 | gist.h.c 12 | + 19 | gist.c + 12 | . + 1 | readline 12 | + 20 | 4.2 12 | + 20 | 4.2 + 12 | . + 20 | 4.2 + 12 | , + 1 | readline + 20 | -4.2 12 | + 1 | readline + 20 | -4.2 + 12 | . + 22 | 234 + 12 | + + | + 12 | < + 1 | i 12 | + 13 | <b> 12 | + 1 | wow 12 | - 12 | ' - 12 | & - 12 | , - 12 | - - 12 | . - 12 | . - 12 | . - 12 | / - 12 | /? - 12 | < - 12 | < 12 | < + 1 | jqw + 12 | 12 | <> - 12 | = - 12 | > - 12 | @ - 13 | <a href="qwe<qwe>"> - 13 | <b> - 13 | <fr> - 13 | <werrwe> - 14 | http:// - 14 | http:// - 14 | http:// - 14 | http:// - 14 | http:// - 16 | qwe-wer - 18 | /?ad=qwe&dw - 18 | /?ad=qwe&dw - 18 | /?ad=qwe&dw - 18 | /?ad=qwe&dw - 18 | /?ad=qwe&dw=%20%32 - 19 | /awdf/dwqe/4325 - 19 | /usr/local/fff - 19 | /wqe-324/ewr - 19 | efd.r - 19 | gist.c - 19 | gist.h - 19 | gist.h.c - 19 | rewt/ewr - 20 | -4.2 - 20 | -4.2 - 20 | 234.435 - 20 | 4.2 - 20 | 4.2 - 20 | 4.2 - 20 | 5.005 - 22 | 234 - 22 | 345 - 22 | 455 -(131 rows) + 1 | qwerty +(133 rows) SELECT to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 te...@st... qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>"> /usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 @@ -670,7 +671,7 @@ to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); </html> (1 row) ---Check if headline fragments work +--Check if headline fragments work SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -1082,7 +1083,7 @@ select * from pendtest where 'ipsa:*'::tsquery @@ ts; 'ipsam':2 'lore':1 (1 row) -select * from pendtest where 'ips:*'::tsquery @@ ts ORDER BY 1; +select * from pendtest where 'ips:*'::tsquery @@ ts; ts -------------------- 'ipsam':2 'lore':1 diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch_1.out similarity index 98% copy from src/test/regress/expected/tsearch.out copy to src/test/regress/expected/tsearch_1.out index f17c4bb..e8c35d4 100644 --- a/src/test/regress/expected/tsearch.out +++ b/src/test/regress/expected/tsearch_1.out @@ -46,7 +46,7 @@ WHERE mapcfg = 0 OR mapdict = 0; -- Look for pg_ts_config_map entries that aren't one of parser's token types SELECT * FROM ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt + FROM pg_ts_config ) AS tt RIGHT JOIN pg_ts_config_map AS m ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) WHERE @@ -188,7 +188,6 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; 494 (1 row) - RESET enable_seqscan; INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; @@ -284,6 +283,7 @@ SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.w 4 | te...@st... 5 | 1aew.werc.ewr/?ad=qwe&dw 5 | 3aew.werc.ewr/?ad=qwe&dw + 5 | 5aew.werc.ewr:8100/? 5 | 6aew.werc.ewr:8100/?ad=qwe&dw 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 5 | aew.werc.ewr/?ad=qwe&dw @@ -299,10 +299,12 @@ SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.w 7 | +4.0e-10 11 | qwe 11 | wer - 12 | - : - 12 | - : + 12 | < + 12 | < + 12 | <> + 12 | < + 12 | = + 12 | > 12 | 12 | 12 | @@ -342,22 +344,20 @@ SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.w 12 | 12 | 12 | + 12 | 12 | ' - 12 | & - 12 | , 12 | - + 12 | , + 12 | / 12 | . 12 | . 12 | . - 12 | / - 12 | /? - 12 | < - 12 | < - 12 | < - 12 | <> - 12 | = - 12 | > 12 | @ + 12 | & + 12 | + + | + 12 | + + | 13 | <a href="qwe<qwe>"> 13 | <b> 13 | <fr> @@ -368,22 +368,23 @@ SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.w 14 | http:// 14 | http:// 16 | qwe-wer + 18 | /? 18 | /?ad=qwe&dw 18 | /?ad=qwe&dw 18 | /?ad=qwe&dw 18 | /?ad=qwe&dw 18 | /?ad=qwe&dw=%20%32 19 | /awdf/dwqe/4325 - 19 | /usr/local/fff - 19 | /wqe-324/ewr 19 | efd.r 19 | gist.c 19 | gist.h 19 | gist.h.c 19 | rewt/ewr + 19 | /usr/local/fff + 19 | /wqe-324/ewr + 20 | 234.435 20 | -4.2 20 | -4.2 - 20 | 234.435 20 | 4.2 20 | 4.2 20 | 4.2 @@ -391,7 +392,7 @@ SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.w 22 | 234 22 | 345 22 | 455 -(131 rows) +(133 rows) SELECT to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 te...@st... qwe-wer asdf <fr>qwer jf sdjk<we hjwer <werrwe> ewr1> ewri2 <a href="qwe<qwe>"> /usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 @@ -410,18 +411,18 @@ SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://ae (1 row) -- ts_debug -SELECT * from ts_debug('english', '<myns:foo-bar_baz.blurfl>abc&nm1;def©ghiõjkl</myns:foo-bar_baz.blurfl>'); +SELECT * from ts_debug('english', '<myns:foo-bar_baz.blurfl>abc&nm1;def©ghiõjkl</myns:foo-bar_baz.blurfl>') ORDER BY alias, description, token; alias | description | token | dictionaries | dictionary | lexemes -----------+-----------------+----------------------------+----------------+--------------+--------- - tag | XML tag | <myns:foo-bar_baz.blurfl> | {} | | asciiword | Word, all ASCII | abc | {english_stem} | english_stem | {abc} - entity | XML entity | &nm1; | {} | | asciiword | Word, all ASCII | def | {english_stem} | english_stem | {def} - entity | XML entity | © | {} | | asciiword | Word, all ASCII | ghi | {english_stem} | english_stem | {ghi} - entity | XML entity | õ | {} | | asciiword | Word, all ASCII | jkl | {english_stem} | english_stem | {jkl} + entity | XML entity | õ | {} | | + entity | XML entity | &nm1; | {} | | + entity | XML entity | © | {} | | tag | XML tag | </myns:foo-bar_baz.blurfl> | {} | | + tag | XML tag | <myns:foo-bar_baz.blurfl> | {} | | (9 rows) -- check parsing of URLs @@ -670,7 +671,7 @@ to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); </html> (1 row) ---Check if headline fragments work +--Check if headline fragments work SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -800,6 +801,7 @@ SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new & york'; (1 row) CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); +ERROR: Cannot locally enforce a unique index on round robin distributed table. SET enable_seqscan=OFF; SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new & york'; count @@ -1038,35 +1040,33 @@ SELECT to_tsquery('SKIES & My | booKs'); CREATE TRIGGER tsvectorupdate BEFORE UPDATE OR INSERT ON test_tsvector FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 'pg_catalog.english', t); +ERROR: Postgres-XC does not support TRIGGER yet +DETAIL: The feature is not currently supported SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- - 0 -(1 row) +(0 rows) INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- - 1 -(1 row) +(0 rows) UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- - 0 -(1 row) +(0 rows) INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); count ------- - 1 -(1 row) +(0 rows) -- test finding items in GIN's pending list -create temp table pendtest (ts tsvector); +create table pendtest (ts tsvector); create index pendtest_idx on pendtest using gin(ts); insert into pendtest values (to_tsvector('Lore ipsam')); insert into pendtest values (to_tsvector('Lore ipsum')); @@ -1099,3 +1099,4 @@ select * from pendtest where 'ipi:*'::tsquery @@ ts; ---- (0 rows) +drop table pendtest; diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql index a9e814b..270bfd2 100644 --- a/src/test/regress/sql/tsearch.sql +++ b/src/test/regress/sql/tsearch.sql @@ -33,7 +33,7 @@ WHERE mapcfg = 0 OR mapdict = 0; -- Look for pg_ts_config_map entries that aren't one of parser's token types SELECT * FROM ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid - FROM pg_ts_config ) AS tt + FROM pg_ts_config ) AS tt RIGHT JOIN pg_ts_config_map AS m ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) WHERE @@ -76,7 +76,7 @@ SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; - + RESET enable_seqscan; INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; @@ -214,7 +214,7 @@ ff-bg </html>', to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); ---Check if headline fragments work +--Check if headline fragments work SELECT ts_headline('english', ' Day after day, day after day, We stuck, nor breath nor motion, @@ -369,7 +369,7 @@ INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); -- test finding items in GIN's pending list -create temp table pendtest (ts tsvector); +create table pendtest (ts tsvector); create index pendtest_idx on pendtest using gin(ts); insert into pendtest values (to_tsvector('Lore ipsam')); insert into pendtest values (to_tsvector('Lore ipsum')); @@ -378,3 +378,5 @@ select * from pendtest where 'ipsa:*'::tsquery @@ ts; select * from pendtest where 'ips:*'::tsquery @@ ts ORDER BY 1; select * from pendtest where 'ipt:*'::tsquery @@ ts; select * from pendtest where 'ipi:*'::tsquery @@ ts; +drop table pendtest; + ----------------------------------------------------------------------- Summary of changes: src/test/regress/expected/tsearch.out | 197 ++++++++++---------- .../expected/{tsearch.out => tsearch_1.out} | 71 ++++---- src/test/regress/sql/tsearch.sql | 10 +- 3 files changed, 141 insertions(+), 137 deletions(-) copy src/test/regress/expected/{tsearch.out => tsearch_1.out} (98%) hooks/post-receive -- Postgres-XC |
From: Abbas B. <ga...@us...> - 2011-04-13 12:18:40
|
Project "Postgres-XC". The branch, master has been updated via dfa58f3a944a7b1fe4c3f97281c9b1516f0b69a7 (commit) from 04202cbdd151776cb44eefac7eeecdf05467cca7 (commit) - Log ----------------------------------------------------------------- commit dfa58f3a944a7b1fe4c3f97281c9b1516f0b69a7 Author: Abbas <abb...@en...> Date: Wed Apr 13 17:18:10 2011 +0500 SQL Changes and an alternate expected output file for making largeobject test case pass diff --git a/src/test/regress/input/largeobject.source b/src/test/regress/input/largeobject.source index 807cfd7..e4e38e2 100644 --- a/src/test/regress/input/largeobject.source +++ b/src/test/regress/input/largeobject.source @@ -6,11 +6,11 @@ SET bytea_output TO escape; -- Load a file -CREATE TABLE lotest_stash_values (loid oid, fd integer); +CREATE TABLE lotest_stash_values (loid oid, junk integer, fd integer); -- lo_creat(mode integer) returns oid -- The mode arg to lo_creat is unused, some vestigal holdover from ancient times -- returns the large object id -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); +INSERT INTO lotest_stash_values (loid) VALUES( lo_creat(42) ); -- NOTE: large objects require transactions BEGIN; @@ -116,7 +116,7 @@ SELECT lo_unlink(loid) from lotest_stash_values; TRUNCATE lotest_stash_values; -INSERT INTO lotest_stash_values (loid) SELECT lo_import('@abs_srcdir@/data/tenk.data'); +INSERT INTO lotest_stash_values (loid) VALUES( lo_import('@abs_srcdir@/data/tenk.data') ); BEGIN; UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); @@ -144,12 +144,12 @@ END; SELECT lo_export(loid, '@abs_builddir@/results/lotest.txt') FROM lotest_stash_values; -\lo_import 'results/lotest.txt' +\lo_import '@abs_builddir@/results/lotest.txt' \set newloid :LASTOID -- just make sure \lo_export does not barf -\lo_export :newloid 'results/lotest2.txt' +\lo_export :newloid '@abs_builddir@/results/lotest2.txt' -- This is a hack to test that export/import are reversible -- This uses knowledge about the inner workings of large object mechanism diff --git a/src/test/regress/input/largeobject.source b/src/test/regress/output/largeobject_2.source similarity index 68% copy from src/test/regress/input/largeobject.source copy to src/test/regress/output/largeobject_2.source index 807cfd7..553521e 100644 --- a/src/test/regress/input/largeobject.source +++ b/src/test/regress/output/largeobject_2.source @@ -1,20 +1,16 @@ -- -- Test large object support -- - -- ensure consistent test output regardless of the default bytea format SET bytea_output TO escape; - -- Load a file -CREATE TABLE lotest_stash_values (loid oid, fd integer); +CREATE TABLE lotest_stash_values (loid oid, junk integer, fd integer); -- lo_creat(mode integer) returns oid -- The mode arg to lo_creat is unused, some vestigal holdover from ancient times -- returns the large object id -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); - +INSERT INTO lotest_stash_values (loid) VALUES( lo_creat(42) ); -- NOTE: large objects require transactions BEGIN; - -- lo_open(lobjId oid, mode integer) returns integer -- The mode parameter to lo_open uses two constants: -- INV_READ = 0x20000 @@ -22,7 +18,6 @@ BEGIN; -- The return value is a file descriptor-like value which remains valid for the -- transaction. UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - -- loread/lowrite names are wonky, different from other functions which are lo_* -- lowrite(fd integer, data bytea) returns integer -- the integer is the number of bytes written @@ -49,17 +44,23 @@ And miles to go before I sleep. -- Robert Frost ') FROM lotest_stash_values; + lowrite +--------- + 578 +(1 row) -- lo_close(fd integer) returns integer -- return value is 0 for success, or <0 for error (actually only -1, but...) SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) END; - -- Read out a portion BEGIN; UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - -- lo_lseek(fd integer, offset integer, whence integer) returns integer -- offset is in bytes, whence is one of three values: -- SEEK_SET (= 0) meaning relative to beginning @@ -67,99 +68,219 @@ UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS inte -- SEEK_END (= 2) meaning relative to end (offset better be negative) -- returns current position in file SELECT lo_lseek(fd, 422, 0) FROM lotest_stash_values; + lo_lseek +---------- + 422 +(1 row) -- loread/lowrite names are wonky, different from other functions which are lo_* -- loread(fd integer, len integer) returns bytea SELECT loread(fd, 35) FROM lotest_stash_values; + loread +------------------------------------- + The woods are lovely, dark and deep +(1 row) SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; + lo_lseek +---------- + 438 +(1 row) SELECT lowrite(fd, 'n') FROM lotest_stash_values; + lowrite +--------- + 1 +(1 row) SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 439 +(1 row) SELECT lo_lseek(fd, -156, 2) FROM lotest_stash_values; + lo_lseek +---------- + 422 +(1 row) SELECT loread(fd, 35) FROM lotest_stash_values; + loread +------------------------------------- + The woods are lonely, dark and deep +(1 row) SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) END; - -- Test resource management BEGIN; SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; -ABORT; + lo_open +--------- + 0 +(1 row) +ABORT; -- Test truncation. BEGIN; UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - SELECT lo_truncate(fd, 10) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + SELECT loread(fd, 15) FROM lotest_stash_values; + loread +--------------- + \012Whose woo +(1 row) SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------------------------------------ + \000\000\000\000\000\000\000\000\000\000 +(1 row) + SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 10000 +(1 row) + SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 10000 +(1 row) SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 5000 +(1 row) + SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 5000 +(1 row) SELECT lo_close(fd) FROM lotest_stash_values; -END; + lo_close +---------- + 0 +(1 row) +END; -- lo_unlink(lobjId oid) returns integer -- return value appears to always be 1 SELECT lo_unlink(loid) from lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) TRUNCATE lotest_stash_values; - -INSERT INTO lotest_stash_values (loid) SELECT lo_import('@abs_srcdir@/data/tenk.data'); - +INSERT INTO lotest_stash_values (loid) VALUES( lo_import('/home/abbas/pgxc/postgres-xc/src/test/regress/data/tenk.data') ); BEGIN; UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - -- with the default BLKSZ, LOBLKSZ = 2048, so this positions us for a block -- edge case SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) -- this should get half of the value from page 0 and half from page 1 of the -- large object SELECT loread(fd, 36) FROM lotest_stash_values; + loread +----------------------------------------------------------------- + AAA\011FBAAAA\011VVVVxx\0122513\01132\0111\0111\0113\01113\0111 +(1 row) SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 2066 +(1 row) SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; + lo_lseek +---------- + 2040 +(1 row) SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; + lowrite +--------- + 16 +(1 row) SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) SELECT loread(fd, 36) FROM lotest_stash_values; + loread +----------------------------------------------------- + AAA\011FBAAAAabcdefghijklmnop1\0111\0113\01113\0111 +(1 row) SELECT lo_close(fd) FROM lotest_stash_values; -END; + lo_close +---------- + 0 +(1 row) +END; SELECT lo_export(loid, '@abs_builddir@/results/lotest.txt') FROM lotest_stash_values; + lo_export +----------- + 1 +(1 row) -\lo_import 'results/lotest.txt' - +\lo_import '@abs_builddir@/results/lotest.txt' \set newloid :LASTOID - -- just make sure \lo_export does not barf -\lo_export :newloid 'results/lotest2.txt' - +\lo_export :newloid '@abs_builddir@/results/lotest2.txt' -- This is a hack to test that export/import are reversible -- This uses knowledge about the inner workings of large object mechanism -- which should not be used outside it. This makes it a HACK SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) EXCEPT SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; - + pageno | data +--------+------ +(0 rows) SELECT lo_unlink(loid) FROM lotest_stash_values; -\lo_unlink :newloid + lo_unlink +----------- + 1 +(1 row) +\lo_unlink :newloid TRUNCATE lotest_stash_values; ----------------------------------------------------------------------- Summary of changes: src/test/regress/input/largeobject.source | 10 +++++----- .../{largeobject.source => largeobject_2.source} | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) copy src/test/regress/output/{largeobject.source => largeobject_2.source} (94%) hooks/post-receive -- Postgres-XC |
From: Michael P. <mic...@us...> - 2011-04-13 03:25:41
|
Project "Postgres-XC". The branch, master has been updated via 04202cbdd151776cb44eefac7eeecdf05467cca7 (commit) from 2764f7b5379e3eb66d038a74aa0031e29a0de72c (commit) - Log ----------------------------------------------------------------- commit 04202cbdd151776cb44eefac7eeecdf05467cca7 Author: Michael P <mic...@us...> Date: Wed Apr 13 12:17:38 2011 +0900 Extension of CLEAN CONNECTION for specific users New synopsis of this query becomes: CLEAN CONNECTION TO (COORDINATOR num | DATANODE num | ALL {FORCE}) [ FOR DATABASE dbname ] [ TO USER username ]; This increases cleaning granularity of XC cluster. This query cannot be launched if no database or no user are specified. If only a user name is specified, cleaning is made for all the databases of this user. If only a database name is specified, cleaning is made for all the users of this database. It is also possible to specify both database and user name. This commit also contains modifications for cleaning connections when dropping a database. Now before dropping a database, CLEAN CONNECTION is launched first on all the Coordinators. Then drop query is sent to backend nodes. diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 0c7a3b5..1b43d6e 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -343,7 +343,7 @@ static TypeName *TableFuncTypeName(List *columns); %type <defelt> opt_binary opt_oids copy_delimiter /* PGXC_BEGIN */ %type <list> data_node_list coord_list -%type <str> DirectStmt +%type <str> DirectStmt CleanConnDbName CleanConnUserName /* PGXC_END */ %type <boolean> copy_from @@ -7144,38 +7144,52 @@ data_node_list: * QUERY: * * CLEAN CONNECTION TO (COORDINATOR num | NODE num | ALL {FORCE}) - * FOR DATABASE dbname + * [ FOR DATABASE dbname ] + * [ TO USER username ] * *****************************************************************************/ -CleanConnStmt: CLEAN CONNECTION TO COORDINATOR coord_list FOR DATABASE database_name +CleanConnStmt: CLEAN CONNECTION TO COORDINATOR coord_list CleanConnDbName CleanConnUserName { CleanConnStmt *n = makeNode(CleanConnStmt); n->is_coord = true; n->nodes = $5; n->is_force = false; - n->dbname = $8; + n->dbname = $6; + n->username = $7; $$ = (Node *)n; } - | CLEAN CONNECTION TO NODE data_node_list FOR DATABASE database_name + | CLEAN CONNECTION TO NODE data_node_list CleanConnDbName CleanConnUserName { CleanConnStmt *n = makeNode(CleanConnStmt); n->is_coord = false; n->nodes = $5; n->is_force = false; - n->dbname = $8; + n->dbname = $6; + n->username = $7; $$ = (Node *)n; } - | CLEAN CONNECTION TO ALL opt_force FOR DATABASE database_name + | CLEAN CONNECTION TO ALL opt_force CleanConnDbName CleanConnUserName { CleanConnStmt *n = makeNode(CleanConnStmt); n->is_coord = true; n->nodes = NIL; n->is_force = $5; - n->dbname = $8; + n->dbname = $6; + n->username = $7; $$ = (Node *)n; } ; + +CleanConnDbName: FOR DATABASE database_name { $$ = $3; } + | FOR database_name { $$ = $2; } + | /* EMPTY */ { $$ = NIL; } + ; + +CleanConnUserName: TO USER RoleId { $$ = $3; } + | TO RoleId { $$ = $2; } + | /* EMPTY */ { $$ = NIL; } + ; /* PGXC_END */ /***************************************************************************** diff --git a/src/backend/pgxc/pool/poolmgr.c b/src/backend/pgxc/pool/poolmgr.c index ecd18f9..717e3e3 100644 --- a/src/backend/pgxc/pool/poolmgr.c +++ b/src/backend/pgxc/pool/poolmgr.c @@ -98,7 +98,10 @@ static DatabasePool *create_database_pool(const char *database, const char *user static void insert_database_pool(DatabasePool *pool); static int destroy_database_pool(const char *database, const char *user_name); static DatabasePool *find_database_pool(const char *database, const char *user_name); -static DatabasePool *find_database_pool_to_clean(const char *database, List *dn_list, List *co_list); +static DatabasePool *find_database_pool_to_clean(const char *database, + const char *user_name, + List *dn_list, + List *co_list); static DatabasePool *remove_database_pool(const char *database, const char *user_name); static int *agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist); static PGXCNodePoolSlot *acquire_connection(DatabasePool *dbPool, int node, char client_conn_type); @@ -110,8 +113,14 @@ static void destroy_slot(PGXCNodePoolSlot *slot); static void grow_pool(DatabasePool *dbPool, int index, char client_conn_type); static void destroy_node_pool(PGXCNodePool *node_pool); static void PoolerLoop(void); -static int clean_connection(List *dn_discard, List *co_discard, const char *database); -static int *abort_pids(int *count, int pid, const char *database); +static int clean_connection(List *dn_discard, + List *co_discard, + const char *database, + const char *user_name); +static int *abort_pids(int *count, + int pid, + const char *database, + const char *user_name); /* Signal handlers */ static void pooler_die(SIGNAL_ARGS); @@ -742,13 +751,39 @@ PoolManagerGetConnections(List *datanodelist, List *coordlist) * Take a lock forbidding access to Pooler for new transactions. */ int -PoolManagerAbortTransactions(char *dbname, int **proc_pids) +PoolManagerAbortTransactions(char *dbname, char *username, int **proc_pids) { - int num_proc_ids = 0; + int num_proc_ids = 0; + int n32, msglen; + char msgtype = 'a'; + int dblen = dbname ? strlen(dbname) + 1 : 0; + int userlen = username ? strlen(username) + 1 : 0; Assert(Handle); - pool_putmessage(&Handle->port, 'a', dbname, strlen(dbname) + 1); + /* Message type */ + pool_putbytes(&Handle->port, &msgtype, 1); + + /* Message length */ + msglen = dblen + userlen + 12; + n32 = htonl(msglen); + pool_putbytes(&Handle->port, (char *) &n32, 4); + + /* Length of Database string */ + n32 = htonl(dblen); + pool_putbytes(&Handle->port, (char *) &n32, 4); + + /* Send database name, followed by \0 terminator if necessary */ + if (dbname) + pool_putbytes(&Handle->port, dbname, dblen); + + /* Length of Username string */ + n32 = htonl(userlen); + pool_putbytes(&Handle->port, (char *) &n32, 4); + + /* Send user name, followed by \0 terminator if necessary */ + if (username) + pool_putbytes(&Handle->port, username, userlen); pool_flush(&Handle->port); @@ -763,13 +798,15 @@ PoolManagerAbortTransactions(char *dbname, int **proc_pids) * Clean up Pooled connections */ void -PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname) +PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname, char *username) { int totlen = list_length(datanodelist) + list_length(coordlist); int nodes[totlen + 2]; ListCell *nodelist_item; - int i, n32; + int i, n32, msglen; char msgtype = 'f'; + int userlen = username ? strlen(username) + 1 : 0; + int dblen = dbname ? strlen(dbname) + 1 : 0; nodes[0] = htonl(list_length(datanodelist)); i = 1; @@ -794,18 +831,29 @@ PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname) pool_putbytes(&Handle->port, &msgtype, 1); /* Message length */ - n32 = htonl(sizeof(int) * (totlen + 2) + strlen(dbname) + 9); + msglen = sizeof(int) * (totlen + 2) + dblen + userlen + 12; + n32 = htonl(msglen); pool_putbytes(&Handle->port, (char *) &n32, 4); /* Send list of nodes */ pool_putbytes(&Handle->port, (char *) nodes, sizeof(int) * (totlen + 2)); /* Length of Database string */ - n32 = htonl(strlen(dbname) + 1); + n32 = htonl(dblen); pool_putbytes(&Handle->port, (char *) &n32, 4); - /* Send database name, followed by \0 terminator */ - pool_putbytes(&Handle->port, dbname, strlen(dbname) + 1); + /* Send database name, followed by \0 terminator if necessary */ + if (dbname) + pool_putbytes(&Handle->port, dbname, dblen); + + /* Length of Username string */ + n32 = htonl(userlen); + pool_putbytes(&Handle->port, (char *) &n32, 4); + + /* Send user name, followed by \0 terminator if necessary */ + if (username) + pool_putbytes(&Handle->port, username, userlen); + pool_flush(&Handle->port); /* Receive result message */ @@ -830,8 +878,8 @@ agent_handle_input(PoolAgent * agent, StringInfo s) */ for (;;) { - const char *database; - const char *user_name; + const char *database = NULL; + const char *user_name = NULL; const char *set_command; bool is_local; int datanodecount; @@ -858,9 +906,17 @@ agent_handle_input(PoolAgent * agent, StringInfo s) { case 'a': /* ABORT */ pool_getmessage(&agent->port, s, 0); - database = pq_getmsgstring(s); + len = pq_getmsgint(s, 4); + if (len > 0) + database = pq_getmsgbytes(s, len); + + len = pq_getmsgint(s, 4); + if (len > 0) + user_name = pq_getmsgbytes(s, len); + pq_getmsgend(s); - pids = abort_pids(&len, agent->pid, database); + + pids = abort_pids(&len, agent->pid, database, user_name); pool_sendpids(&agent->port, pids, len); if (pids) @@ -896,11 +952,16 @@ agent_handle_input(PoolAgent * agent, StringInfo s) for (i = 0; i < coordcount; i++) coordlist = lappend_int(coordlist, pq_getmsgint(s, 4)); len = pq_getmsgint(s, 4); - database = pq_getmsgbytes(s, len); + if (len > 0) + database = pq_getmsgbytes(s, len); + len = pq_getmsgint(s, 4); + if (len > 0) + user_name = pq_getmsgbytes(s, len); + pq_getmsgend(s); /* Clean up connections here */ - res = clean_connection(datanodelist, coordlist, database); + res = clean_connection(datanodelist, coordlist, database, user_name); list_free(datanodelist); list_free(coordlist); @@ -1551,7 +1612,10 @@ find_database_pool(const char *database, const char *user_name) * Find pool to be cleaned for specified database in the list */ static DatabasePool * -find_database_pool_to_clean(const char *database, List *dn_list, List *co_list) +find_database_pool_to_clean(const char *database, + const char *user_name, + List *dn_list, + List *co_list) { DatabasePool *databasePool; @@ -1559,33 +1623,44 @@ find_database_pool_to_clean(const char *database, List *dn_list, List *co_list) databasePool = databasePools; while (databasePool) { - /* Check for given database name */ - if (strcmp(database, databasePool->database) == 0) + ListCell *nodelist_item; + + /* If database name does not correspond, move to next one */ + if (database && strcmp(database, databasePool->database) != 0) { - ListCell *nodelist_item; + databasePool = databasePool->next; + continue; + } - /* Check if this database pool is clean for given coordinator list */ - foreach (nodelist_item, co_list) - { - int nodenum = lfirst_int(nodelist_item); + /* If user name does not correspond, move to next one */ + if (user_name && strcmp(user_name, databasePool->user_name) != 0) + { + databasePool = databasePool->next; + continue; + } - if (databasePool->coordNodePools && - databasePool->coordNodePools[nodenum - 1] && - databasePool->coordNodePools[nodenum - 1]->freeSize != 0) - return databasePool; - } + /* Check if this database pool is clean for given coordinator list */ + foreach (nodelist_item, co_list) + { + int nodenum = lfirst_int(nodelist_item); - /* Check if this database pool is clean for given datanode list */ - foreach (nodelist_item, dn_list) - { - int nodenum = lfirst_int(nodelist_item); + if (databasePool->coordNodePools && + databasePool->coordNodePools[nodenum - 1] && + databasePool->coordNodePools[nodenum - 1]->freeSize != 0) + return databasePool; + } - if (databasePool->dataNodePools && - databasePool->dataNodePools[nodenum - 1] && - databasePool->dataNodePools[nodenum - 1]->freeSize != 0) - return databasePool; - } + /* Check if this database pool is clean for given datanode list */ + foreach (nodelist_item, dn_list) + { + int nodenum = lfirst_int(nodelist_item); + + if (databasePool->dataNodePools && + databasePool->dataNodePools[nodenum - 1] && + databasePool->dataNodePools[nodenum - 1]->freeSize != 0) + return databasePool; } + databasePool = databasePool->next; } return databasePool; @@ -2016,7 +2091,7 @@ PoolerLoop(void) #define TIMEOUT_CLEAN_LOOP 10 int -clean_connection(List *dn_discard, List *co_discard, const char *database) +clean_connection(List *dn_discard, List *co_discard, const char *database, const char *user_name) { DatabasePool *databasePool; int dn_len = list_length(dn_discard); @@ -2038,11 +2113,11 @@ clean_connection(List *dn_discard, List *co_discard, const char *database) co_list[count++] = lfirst_int(nodelist_item); /* Find correct Database pool to clean */ - databasePool = find_database_pool_to_clean(database, dn_discard, co_discard); + databasePool = find_database_pool_to_clean(database, user_name, dn_discard, co_discard); while (databasePool) { - databasePool = find_database_pool_to_clean(database, dn_discard, co_discard); + databasePool = find_database_pool_to_clean(database, user_name, dn_discard, co_discard); /* Database pool has not been found, cleaning is over */ if (!databasePool) @@ -2125,7 +2200,7 @@ clean_connection(List *dn_discard, List *co_discard, const char *database) * Send back to client list of PIDs signaled to watch them. */ int * -abort_pids(int *len, int pid, const char *database) +abort_pids(int *len, int pid, const char *database, const char *user_name) { int *pids = NULL; int i = 0; @@ -2141,15 +2216,20 @@ abort_pids(int *len, int pid, const char *database) /* Send a SIGTERM signal to all processes of Pooler agents except this one */ for (count = 0; count < agentCount; count++) { - if (poolAgents[count]->pid != pid && - strcmp(poolAgents[count]->pool->database, database) == 0) - { - if (kill(poolAgents[count]->pid, SIGTERM) < 0) - elog(ERROR, "kill(%ld,%d) failed: %m", - (long) poolAgents[count]->pid, SIGTERM); + if (poolAgents[count]->pid == pid) + continue; - pids[i++] = poolAgents[count]->pid; - } + if (database && strcmp(poolAgents[count]->pool->database, database) != 0) + continue; + + if (user_name && strcmp(poolAgents[count]->pool->user_name, user_name) != 0) + continue; + + if (kill(poolAgents[count]->pid, SIGTERM) < 0) + elog(ERROR, "kill(%ld,%d) failed: %m", + (long) poolAgents[count]->pid, SIGTERM); + + pids[i++] = poolAgents[count]->pid; } *len = i; diff --git a/src/backend/pgxc/pool/poolutils.c b/src/backend/pgxc/pool/poolutils.c index 24a5c72..cf16c35 100644 --- a/src/backend/pgxc/pool/poolutils.c +++ b/src/backend/pgxc/pool/poolutils.c @@ -23,6 +23,7 @@ #include "pgxc/poolutils.h" #include "access/gtm.h" #include "commands/dbcommands.h" +#include "utils/lsyscache.h" #include "utils/acl.h" #include "nodes/parsenodes.h" @@ -36,30 +37,40 @@ * Use of CLEAN CONNECTION is limited to a super user. * It is advised to clean connections before shutting down a Node or drop a Database. * - * Pool cleaning is done for all the users of a given database. - * * SQL query synopsis is as follows: * CLEAN CONNECTION TO * (COORDINATOR num | DATANODE num | ALL {FORCE}) - * FOR DATABASE dbname + * [ FOR DATABASE dbname ] + * [ TO USER username ] * - * Connection cleaning has to be made on a chosen database called dbname. + * Connection cleaning can be made on a chosen database called dbname + * or/and a chosen user. + * Cleaning is done for all the users of a given database + * if no user name is specified. + * Cleaning is done for all the databases for one user + * if no database name is specified. * * It is also possible to clean connections of several Coordinators or Datanodes * Ex: CLEAN CONNECTION TO DATANODE 1,5,7 FOR DATABASE template1 * CLEAN CONNECTION TO COORDINATOR 2,4,6 FOR DATABASE template1 + * CLEAN CONNECTION TO DATANODE 3,5 TO USER postgres + * CLEAN CONNECTION TO COORDINATOR 6,1 FOR DATABASE template1 TO USER postgres * * Or even to all Coordinators/Datanodes at the same time * Ex: CLEAN CONNECTION TO DATANODE * FOR DATABASE template1 * CLEAN CONNECTION TO COORDINATOR * FOR DATABASE template1 + * CLEAN CONNECTION TO COORDINATOR * TO USER postgres + * CLEAN CONNECTION TO COORDINATOR * FOR DATABASE template1 TO USER postgres * * When FORCE is used, all the transactions using pooler connections are aborted, * and pooler connections are cleaned up. * Ex: CLEAN CONNECTION TO ALL FORCE FOR DATABASE template1; + * CLEAN CONNECTION TO ALL FORCE TO USER postgres; + * CLEAN CONNECTION TO ALL FORCE FOR DATABASE template1 TO USER postgres; * * FORCE can only be used with TO ALL, as it takes a lock on pooler to stop requests * asking for connections, aborts all the connections in the cluster, and cleans up - * pool connections. + * pool connections associated to the given user and/or database. */ void CleanConnection(CleanConnStmt *stmt) @@ -69,10 +80,10 @@ CleanConnection(CleanConnStmt *stmt) List *dn_list = NIL; List *stmt_nodes = NIL; char *dbname = stmt->dbname; + char *username = stmt->username; bool is_coord = stmt->is_coord; bool is_force = stmt->is_force; int max_node_number = 0; - Oid oid; /* Only a DB administrator can clean pooler connections */ if (!superuser()) @@ -80,9 +91,15 @@ CleanConnection(CleanConnStmt *stmt) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to clean pool connections"))); + /* Database name or user name is mandatory */ + if (!dbname && !username) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("must define Database name or user name"))); + /* Check if the Database exists by getting its Oid */ - oid = get_database_oid(dbname); - if (!OidIsValid(oid)) + if (dbname && + !OidIsValid(get_database_oid(dbname))) { ereport(WARNING, (errcode(ERRCODE_UNDEFINED_DATABASE), @@ -90,6 +107,16 @@ CleanConnection(CleanConnStmt *stmt) return; } + /* Check if role exists */ + if (username && + !OidIsValid(get_roleid(username))) + { + ereport(WARNING, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("role \"%s\" does not exist", username))); + return; + } + /* * FORCE is activated, * Send a SIGTERM signal to all the processes and take a lock on Pooler @@ -102,7 +129,7 @@ CleanConnection(CleanConnStmt *stmt) int *proc_pids = NULL; int num_proc_pids, count; - num_proc_pids = PoolManagerAbortTransactions(dbname, &proc_pids); + num_proc_pids = PoolManagerAbortTransactions(dbname, username, &proc_pids); /* * Watch the processes that received a SIGTERM. @@ -180,7 +207,7 @@ CleanConnection(CleanConnStmt *stmt) */ /* Finish by contacting Pooler Manager */ - PoolManagerCleanConnection(dn_list, co_list, dbname); + PoolManagerCleanConnection(dn_list, co_list, dbname, username); /* Clean up memory */ if (co_list) @@ -200,13 +227,14 @@ DropDBCleanConnection(char *dbname) { List *co_list = GetAllCoordNodes(); List *dn_list = GetAllDataNodes(); + char query[256]; /* Check permissions for this database */ if (!pg_database_ownercheck(get_database_oid(dbname), GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE, dbname); - PoolManagerCleanConnection(dn_list, co_list, dbname); + PoolManagerCleanConnection(dn_list, co_list, dbname, NULL); /* Clean up memory */ if (co_list) diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index 98d19fa..e4f33c5 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1355,8 +1355,16 @@ standard_ProcessUtility(Node *parsetree, #ifdef PGXC /* Clean connections before dropping a database on local node */ - if (IS_PGXC_COORDINATOR) + if (IS_PGXC_COORDINATOR && !IsConnFromCoord()) + { + char query[256]; DropDBCleanConnection(stmt->dbname); + + /* Clean also remote Coordinators */ + sprintf(query, "CLEAN CONNECTION TO ALL FOR DATABASE %s;", stmt->dbname); + + ExecUtilityStmtOnNodes(query, NULL, true, EXEC_ON_COORDS); + } #endif PreventTransactionChain(isTopLevel, "DROP DATABASE"); diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index ebe0c56..c571667 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -2521,6 +2521,7 @@ typedef struct CleanConnStmt NodeTag type; List *nodes; /* list of nodes dropped */ char *dbname; /* name of database to drop connections */ + char *username; /* name of user whose connections are dropped */ bool is_coord; /* type of connections dropped */ bool is_force; /* option force */ } CleanConnStmt; diff --git a/src/include/pgxc/poolmgr.h b/src/include/pgxc/poolmgr.h index febf1d4..18569dc 100644 --- a/src/include/pgxc/poolmgr.h +++ b/src/include/pgxc/poolmgr.h @@ -141,10 +141,10 @@ extern int PoolManagerSetCommand(bool is_local, const char *set_command); extern int *PoolManagerGetConnections(List *datanodelist, List *coordlist); /* Clean pool connections */ -extern void PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname); +extern void PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname, char *username); /* Send Abort signal to transactions being run */ -extern int PoolManagerAbortTransactions(char *dbname, int **proc_pids); +extern int PoolManagerAbortTransactions(char *dbname, char *username, int **proc_pids); /* Return connections back to the pool, for both Coordinator and Datanode connections */ extern void PoolManagerReleaseConnections(int dn_ndisc, int* dn_discard, int co_ndisc, int* co_discard); ----------------------------------------------------------------------- Summary of changes: src/backend/parser/gram.y | 30 +++++-- src/backend/pgxc/pool/poolmgr.c | 184 ++++++++++++++++++++++++++----------- src/backend/pgxc/pool/poolutils.c | 50 ++++++++-- src/backend/tcop/utility.c | 10 ++- src/include/nodes/parsenodes.h | 1 + src/include/pgxc/poolmgr.h | 4 +- 6 files changed, 205 insertions(+), 74 deletions(-) hooks/post-receive -- Postgres-XC |