From 048be0e4203eb7d4c378707cd66b82bff1e5903c Mon Sep 17 00:00:00 2001 From: Zakirov Artur Date: Fri, 6 May 2016 15:16:13 +0300 Subject: [PATCH 01/13] RUM commited --- Makefile | 27 + README.md | 92 ++ data/rum.data | 52 + expected/rum.out | 173 ++ rum--1.0.sql | 66 + rum.control | 5 + rum.h | 856 ++++++++++ rum_ts_utils.c | 677 ++++++++ rumbtree.c | 514 ++++++ rumbulk.c | 291 ++++ rumdatapage.c | 1318 ++++++++++++++++ rumentrypage.c | 552 +++++++ rumfast.c | 948 +++++++++++ rumget.c | 2248 ++++++++++++++++++++++++++ ruminsert.c | 798 ++++++++++ rumscan.c | 486 ++++++ rumsort.c | 3916 ++++++++++++++++++++++++++++++++++++++++++++++ rumsort.h | 135 ++ rumutil.c | 805 ++++++++++ rumvacuum.c | 798 ++++++++++ rumvalidate.c | 290 ++++ sql/rum.sql | 81 + t/001_wal.pl | 81 + 23 files changed, 15209 insertions(+) create mode 100644 Makefile create mode 100644 README.md create mode 100644 data/rum.data create mode 100644 expected/rum.out create mode 100644 rum--1.0.sql create mode 100644 rum.control create mode 100644 rum.h create mode 100644 rum_ts_utils.c create mode 100644 rumbtree.c create mode 100644 rumbulk.c create mode 100644 rumdatapage.c create mode 100644 rumentrypage.c create mode 100644 rumfast.c create mode 100644 rumget.c create mode 100644 ruminsert.c create mode 100644 rumscan.c create mode 100644 rumsort.c create mode 100644 rumsort.h create mode 100644 rumutil.c create mode 100644 rumvacuum.c create mode 100644 rumvalidate.c create mode 100644 sql/rum.sql create mode 100644 t/001_wal.pl diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..97180053b2 --- /dev/null +++ b/Makefile @@ -0,0 +1,27 @@ +# contrib/rum/Makefile + +MODULE_big = rum +OBJS = rumsort.o rum_ts_utils.o \ + rumbtree.o rumbulk.o rumdatapage.o \ + rumentrypage.o rumfast.o rumget.o ruminsert.o \ + rumscan.o rumutil.o rumvacuum.o rumvalidate.o $(WIN32RES) + +EXTENSION = rum +DATA = rum--1.0.sql +PGFILEDESC = "RUM index access method" + +REGRESS = rum + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = contrib/rum +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif + +wal-check: temp-install + $(prove_check) diff --git a/README.md b/README.md new file mode 100644 index 0000000000..f3d7743c6e --- /dev/null +++ b/README.md @@ -0,0 +1,92 @@ +# RUM - RUM access methods + +## Introduction + +The **rum** module provides access methods to work with RUM index. It is based +on the GIN access methods code. + +## License + +This module available under the same license as +[PostgreSQL](http://www.postgresql.org/about/licence/). + +## Installation + +Before build and install **rum** you should ensure following: + +* PostgreSQL version is 9.6. + +Typical installation procedure may look like this: + + $ git clone https://github.com/postgrespro/rum + $ cd rum + $ make USE_PGXS=1 + $ sudo make USE_PGXS=1 install + $ make USE_PGXS=1 installcheck + $ psql DB -c "CREATE EXTENSION rum;" + +## New access method and operator class + +The **rum** module provides the access method **rum** and the operator class +**rum_tsvector_ops**. + +The module provides new operators. + +| Operator | Returns | Description +| ------------------- | ------- | ---------------------------------------------- +| tsvector >< tsquery | float4 | Returns distance between tsvector and tsquery. 
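
The `><` operator is bound to the `rum_ts_distance(tsvector, tsquery)` function
created in `rum--1.0.sql`, so the operator and the function return the same
value and can be used interchangeably. A minimal sketch, using the `test_rum`
table from the Examples section below:

```sql
SELECT t,
       a >< to_tsquery('english', 'beautiful') AS op_rank,
       rum_ts_distance(a, to_tsquery('english', 'beautiful')) AS func_rank
FROM test_rum
WHERE a @@ to_tsquery('english', 'beautiful');
```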
+ +## Examples + +Let us assume we have the table: + +```sql +CREATE TABLE test_rum(t text, a tsvector); + +CREATE TRIGGER tsvectorupdate +BEFORE UPDATE OR INSERT ON test_rum +FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger('a', 'pg_catalog.english', 't'); + +INSERT INTO test_rum(t) VALUES ('The situation is most beautiful'); +INSERT INTO test_rum(t) VALUES ('It is a beautiful'); +INSERT INTO test_rum(t) VALUES ('It looks like a beautiful place'); +``` + +To create the **rum** index we need create an extension: + +```sql +CREATE EXTENSION rum; +``` + +Then we can create new index: + +```sql +CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); +``` + +And we can execute the following queries: + +```sql +=# SELECT t, a >< to_tsquery('english', 'beautiful | place') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'beautiful | place') order by a >< to_tsquery('english', 'beautiful | place'); + t | rank +---------------------------------+----------- + The situation is most beautiful | 0.0303964 + It is a beautiful | 0.0303964 + It looks like a beautiful place | 0.0607927 +(3 rows) + +=# SELECT t, a >< to_tsquery('english', 'place | situation') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'place | situation') order by a >< to_tsquery('english', 'place | situation'); + t | rank +---------------------------------+----------- + The situation is most beautiful | 0.0303964 + It looks like a beautiful place | 0.0303964 +(2 rows) +``` + +## Authors + +Alexander Korotkov Postgres Professional Ltd., Russia + +Oleg Bartunov Postgres Professional Ltd., Russia + +Teodor Sigaev Postgres Professional Ltd., Russia diff --git a/data/rum.data b/data/rum.data new file mode 100644 index 0000000000..676ec030ef --- /dev/null +++ b/data/rum.data @@ -0,0 +1,52 @@ +As a reward for your reformation I write to you on this precious sheet. +You see I have come to be wonderfully attached to Heidelberg, the +beautiful, the quaint, the historically poetic, learned and picturesque +old town on the Neckar. It seems like another home. So I could not show +my appreciation of you in a more complimentary way than by sending this +little series of pictures. Have you ever been here, I wonder? You did +not say, but you wrote as if you knew it by sight as well as by heart. +As I cannot know, I will venture an explanation. The panorama speaks for +itself. Put on your “specs” and look at the castle, half way up the +_berg_, “the Jettenhuhl, a wooded spur of the Konigestuhl.” Look at it +from the “Terrasse.” Thus you’ll get something of an idea of it. The +Gesprente Thurm is the one that was blown up by the French. The +thickness of the walls, twenty-one feet, and the solid masonry, held it +so well that only a fragment, as it were, gave way. It still hangs as if +ready to be replaced. “Das Grosse Fass Gebaude,” too, you will have no +difficulty in making out. If you only had it with its 49,000 gallons of +wine, but wouldn’t you divide with your neighbors! The columns in the +portico that shows in the Schlosshof are the four brought from +Charlemagne’s palace at Ingelheim by the Count Palatine Ludwig, some +time between 1508-44. The Zum Ritter has nothing to do with the castle, +but is an ancient structure (1592) in the Renaissance style, and one of +the few that escaped destruction in 1693. It is a beautiful, highly +ornamental building, and I wish you could see it, if you have not seen +it. 
+ +All the above information, I beg you to believe, I do not intend you +to think was evolved from my inner consciousness, but gathered from +the--nearest guide-book! + +I am so much obliged to you for mapping out Switzerland to me. I have +been trying my best to get all those “passes” into my brain. Now, thanks +to your letter, I have them all in the handiest kind of a bunch. Ariel +like, “I’ll do my bidding gently,” and as surely, if I get there. But +there are dreadful reports of floods and roads caved in and bridges +swept away and snows and--enough of such exciting items as sets one +thinking--“to go or not to go?” We are this far on the way. Reached +here this afternoon. Have spent the evening sauntering in the gardens, +the Conversationhaus, the bazaar, mingling with the throng, listening to +the band, and comparing what it is with what it was. It was a gay and +curious spectacle, but on the whole had “the banquet-hall deserted” +look. The situation is most beautiful. It lies, you know, at the +entrance of the Black Forest, among picturesque, thickly-wooded hills, +in the valley of the Oos, and extends up the slope of some of the hills. +The Oos is a most turbid, turbulent stream; dashes through part of the +town with angry, headlong speed. There is an avenue along its bank of +oaks, limes and maples, bordered with flower-beds and shrubberies, and +adorned with fountains and handsome villas. We shall devote to-morrow to +seeing all there is to be seen, and go to Strassburg to-morrow evening +for two or three days. From there to Constance, and then hold _our_ +“Council” as to further movements. +def fgr +def xxx fgr diff --git a/expected/rum.out b/expected/rum.out new file mode 100644 index 0000000000..88d330d37b --- /dev/null +++ b/expected/rum.out @@ -0,0 +1,173 @@ +CREATE EXTENSION rum; +CREATE TABLE test_rum( t text, a tsvector ); +CREATE TRIGGER tsvectorupdate +BEFORE UPDATE OR INSERT ON test_rum +FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger('a', 'pg_catalog.english', 't'); +CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); +\copy test_rum(t) from 'data/rum.data'; +SET enable_seqscan=off; +explain (costs off) +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote'); + QUERY PLAN +------------------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on test_rum + Recheck Cond: (a @@ '''ever'' | ''wrote'''::tsquery) + -> Bitmap Index Scan on rumidx + Index Cond: (a @@ '''ever'' | ''wrote'''::tsquery) +(5 rows) + +explain (costs off) +SELECT * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote') +ORDER BY a >< to_tsquery('pg_catalog.english', 'ever|wrote'); + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: ((a >< '''ever'' | ''wrote'''::tsquery)) + -> Bitmap Heap Scan on test_rum + Recheck Cond: (a @@ '''ever'' | ''wrote'''::tsquery) + -> Bitmap Index Scan on rumidx + Index Cond: (a @@ '''ever'' | ''wrote'''::tsquery) +(6 rows) + +explain (costs off) +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', + 'def <-> fgr'); + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Scan using rumidx on test_rum + Index Cond: (a @@ '''def'' <-> ''fgr'''::tsquery) +(3 rows) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote'); + count +------- + 2 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 
'have&wish'); + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'knew&brain'); + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'among'); + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'structure&ancient'); + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', '(complimentary|sight)&(sending|heart)'); + count +------- + 2 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', + 'def <-> fgr'); + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', + 'def <2> fgr'); + count +------- + 2 +(1 row) + +SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way')), * + FROM test_rum + WHERE a @@ to_tsquery('pg_catalog.english', 'way') + ORDER BY a >< to_tsquery('pg_catalog.english', 'way'); + rum_ts_distance | t | a +-----------------+--------------------------------------------------------------------------+--------------------------------------------------------------- + 0.0607927 | my appreciation of you in a more complimentary way than by sending this | 'appreci':2 'complimentari':8 'send':12 'way':9 + 0.0607927 | itself. Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 + 0.0607927 | so well that only a fragment, as it were, gave way. It still hangs as if | 'fragment':6 'gave':10 'hang':14 'still':13 'way':11 'well':2 + 0.0607927 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 +(4 rows) + +SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), * + FROM test_rum + WHERE a @@ to_tsquery('pg_catalog.english', 'way & (go | half)') + ORDER BY a >< to_tsquery('pg_catalog.english', 'way & (go | half)'); + rum_ts_distance | t | a +-----------------+---------------------------------------------------------------------+--------------------------------------------------------- + 0.103556 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 + 0.0991032 | itself. 
Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 +(2 rows) + +INSERT INTO test_rum (t) VALUES ('foo bar foo the over foo qq bar'); +INSERT INTO test_rum (t) VALUES ('345 qwerty copyright'); +INSERT INTO test_rum (t) VALUES ('345 qwerty'); +INSERT INTO test_rum (t) VALUES ('A fat cat has just eaten a rat.'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'bar'); + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'qwerty&345'); + count +------- + 2 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', '345'); + count +------- + 2 +(1 row) + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'rat'); + count +------- + 1 +(1 row) + +SELECT a FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'bar') ORDER BY a; + a +------------------------------ + 'bar':2,8 'foo':1,3,6 'qq':7 +(1 row) + +DELETE FROM test_rum; +SELECT count(*) from test_rum; + count +------- + 0 +(1 row) + +CREATE TABLE tst (i int4, t tsvector); +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(1,100000) i; +CREATE INDEX tstidx ON tst USING rum (t rum_tsvector_ops); +DELETE FROM tst WHERE i = 1; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(10001,11000) i; +DELETE FROM tst WHERE i = 2; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(11001,12000) i; +DELETE FROM tst WHERE i = 3; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(12001,13000) i; +DELETE FROM tst WHERE i = 4; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(13001,14000) i; +DELETE FROM tst WHERE i = 5; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(14001,15000) i; diff --git a/rum--1.0.sql b/rum--1.0.sql new file mode 100644 index 0000000000..2556d17f7b --- /dev/null +++ b/rum--1.0.sql @@ -0,0 +1,66 @@ +CREATE OR REPLACE FUNCTION rumhandler(internal) +RETURNS index_am_handler +AS 'MODULE_PATHNAME' +LANGUAGE C; + +-- Access method +CREATE ACCESS METHOD rum TYPE INDEX HANDLER rumhandler; + +-- Opclasses +CREATE FUNCTION rum_ts_distance(tsvector,tsquery) +RETURNS float4 +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR >< ( + LEFTARG = tsvector, + RIGHTARG = tsquery, + PROCEDURE = rum_ts_distance, + COMMUTATOR = '><' +); + +CREATE FUNCTION rum_extract_tsvector(tsvector,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION rum_extract_tsquery(tsvector,internal,smallint,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION rum_tsvector_config(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION rum_tsquery_pre_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION rum_tsquery_consistent(internal, smallint, tsvector, integer, internal, internal, internal, internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION 
rum_tsquery_distance(internal,smallint,tsvector,int,internal,internal,internal,internal,internal) +RETURNS float8 +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR CLASS rum_tsvector_ops +FOR TYPE tsvector USING rum +AS + OPERATOR 1 @@ (tsvector, tsquery), + OPERATOR 2 >< (tsvector, tsquery) FOR ORDER BY pg_catalog.float_ops, + FUNCTION 1 gin_cmp_tslexeme(text, text), + FUNCTION 2 rum_extract_tsvector(tsvector,internal,internal,internal,internal), + FUNCTION 3 rum_extract_tsquery(tsvector,internal,smallint,internal,internal,internal,internal), + FUNCTION 4 rum_tsquery_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + FUNCTION 5 gin_cmp_prefix(text,text,smallint,internal), + FUNCTION 6 gin_tsquery_triconsistent(internal,smallint,tsvector,int,internal,internal,internal), + FUNCTION 7 rum_tsvector_config(internal), + FUNCTION 8 rum_tsquery_pre_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + FUNCTION 9 rum_tsquery_distance(internal,smallint,tsvector,int,internal,internal,internal,internal,internal), + STORAGE text; diff --git a/rum.control b/rum.control new file mode 100644 index 0000000000..b5d28f2586 --- /dev/null +++ b/rum.control @@ -0,0 +1,5 @@ +# RUM extension +comment = 'RUM index access method' +default_version = '1.0' +module_pathname = '$libdir/rum' +relocatable = true diff --git a/rum.h b/rum.h new file mode 100644 index 0000000000..b922f36148 --- /dev/null +++ b/rum.h @@ -0,0 +1,856 @@ +/*------------------------------------------------------------------------- + * + * bloom.h + * Exported definitions for RUM index. + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 2006-2016, PostgreSQL Global Development Group + * + *------------------------------------------------------------------------- + */ + +#ifndef __RUM_H__ +#define __RUM_H__ + +#include "access/amapi.h" +#include "access/generic_xlog.h" +#include "access/gin.h" +#include "access/itup.h" +#include "lib/rbtree.h" +#include "storage/bufmgr.h" + +#include "rumsort.h" + +/* + * Page opaque data in a inverted index page. + * + * Note: RUM does not include a page ID word as do the other index types. + * This is OK because the opaque data is only 8 bytes and so can be reliably + * distinguished by size. Revisit this if the size ever increases. + * Further note: as of 9.2, SP-GiST also uses 8-byte special space. This is + * still OK, as long as RUM isn't using all of the high-order bits in its + * flags word, because that way the flags word cannot match the page ID used + * by SP-GiST. + */ +typedef struct RumPageOpaqueData +{ + BlockNumber rightlink; /* next page if any */ + OffsetNumber maxoff; /* number entries on RUM_DATA page: number of + * heap ItemPointers on RUM_DATA|RUM_LEAF page + * or number of PostingItems on RUM_DATA & + * ~RUM_LEAF page. On RUM_LIST page, number of + * heap tuples. */ + OffsetNumber freespace; + uint16 flags; /* see bit definitions below */ +} RumPageOpaqueData; + +typedef RumPageOpaqueData *RumPageOpaque; + +#define RUM_DATA (1 << 0) +#define RUM_LEAF (1 << 1) +#define RUM_DELETED (1 << 2) +#define RUM_META (1 << 3) +#define RUM_LIST (1 << 4) +#define RUM_LIST_FULLROW (1 << 5) /* makes sense only on RUM_LIST page */ + +/* Page numbers of fixed-location pages */ +#define RUM_METAPAGE_BLKNO (0) +#define RUM_ROOT_BLKNO (1) + +typedef struct RumMetaPageData +{ + /* + * Pointers to head and tail of pending list, which consists of RUM_LIST + * pages. 
These store fast-inserted entries that haven't yet been moved + * into the regular RUM structure. + */ + BlockNumber head; + BlockNumber tail; + + /* + * Free space in bytes in the pending list's tail page. + */ + uint32 tailFreeSize; + + /* + * We store both number of pages and number of heap tuples that are in the + * pending list. + */ + BlockNumber nPendingPages; + int64 nPendingHeapTuples; + + /* + * Statistics for planner use (accurate as of last VACUUM) + */ + BlockNumber nTotalPages; + BlockNumber nEntryPages; + BlockNumber nDataPages; + int64 nEntries; + + /* + * RUM version number (ideally this should have been at the front, but too + * late now. Don't move it!) + * + * Currently 1 (for indexes initialized in 9.1 or later) + * + * Version 0 (indexes initialized in 9.0 or before) is compatible but may + * be missing null entries, including both null keys and placeholders. + * Reject full-index-scan attempts on such indexes. + */ + int32 rumVersion; +} RumMetaPageData; + +#define RUM_CURRENT_VERSION 1 + +#define RumPageGetMeta(p) \ + ((RumMetaPageData *) PageGetContents(p)) + +/* + * Macros for accessing a RUM index page's opaque data + */ +#define RumPageGetOpaque(page) ( (RumPageOpaque) PageGetSpecialPointer(page) ) + +#define RumPageIsLeaf(page) ( (RumPageGetOpaque(page)->flags & RUM_LEAF) != 0 ) +#define RumPageSetLeaf(page) ( RumPageGetOpaque(page)->flags |= RUM_LEAF ) +#define RumPageSetNonLeaf(page) ( RumPageGetOpaque(page)->flags &= ~RUM_LEAF ) +#define RumPageIsData(page) ( (RumPageGetOpaque(page)->flags & RUM_DATA) != 0 ) +#define RumPageSetData(page) ( RumPageGetOpaque(page)->flags |= RUM_DATA ) +#define RumPageIsList(page) ( (RumPageGetOpaque(page)->flags & RUM_LIST) != 0 ) +#define RumPageSetList(page) ( RumPageGetOpaque(page)->flags |= RUM_LIST ) +#define RumPageHasFullRow(page) ( (RumPageGetOpaque(page)->flags & RUM_LIST_FULLROW) != 0 ) +#define RumPageSetFullRow(page) ( RumPageGetOpaque(page)->flags |= RUM_LIST_FULLROW ) + +#define RumPageIsDeleted(page) ( (RumPageGetOpaque(page)->flags & RUM_DELETED) != 0 ) +#define RumPageSetDeleted(page) ( RumPageGetOpaque(page)->flags |= RUM_DELETED) +#define RumPageSetNonDeleted(page) ( RumPageGetOpaque(page)->flags &= ~RUM_DELETED) + +#define RumPageRightMost(page) ( RumPageGetOpaque(page)->rightlink == InvalidBlockNumber) + +/* + * We use our own ItemPointerGet(BlockNumber|GetOffsetNumber) + * to avoid Asserts, since sometimes the ip_posid isn't "valid" + */ +#define RumItemPointerGetBlockNumber(pointer) \ + BlockIdGetBlockNumber(&(pointer)->ip_blkid) + +#define RumItemPointerGetOffsetNumber(pointer) \ + ((pointer)->ip_posid) + +/* + * Special-case item pointer values needed by the RUM search logic. + * MIN: sorts less than any valid item pointer + * MAX: sorts greater than any valid item pointer + * LOSSY PAGE: indicates a whole heap page, sorts after normal item + * pointers for that page + * Note that these are all distinguishable from an "invalid" item pointer + * (which is InvalidBlockNumber/0) as well as from all normal item + * pointers (which have item numbers in the range 1..MaxHeapTuplesPerPage). 
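 * Concretely (see the macros below), MIN is encoded as (0, 0), MAX as
 * (InvalidBlockNumber, 0xffff), and a lossy pointer for heap block B as
 * (B, 0xffff).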
+ */ +#define ItemPointerSetMin(p) \ + ItemPointerSet((p), (BlockNumber)0, (OffsetNumber)0) +#define ItemPointerIsMin(p) \ + (RumItemPointerGetOffsetNumber(p) == (OffsetNumber)0 && \ + RumItemPointerGetBlockNumber(p) == (BlockNumber)0) +#define ItemPointerSetMax(p) \ + ItemPointerSet((p), InvalidBlockNumber, (OffsetNumber)0xffff) +#define ItemPointerIsMax(p) \ + (RumItemPointerGetOffsetNumber(p) == (OffsetNumber)0xffff && \ + RumItemPointerGetBlockNumber(p) == InvalidBlockNumber) +#define ItemPointerSetLossyPage(p, b) \ + ItemPointerSet((p), (b), (OffsetNumber)0xffff) +#define ItemPointerIsLossyPage(p) \ + (RumItemPointerGetOffsetNumber(p) == (OffsetNumber)0xffff && \ + RumItemPointerGetBlockNumber(p) != InvalidBlockNumber) + +/* + * Posting item in a non-leaf posting-tree page + */ +typedef struct +{ + /* We use BlockIdData not BlockNumber to avoid padding space wastage */ + BlockIdData child_blkno; + ItemPointerData key; +} PostingItem; + +#define PostingItemGetBlockNumber(pointer) \ + BlockIdGetBlockNumber(&(pointer)->child_blkno) + +#define PostingItemSetBlockNumber(pointer, blockNumber) \ + BlockIdSet(&((pointer)->child_blkno), (blockNumber)) + +/* + * Category codes to distinguish placeholder nulls from ordinary NULL keys. + * Note that the datatype size and the first two code values are chosen to be + * compatible with the usual usage of bool isNull flags. + * + * RUM_CAT_EMPTY_QUERY is never stored in the index; and notice that it is + * chosen to sort before not after regular key values. + */ +typedef signed char RumNullCategory; + +#define RUM_CAT_NORM_KEY 0 /* normal, non-null key value */ +#define RUM_CAT_NULL_KEY 1 /* null key value */ +#define RUM_CAT_EMPTY_ITEM 2 /* placeholder for zero-key item */ +#define RUM_CAT_NULL_ITEM 3 /* placeholder for null item */ +#define RUM_CAT_EMPTY_QUERY (-1) /* placeholder for full-scan query */ + +/* + * Access macros for null category byte in entry tuples + */ +#define RumCategoryOffset(itup,rumstate) \ + (IndexInfoFindDataOffset((itup)->t_info) + \ + ((rumstate)->oneCol ? 
0 : sizeof(int16))) +/*#define RumGetNullCategory(itup,rumstate) \ + (*((RumNullCategory *) ((char*)(itup) + RumCategoryOffset(itup,rumstate)))) + #define RumSetNullCategory(itup,rumstate,c) \ + (*((RumNullCategory *) ((char*)(itup) + RumCategoryOffset(itup,rumstate))) = (c))*/ + +#define RumGetNullCategory(itup,rumstate) \ + (*((RumNullCategory *) ((char*)(itup) + IndexTupleSize(itup) - sizeof(RumNullCategory)))) +#define RumSetNullCategory(itup,rumstate,c) \ + (*((RumNullCategory *) ((char*)(itup) + IndexTupleSize(itup) - sizeof(RumNullCategory))) = (c)) + +/* + * Access macros for leaf-page entry tuples (see discussion in README) + */ +#define RumGetNPosting(itup) RumItemPointerGetOffsetNumber(&(itup)->t_tid) +#define RumSetNPosting(itup,n) ItemPointerSetOffsetNumber(&(itup)->t_tid,n) +#define RUM_TREE_POSTING ((OffsetNumber)0xffff) +#define RumIsPostingTree(itup) (RumGetNPosting(itup) == RUM_TREE_POSTING) +#define RumSetPostingTree(itup, blkno) ( RumSetNPosting((itup),RUM_TREE_POSTING), ItemPointerSetBlockNumber(&(itup)->t_tid, blkno) ) +#define RumGetPostingTree(itup) RumItemPointerGetBlockNumber(&(itup)->t_tid) + +#define RumGetPostingOffset(itup) RumItemPointerGetBlockNumber(&(itup)->t_tid) +#define RumSetPostingOffset(itup,n) ItemPointerSetBlockNumber(&(itup)->t_tid,n) +#define RumGetPosting(itup) ((Pointer) ((char*)(itup) + RumGetPostingOffset(itup))) + +#define RumMaxItemSize \ + MAXALIGN_DOWN(((BLCKSZ - SizeOfPageHeaderData - \ + MAXALIGN(sizeof(RumPageOpaqueData))) / 6 - sizeof(ItemIdData))) + +/* + * Access macros for non-leaf entry tuples + */ +#define RumGetDownlink(itup) RumItemPointerGetBlockNumber(&(itup)->t_tid) +#define RumSetDownlink(itup,blkno) ItemPointerSet(&(itup)->t_tid, blkno, InvalidOffsetNumber) + + +/* + * Data (posting tree) pages + */ +#define RumDataPageGetRightBound(page) ((ItemPointer) PageGetContents(page)) +#define RumDataPageGetData(page) \ + (PageGetContents(page) + MAXALIGN(sizeof(ItemPointerData))) +#define RumSizeOfDataPageItem(page) \ + (RumPageIsLeaf(page) ? sizeof(ItemPointerData) : sizeof(PostingItem)) +#define RumDataPageGetItem(page,i) \ + (RumDataPageGetData(page) + ((i)-1) * RumSizeOfDataPageItem(page)) + +#define RumDataPageGetFreeSpace(page) \ + (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ + - MAXALIGN(sizeof(ItemPointerData)) \ + - RumPageGetOpaque(page)->maxoff * RumSizeOfDataPageItem(page) \ + - MAXALIGN(sizeof(RumPageOpaqueData))) + +#define RumMaxLeafDataItems \ + ((BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - \ + MAXALIGN(sizeof(ItemPointerData)) - \ + MAXALIGN(sizeof(RumPageOpaqueData))) \ + / sizeof(ItemPointerData)) + +/* + * List pages + */ +#define RumListPageSize \ + ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(RumPageOpaqueData)) ) + +typedef struct +{ + ItemPointerData iptr; + OffsetNumber offsetNumer; + uint16 pageOffset; +} RumDataLeafItemIndex; + +#define RumDataLeafIndexCount 32 + +#define RumDataPageSize \ + (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ + - MAXALIGN(sizeof(ItemPointerData)) \ + - MAXALIGN(sizeof(RumPageOpaqueData)) \ + - MAXALIGN(sizeof(RumDataLeafItemIndex) * RumDataLeafIndexCount)) + +#define RumDataPageFreeSpacePre(page,ptr) \ + (RumDataPageSize \ + - ((ptr) - RumDataPageGetData(page))) + +#define RumPageGetIndexes(page) \ + ((RumDataLeafItemIndex *)(RumDataPageGetData(page) + RumDataPageSize)) + + +/* + * Storage type for RUM's reloptions + */ +typedef struct RumOptions +{ + int32 vl_len_; /* varlena header (do not touch directly!) */ + bool useFastUpdate; /* use fast updates? 
*/ +} RumOptions; + +#define RUM_DEFAULT_USE_FASTUPDATE false +#define RumGetUseFastUpdate(relation) \ + ((relation)->rd_options ? \ + ((RumOptions *) (relation)->rd_options)->useFastUpdate : RUM_DEFAULT_USE_FASTUPDATE) + + +/* Macros for buffer lock/unlock operations */ +#define RUM_UNLOCK BUFFER_LOCK_UNLOCK +#define RUM_SHARE BUFFER_LOCK_SHARE +#define RUM_EXCLUSIVE BUFFER_LOCK_EXCLUSIVE + + +/* + * RumState: working data structure describing the index being worked on + */ +typedef struct RumState +{ + Relation index; + bool oneCol; /* true if single-column index */ + + /* + * origTupDesc is the nominal tuple descriptor of the index, ie, the i'th + * attribute shows the key type (not the input data type!) of the i'th + * index column. In a single-column index this describes the actual leaf + * index tuples. In a multi-column index, the actual leaf tuples contain + * a smallint column number followed by a key datum of the appropriate + * type for that column. We set up tupdesc[i] to describe the actual + * rowtype of the index tuples for the i'th column, ie, (int2, keytype). + * Note that in any case, leaf tuples contain more data than is known to + * the TupleDesc; see access/gin/README for details. + */ + TupleDesc origTupdesc; + TupleDesc tupdesc[INDEX_MAX_KEYS]; + Oid addInfoTypeOid[INDEX_MAX_KEYS]; + Form_pg_attribute addAttrs[INDEX_MAX_KEYS]; + + /* + * Per-index-column opclass support functions + */ + FmgrInfo compareFn[INDEX_MAX_KEYS]; + FmgrInfo extractValueFn[INDEX_MAX_KEYS]; + FmgrInfo extractQueryFn[INDEX_MAX_KEYS]; + FmgrInfo consistentFn[INDEX_MAX_KEYS]; + FmgrInfo comparePartialFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo configFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo preConsistentFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo orderingFn[INDEX_MAX_KEYS]; /* optional method */ + /* canPartialMatch[i] is true if comparePartialFn[i] is valid */ + bool canPartialMatch[INDEX_MAX_KEYS]; + /* canPreConsistent[i] is true if preConsistentFn[i] is valid */ + bool canPreConsistent[INDEX_MAX_KEYS]; + /* canOrdering[i] is true if orderingFn[i] is valid */ + bool canOrdering[INDEX_MAX_KEYS]; + /* Collations to pass to the support functions */ + Oid supportCollation[INDEX_MAX_KEYS]; +} RumState; + +typedef struct RumConfig +{ + Oid addInfoTypeOid; +} RumConfig; + +/* XLog stuff */ + +#define RUM_NDELETE_AT_ONCE 16 +typedef struct rumxlogDeleteListPages +{ + int32 ndeleted; + BlockNumber toDelete[RUM_NDELETE_AT_ONCE]; +} rumxlogDeleteListPages; + + +/* rumutil.c */ +extern bytea *rumoptions(Datum reloptions, bool validate); +extern Datum rumhandler(PG_FUNCTION_ARGS); +extern void initRumState(RumState *state, Relation index); +extern Buffer RumNewBuffer(Relation index); +extern void RumInitBuffer(GenericXLogState *state, Buffer buffer, uint32 flags); +extern void RumInitPage(Page page, uint32 f, Size pageSize); +extern void RumInitMetabuffer(GenericXLogState *state, Buffer metaBuffer); +extern int rumCompareEntries(RumState *rumstate, OffsetNumber attnum, + Datum a, RumNullCategory categorya, + Datum b, RumNullCategory categoryb); +extern int rumCompareAttEntries(RumState *rumstate, + OffsetNumber attnuma, Datum a, RumNullCategory categorya, + OffsetNumber attnumb, Datum b, RumNullCategory categoryb); +extern Datum *rumExtractEntries(RumState *rumstate, OffsetNumber attnum, + Datum value, bool isNull, + int32 *nentries, RumNullCategory **categories, + Datum **addInfo, bool **addInfoIsNull); + +extern OffsetNumber rumtuple_get_attrnum(RumState *rumstate, IndexTuple 
tuple); +extern Datum rumtuple_get_key(RumState *rumstate, IndexTuple tuple, + RumNullCategory *category); + +extern void rumGetStats(Relation index, GinStatsData *stats); +extern void rumUpdateStats(Relation index, const GinStatsData *stats); + +/* ruminsert.c */ +extern IndexBuildResult *rumbuild(Relation heap, Relation index, + struct IndexInfo *indexInfo); +extern void rumbuildempty(Relation index); +extern bool ruminsert(Relation index, Datum *values, bool *isnull, + ItemPointer ht_ctid, Relation heapRel, + IndexUniqueCheck checkUnique); +extern void rumEntryInsert(RumState *rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + ItemPointerData *items, Datum *addInfo, + bool *addInfoIsNull, uint32 nitem, + GinStatsData *buildStats); + +/* rumbtree.c */ + +typedef struct RumBtreeStack +{ + BlockNumber blkno; + Buffer buffer; + OffsetNumber off; + /* predictNumber contains predicted number of pages on current level */ + uint32 predictNumber; + struct RumBtreeStack *parent; +} RumBtreeStack; + +typedef struct RumBtreeData *RumBtree; + +typedef struct RumBtreeData +{ + /* search methods */ + BlockNumber (*findChildPage) (RumBtree, RumBtreeStack *); + bool (*isMoveRight) (RumBtree, Page); + bool (*findItem) (RumBtree, RumBtreeStack *); + + /* insert methods */ + OffsetNumber (*findChildPtr) (RumBtree, Page, BlockNumber, OffsetNumber); + BlockNumber (*getLeftMostPage) (RumBtree, Page); + bool (*isEnoughSpace) (RumBtree, Buffer, OffsetNumber); + void (*placeToPage) (RumBtree, Page, OffsetNumber); + Page (*splitPage) (RumBtree, Buffer, Buffer, Page, Page, OffsetNumber); + void (*fillRoot) (RumBtree, Buffer, Buffer, Buffer, Page, Page, Page); + + bool isData; + bool searchMode; + + Relation index; + RumState *rumstate; /* not valid in a data scan */ + bool fullScan; + bool isBuild; + + BlockNumber rightblkno; + + /* Entry options */ + OffsetNumber entryAttnum; + Datum entryKey; + RumNullCategory entryCategory; + IndexTuple entry; + bool isDelete; + + /* Data (posting tree) options */ + ItemPointerData *items; + Datum *addInfo; + bool *addInfoIsNull; + + uint32 nitem; + uint32 curitem; + + PostingItem pitem; +} RumBtreeData; + +extern RumBtreeStack *rumPrepareFindLeafPage(RumBtree btree, BlockNumber blkno); +extern RumBtreeStack *rumFindLeafPage(RumBtree btree, RumBtreeStack *stack); +extern RumBtreeStack *rumReFindLeafPage(RumBtree btree, RumBtreeStack *stack); +extern Buffer rumStepRight(Buffer buffer, Relation index, int lockmode); +extern void freeRumBtreeStack(RumBtreeStack *stack); +extern void rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, + GinStatsData *buildStats); +extern void rumFindParents(RumBtree btree, RumBtreeStack *stack, BlockNumber rootBlkno); + +/* rumentrypage.c */ +extern void rumPrepareEntryScan(RumBtree btree, OffsetNumber attnum, + Datum key, RumNullCategory category, + RumState *rumstate); +extern void rumEntryFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, + Page page, Page lpage, Page rpage); +extern IndexTuple rumPageGetLinkItup(Buffer buf, Page page); +extern void rumReadTuple(RumState *rumstate, OffsetNumber attnum, + IndexTuple itup, ItemPointerData *ipd, Datum *addInfo, bool *addInfoIsNull); +extern ItemPointerData updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate); + +/* rumdatapage.c */ +extern int rumCompareItemPointers(ItemPointer a, ItemPointer b); +extern char *rumDataPageLeafWriteItemPointer(char *ptr, ItemPointer iptr, ItemPointer prev, bool addInfoIsNull); +extern Pointer 
rumPlaceToDataPageLeaf(Pointer ptr, OffsetNumber attnum, + ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, + RumState *rumstate); +extern Size rumCheckPlaceToDataPageLeaf(OffsetNumber attnum, + ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, + RumState *rumstate, Size size); +extern uint32 rumMergeItemPointers(ItemPointerData *dst, Datum *dst2, bool *dst3, + ItemPointerData *a, Datum *a2, bool *a3, uint32 na, + ItemPointerData *b, Datum * b2, bool *b3, uint32 nb); +extern void RumDataPageAddItem(Page page, void *data, OffsetNumber offset); +extern void RumPageDeletePostingItem(Page page, OffsetNumber offset); + +typedef struct +{ + RumBtreeData btree; + RumBtreeStack *stack; +} RumPostingTreeScan; + +extern RumPostingTreeScan *rumPrepareScanPostingTree(Relation index, + BlockNumber rootBlkno, bool searchMode, OffsetNumber attnum, RumState *rumstate); +extern void rumInsertItemPointers(RumState *rumstate, + OffsetNumber attnum, + RumPostingTreeScan *gdi, + ItemPointerData *items, + Datum *addInfo, + bool *addInfoIsNull, + uint32 nitem, + GinStatsData *buildStats); +extern Buffer rumScanBeginPostingTree(RumPostingTreeScan *gdi); +extern void rumDataFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, + Page page, Page lpage, Page rpage); +extern void rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState *rumstate); + +/* rumscan.c */ + +/* + * RumScanKeyData describes a single RUM index qualifier expression. + * + * From each qual expression, we extract one or more specific index search + * conditions, which are represented by RumScanEntryData. It's quite + * possible for identical search conditions to be requested by more than + * one qual expression, in which case we merge such conditions to have just + * one unique RumScanEntry --- this is particularly important for efficiency + * when dealing with full-index-scan entries. So there can be multiple + * RumScanKeyData.scanEntry pointers to the same RumScanEntryData. + * + * In each RumScanKeyData, nentries is the true number of entries, while + * nuserentries is the number that extractQueryFn returned (which is what + * we report to consistentFn). The "user" entries must come first. + */ +typedef struct RumScanKeyData *RumScanKey; + +typedef struct RumScanEntryData *RumScanEntry; + +typedef struct RumScanKeyData +{ + /* Real number of entries in scanEntry[] (always > 0) */ + uint32 nentries; + /* Number of entries that extractQueryFn and consistentFn know about */ + uint32 nuserentries; + + /* array of RumScanEntry pointers, one per extracted search condition */ + RumScanEntry *scanEntry; + + /* array of check flags, reported to consistentFn */ + bool *entryRes; + Datum *addInfo; + bool *addInfoIsNull; + + /* other data needed for calling consistentFn */ + Datum query; + /* NB: these three arrays have only nuserentries elements! */ + Datum *queryValues; + RumNullCategory *queryCategories; + Pointer *extra_data; + StrategyNumber strategy; + int32 searchMode; + OffsetNumber attnum; + + /* + * Match status data. curItem is the TID most recently tested (could be a + * lossy-page pointer). curItemMatches is TRUE if it passes the + * consistentFn test; if so, recheckCurItem is the recheck flag. + * isFinished means that all the input entry streams are finished, so this + * key cannot succeed for any later TIDs. 
+ */ + ItemPointerData curItem; + bool curItemMatches; + bool recheckCurItem; + bool isFinished; + bool orderBy; +} RumScanKeyData; + +typedef struct RumScanEntryData +{ + /* query key and other information from extractQueryFn */ + Datum queryKey; + RumNullCategory queryCategory; + bool isPartialMatch; + Pointer extra_data; + StrategyNumber strategy; + int32 searchMode; + OffsetNumber attnum; + + /* Current page in posting tree */ + Buffer buffer; + + /* current ItemPointer to heap */ + ItemPointerData curItem; + Datum curAddInfo; + bool curAddInfoIsNull; + + /* for a partial-match or full-scan query, we accumulate all TIDs here */ + TIDBitmap *matchBitmap; + TBMIterator *matchIterator; + TBMIterateResult *matchResult; + + /* used for Posting list and one page in Posting tree */ + ItemPointerData *list; + Datum *addInfo; + bool *addInfoIsNull; + MemoryContext context; + uint32 nlist; + OffsetNumber offset; + + bool isFinished; + bool reduceResult; + bool preValue; + uint32 predictNumberResult; + RumPostingTreeScan *gdi; +} RumScanEntryData; + +typedef struct +{ + ItemPointerData iptr; + float8 distance; + bool recheck; +} RumOrderingItem; + +typedef struct RumScanOpaqueData +{ + MemoryContext tempCtx; + RumState rumstate; + + RumScanKey keys; /* one per scan qualifier expr */ + uint32 nkeys; + int norderbys; + + RumScanEntry *entries; /* one per index search condition */ + RumScanEntry *sortedEntries; /* one per index search condition */ + int entriesIncrIndex; + uint32 totalentries; + uint32 allocentries; /* allocated length of entries[] */ + + Tuplesortstate *sortstate; + + ItemPointerData iptr; + bool firstCall; + bool isVoidRes; /* true if query is unsatisfiable */ + bool useFastScan; + TIDBitmap *tbm; +} RumScanOpaqueData; + +typedef RumScanOpaqueData *RumScanOpaque; + +extern IndexScanDesc rumbeginscan(Relation rel, int nkeys, int norderbys); +extern void rumendscan(IndexScanDesc scan); +extern void rumrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, + ScanKey orderbys, int norderbys); +extern Datum rummarkpos(PG_FUNCTION_ARGS); +extern Datum rumrestrpos(PG_FUNCTION_ARGS); +extern void rumNewScanKey(IndexScanDesc scan); + +/* rumget.c */ +extern int64 rumgetbitmap(IndexScanDesc scan, TIDBitmap *tbm); +extern bool rumgettuple(IndexScanDesc scan, ScanDirection direction); + +/* rumvacuum.c */ +extern IndexBulkDeleteResult *rumbulkdelete(IndexVacuumInfo *info, + IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, + void *callback_state); +extern IndexBulkDeleteResult *rumvacuumcleanup(IndexVacuumInfo *info, + IndexBulkDeleteResult *stats); + +typedef struct +{ + ItemPointerData iptr; + Datum addInfo; + bool addInfoIsNull; +} RumEntryAccumulatorItem; + +/* rumvalidate.c */ +extern bool rumvalidate(Oid opclassoid); + +/* rumbulk.c */ +typedef struct RumEntryAccumulator +{ + RBNode rbnode; + Datum key; + RumNullCategory category; + OffsetNumber attnum; + bool shouldSort; + RumEntryAccumulatorItem *list; + uint32 maxcount; /* allocated size of list[] */ + uint32 count; /* current number of list[] entries */ +} RumEntryAccumulator; + +typedef struct +{ + RumState *rumstate; + long allocatedMemory; + RumEntryAccumulator *entryallocator; + uint32 eas_used; + RBTree *tree; +} BuildAccumulator; + +extern void rumInitBA(BuildAccumulator *accum); +extern void rumInsertBAEntries(BuildAccumulator *accum, + ItemPointer heapptr, OffsetNumber attnum, + Datum *entries, Datum *addInfo, bool *addInfoIsNull, + RumNullCategory *categories, int32 nentries); +extern void 
rumBeginBAScan(BuildAccumulator *accum); +extern RumEntryAccumulatorItem *rumGetBAEntry(BuildAccumulator *accum, + OffsetNumber *attnum, Datum *key, RumNullCategory *category, + uint32 *n); + +/* rumfast.c */ + +typedef struct RumTupleCollector +{ + IndexTuple *tuples; + uint32 ntuples; + uint32 lentuples; + uint32 sumsize; +} RumTupleCollector; + +extern void rumHeapTupleFastInsert(RumState *rumstate, + RumTupleCollector *collector); +extern void rumHeapTupleFastCollect(RumState *rumstate, + RumTupleCollector *collector, + OffsetNumber attnum, Datum value, bool isNull, + ItemPointer ht_ctid); +extern void rumInsertCleanup(RumState *rumstate, + bool vac_delay, IndexBulkDeleteResult *stats); + +/* rum_ts_utils.c */ +#define RUM_CONFIG_PROC 7 +#define RUM_PRE_CONSISTENT_PROC 8 +#define RUM_ORDERING_PROC 9 +#define RUMNProcs 9 + +extern Datum rum_extract_tsvector(PG_FUNCTION_ARGS); +extern Datum rum_extract_tsquery(PG_FUNCTION_ARGS); +extern Datum rum_tsvector_config(PG_FUNCTION_ARGS); +extern Datum rum_tsquery_pre_consistent(PG_FUNCTION_ARGS); +extern Datum rum_tsquery_distance(PG_FUNCTION_ARGS); +extern Datum rum_ts_distance(PG_FUNCTION_ARGS); + +/* GUC parameters */ +extern PGDLLIMPORT int RumFuzzySearchLimit; + +/* + * Functions for reading ItemPointers with additional information. Used in + * various .c files and have to be inline for being fast. + */ + +#define SEVENTHBIT (0x40) +#define SIXMASK (0x3F) + +/* + * Read next item pointer from leaf data page. Replaces current item pointer + * with the next one. Zero item pointer should be passed in order to read the + * first item pointer. Also reads value of addInfoIsNull flag which is stored + * with item pointer. + */ +static inline char * +rumDataPageLeafReadItemPointer(char *ptr, ItemPointer iptr, bool *addInfoIsNull) +{ + uint32 blockNumberIncr = 0; + uint16 offset = 0; + int i; + uint8 v; + + i = 0; + do + { + v = *ptr; + ptr++; + blockNumberIncr |= (v & (~HIGHBIT)) << i; + Assert(i < 28 || ((i == 28) && ((v & (~HIGHBIT)) < (1 << 4)))); + i += 7; + } + while (v & HIGHBIT); + + Assert((uint64)iptr->ip_blkid.bi_lo + ((uint64)iptr->ip_blkid.bi_hi << 16) + + (uint64)blockNumberIncr < ((uint64)1 << 32)); + + blockNumberIncr += iptr->ip_blkid.bi_lo + (iptr->ip_blkid.bi_hi << 16); + + iptr->ip_blkid.bi_lo = blockNumberIncr & 0xFFFF; + iptr->ip_blkid.bi_hi = (blockNumberIncr >> 16) & 0xFFFF; + + i = 0; + + while(true) + { + v = *ptr; + ptr++; + Assert(i < 14 || ((i == 14) && ((v & SIXMASK) < (1 << 2)))); + + if (v & HIGHBIT) + { + offset |= (v & (~HIGHBIT)) << i; + } + else + { + offset |= (v & SIXMASK) << i; + if (addInfoIsNull) + *addInfoIsNull = (v & SEVENTHBIT) ? true : false; + break; + } + i += 7; + } + + Assert(OffsetNumberIsValid(offset)); + iptr->ip_posid = offset; + + return ptr; +} + +/* + * Reads next item pointer and additional information from leaf data page. + * Replaces current item pointer with the next one. Zero item pointer should be + * passed in order to read the first item pointer. 
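 * Returns a pointer just past the data that was consumed; when the additional
 * information is present and requested, the decoded datum is stored through
 * *addInfo.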
+ */ +static inline Pointer +rumDataPageLeafRead(Pointer ptr, OffsetNumber attnum, ItemPointer iptr, + Datum *addInfo, bool *addInfoIsNull, RumState *rumstate) +{ + Form_pg_attribute attr; + bool isNull; + + ptr = rumDataPageLeafReadItemPointer(ptr, iptr, &isNull); + + Assert(iptr->ip_posid != InvalidOffsetNumber); + + if (addInfoIsNull) + *addInfoIsNull = isNull; + + if (!isNull) + { + attr = rumstate->addAttrs[attnum - 1]; + ptr = (Pointer) att_align_pointer(ptr, attr->attalign, attr->attlen, ptr); + if (addInfo) + *addInfo = fetch_att(ptr, attr->attbyval, attr->attlen); + ptr = (Pointer) att_addlength_pointer(ptr, attr->attlen, ptr); + } + return ptr; +} + +extern Datum FunctionCall10Coll(FmgrInfo *flinfo, Oid collation, + Datum arg1, Datum arg2, + Datum arg3, Datum arg4, Datum arg5, + Datum arg6, Datum arg7, Datum arg8, + Datum arg9, Datum arg10); + +#endif /* __RUM_H__ */ diff --git a/rum_ts_utils.c b/rum_ts_utils.c new file mode 100644 index 0000000000..9ba0599140 --- /dev/null +++ b/rum_ts_utils.c @@ -0,0 +1,677 @@ +/*------------------------------------------------------------------------- + * + * rum_ts_utils.c + * various support functions + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/pg_type.h" +#include "tsearch/ts_type.h" +#include "tsearch/ts_utils.h" +#include "utils/array.h" +#include "utils/builtins.h" + +#include "rum.h" + +#include + +PG_FUNCTION_INFO_V1(rum_extract_tsvector); +PG_FUNCTION_INFO_V1(rum_extract_tsquery); +PG_FUNCTION_INFO_V1(rum_tsvector_config); +PG_FUNCTION_INFO_V1(rum_tsquery_pre_consistent); +PG_FUNCTION_INFO_V1(rum_tsquery_consistent); +PG_FUNCTION_INFO_V1(rum_tsquery_distance); +PG_FUNCTION_INFO_V1(rum_ts_distance); + +static float calc_rank_pos_and(float *w, Datum *addInfo, bool *addInfoIsNull, + int size); +static float calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, + int size); + +static int count_pos(char *ptr, int len); +static char * decompress_pos(char *ptr, uint16 *pos); + +typedef struct +{ + QueryItem *first_item; + int *map_item_operand; + bool *check; + bool *need_recheck; + Datum *addInfo; + bool *addInfoIsNull; +} RumChkVal; + +static bool +pre_checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) +{ + RumChkVal *gcv = (RumChkVal *) checkval; + int j; + + /* if any val requiring a weight is used, set recheck flag */ + if (val->weight != 0 || data != NULL) + *(gcv->need_recheck) = true; + + /* convert item's number to corresponding entry's (operand's) number */ + j = gcv->map_item_operand[((QueryItem *) val) - gcv->first_item]; + + /* return presence of current entry in indexed value */ + return gcv->check[j]; +} + +Datum +rum_tsquery_pre_consistent(PG_FUNCTION_ARGS) +{ + bool *check = (bool *) PG_GETARG_POINTER(0); + + TSQuery query = PG_GETARG_TSQUERY(2); + + Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); + bool recheck; + bool res = FALSE; + + if (query->size > 0) + { + QueryItem *item; + RumChkVal gcv; + + /* + * check-parameter array has one entry for each value (operand) in the + * query. 
+ */ + gcv.first_item = item = GETQUERY(query); + gcv.check = check; + gcv.map_item_operand = (int *) (extra_data[0]); + gcv.need_recheck = &recheck; + + res = TS_execute(GETQUERY(query), + &gcv, + false, + pre_checkcondition_rum); + } + + PG_RETURN_BOOL(res); +} + +static bool +checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) +{ + RumChkVal *gcv = (RumChkVal *) checkval; + int j; + + /* if any val requiring a weight is used, set recheck flag */ + if (val->weight != 0) + *(gcv->need_recheck) = true; + + /* convert item's number to corresponding entry's (operand's) number */ + j = gcv->map_item_operand[((QueryItem *) val) - gcv->first_item]; + + /* return presence of current entry in indexed value */ + if (!gcv->check[j]) + return false; + + if (data && gcv->addInfo && gcv->addInfoIsNull[j] == false) + { + bytea *positions = DatumGetByteaP(gcv->addInfo[j]); + int32 i; + char *ptrt; + WordEntryPos post; + + data->npos = count_pos(VARDATA_ANY(positions), + VARSIZE_ANY_EXHDR(positions)); + data->pos = palloc(sizeof(*data->pos) * data->npos); + data->allocated = true; + + ptrt = (char *)VARDATA_ANY(positions); + post = 0; + + for(i=0; inpos; i++) + { + ptrt = decompress_pos(ptrt, &post); + data->pos[i] = post; + } + } + + return true; +} + +Datum +rum_tsquery_consistent(PG_FUNCTION_ARGS) +{ + bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ + TSQuery query = PG_GETARG_TSQUERY(2); + /* int32 nkeys = PG_GETARG_INT32(3); */ + Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); + bool *recheck = (bool *) PG_GETARG_POINTER(5); + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); + bool res = FALSE; + + /* The query requires recheck only if it involves + * weights */ + *recheck = false; + + if (query->size > 0) + { + QueryItem *item; + RumChkVal gcv; + + /* + * check-parameter array has one entry for each value + * (operand) in the query. 
+ */ + gcv.first_item = item = GETQUERY(query); + gcv.check = check; + gcv.map_item_operand = (int *) (extra_data[0]); + gcv.need_recheck = recheck; + gcv.addInfo = addInfo; + gcv.addInfoIsNull = addInfoIsNull; + + res = TS_execute(GETQUERY(query), &gcv, true, checkcondition_rum); + } + + PG_RETURN_BOOL(res); +} + + + +static float weights[] = {0.1f, 0.2f, 0.4f, 1.0f}; + +#define wpos(wep) ( w[ WEP_GETWEIGHT(wep) ] ) +/* A dummy WordEntryPos array to use when haspos is false */ +static WordEntryPosVector POSNULL = { + 1, /* Number of elements that follow */ + {0} +}; + +#define SIXTHBIT 0x20 +#define LOWERMASK 0x1F + +/* + * Returns a weight of a word collocation + */ +static float4 +word_distance(int32 w) +{ + if (w > 100) + return 1e-30f; + + return 1.0 / (1.005 + 0.05 * exp(((float4) w) / 1.5 - 2)); +} + +#define WordECompareQueryItem(e,q,p,i,m) \ + tsCompareString((q) + (i)->distance, (i)->length, \ + (e) + (p)->pos, (p)->len, (m)) + +static int +compress_pos(char *target, uint16 *pos, int npos) +{ + int i; + uint16 prev = 0, delta; + char *ptr; + + ptr = target; + for (i = 0; i < npos; i++) + { + delta = WEP_GETPOS(pos[i]) - WEP_GETPOS(prev); + + while (true) + { + if (delta >= SIXTHBIT) + { + *ptr = (delta & (~HIGHBIT)) | HIGHBIT; + ptr++; + delta >>= 7; + } + else + { + *ptr = delta | (WEP_GETWEIGHT(pos[i]) << 5); + ptr++; + break; + } + } + prev = pos[i]; + } + return ptr - target; +} + +static char * +decompress_pos(char *ptr, uint16 *pos) +{ + int i; + uint8 v; + uint16 delta = 0; + + i = 0; + while (true) + { + v = *ptr; + ptr++; + if (v & HIGHBIT) + { + delta |= (v & (~HIGHBIT)) << i; + } + else + { + delta |= (v & LOWERMASK) << i; + *pos += delta; + WEP_SETWEIGHT(*pos, v >> 5); + return ptr; + } + i += 7; + } +} + +static int +count_pos(char *ptr, int len) +{ + int count = 0, i; + for (i = 0; i < len; i++) + { + if (!(ptr[i] & HIGHBIT)) + count++; + } + return count; +} + +static float +calc_rank_pos_and(float *w, Datum *addInfo, bool *addInfoIsNull, int size) +{ + int i, + k, + l, + p; + WordEntryPos post, + ct; + int32 dimt, + lenct, + dist; + float res = -1.0; + char *ptrt, *ptrc; + + if (size < 2) + { + return calc_rank_pos_or(w, addInfo, addInfoIsNull, size); + } + WEP_SETPOS(POSNULL.pos[0], MAXENTRYPOS - 1); + + for (i = 0; i < size; i++) + { + if (!addInfoIsNull[i]) + { + dimt = count_pos(VARDATA_ANY(addInfo[i]), VARSIZE_ANY_EXHDR(addInfo[i])); + ptrt = (char *)VARDATA_ANY(addInfo[i]); + } + else + { + dimt = POSNULL.npos; + ptrt = (char *)POSNULL.pos; + } + for (k = 0; k < i; k++) + { + if (!addInfoIsNull[k]) + lenct = count_pos(VARDATA_ANY(addInfo[k]), VARSIZE_ANY_EXHDR(addInfo[k])); + else + lenct = POSNULL.npos; + post = 0; + for (l = 0; l < dimt; l++) + { + ptrt = decompress_pos(ptrt, &post); + ct = 0; + if (!addInfoIsNull[k]) + ptrc = (char *)VARDATA_ANY(addInfo[k]); + else + ptrc = (char *)POSNULL.pos; + for (p = 0; p < lenct; p++) + { + ptrc = decompress_pos(ptrc, &ct); + dist = Abs((int) WEP_GETPOS(post) - (int) WEP_GETPOS(ct)); + if (dist || (dist == 0 && (ptrt == (char *)POSNULL.pos || ptrc == (char *)POSNULL.pos))) + { + float curw; + + if (!dist) + dist = MAXENTRYPOS; + curw = sqrt(wpos(post) * wpos(ct) * word_distance(dist)); + res = (res < 0) ? 
curw : 1.0 - (1.0 - res) * (1.0 - curw); + } + } + } + } + + } + return res; +} + +static float +calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, int size) +{ + WordEntryPos post; + int32 dimt, + j, + i; + float res = 0.0; + char *ptrt; + + for (i = 0; i < size; i++) + { + float resj, + wjm; + int32 jm; + + if (!addInfoIsNull[i]) + { + dimt = count_pos(VARDATA_ANY(addInfo[i]), VARSIZE_ANY_EXHDR(addInfo[i])); + ptrt = (char *)VARDATA_ANY(addInfo[i]); + } + else + { + dimt = POSNULL.npos; + ptrt = (char *)POSNULL.pos; + } + + resj = 0.0; + wjm = -1.0; + jm = 0; + post = 0; + for (j = 0; j < dimt; j++) + { + ptrt = decompress_pos(ptrt, &post); + resj = resj + wpos(post) / ((j + 1) * (j + 1)); + if (wpos(post) > wjm) + { + wjm = wpos(post); + jm = j; + } + } +/* + limit (sum(i/i^2),i->inf) = pi^2/6 + resj = sum(wi/i^2),i=1,noccurence, + wi - should be sorted desc, + don't sort for now, just choose maximum weight. This should be corrected + Oleg Bartunov +*/ + res = res + (wjm + resj - wjm / ((jm + 1) * (jm + 1))) / 1.64493406685; + + } + if (size > 0) + res = res / size; + return res; +} + +static float +calc_rank_pos(float *w, TSQuery q, Datum *addInfo, bool *addInfoIsNull, int size) +{ + QueryItem *item = GETQUERY(q); + float res = 0.0; + + if (!size || !q->size) + return 0.0; + + /* XXX: What about NOT? */ + res = (item->type == QI_OPR && (item->qoperator.oper == OP_AND || + item->qoperator.oper == OP_PHRASE)) ? + calc_rank_pos_and(w, addInfo, addInfoIsNull, size) : + calc_rank_pos_or(w, addInfo, addInfoIsNull, size); + + if (res < 0) + res = 1e-20f; + + return res; +} + +/* + * sort QueryOperands by (length, word) + */ +static int +compareQueryOperand(const void *a, const void *b, void *arg) +{ + char *operand = (char *) arg; + QueryOperand *qa = (*(QueryOperand *const *) a); + QueryOperand *qb = (*(QueryOperand *const *) b); + + return tsCompareString(operand + qa->distance, qa->length, + operand + qb->distance, qb->length, + false); +} + +/* + * Returns a sorted, de-duplicated array of QueryOperands in a query. + * The returned QueryOperands are pointers to the original QueryOperands + * in the query. 
+ * + * Length of the returned array is stored in *size + */ +static QueryOperand ** +SortAndUniqItems(TSQuery q, int *size) +{ + char *operand = GETOPERAND(q); + QueryItem *item = GETQUERY(q); + QueryOperand **res, + **ptr, + **prevptr; + + ptr = res = (QueryOperand **) palloc(sizeof(QueryOperand *) * *size); + + /* Collect all operands from the tree to res */ + while ((*size)--) + { + if (item->type == QI_VAL) + { + *ptr = (QueryOperand *) item; + ptr++; + } + item++; + } + + *size = ptr - res; + if (*size < 2) + return res; + + qsort_arg(res, *size, sizeof(QueryOperand *), compareQueryOperand, (void *) operand); + + ptr = res + 1; + prevptr = res; + + /* remove duplicates */ + while (ptr - res < *size) + { + if (compareQueryOperand((void *) ptr, (void *) prevptr, (void *) operand) != 0) + { + prevptr++; + *prevptr = *ptr; + } + ptr++; + } + + *size = prevptr + 1 - res; + return res; +} + +Datum +rum_extract_tsvector(PG_FUNCTION_ARGS) +{ + TSVector vector = PG_GETARG_TSVECTOR(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); + bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); + Datum *entries = NULL; + + *nentries = vector->size; + if (vector->size > 0) + { + int i; + WordEntry *we = ARRPTR(vector); + WordEntryPosVector *posVec; + + entries = (Datum *) palloc(sizeof(Datum) * vector->size); + *addInfo = (Datum *) palloc(sizeof(Datum) * vector->size); + *addInfoIsNull = (bool *) palloc(sizeof(bool) * vector->size); + + for (i = 0; i < vector->size; i++) + { + text *txt; + bytea *posData; + int posDataSize; + + txt = cstring_to_text_with_len(STRPTR(vector) + we->pos, we->len); + entries[i] = PointerGetDatum(txt); + + if (we->haspos) + { + posVec = _POSVECPTR(vector, we); + posDataSize = VARHDRSZ + 2 * posVec->npos * sizeof(WordEntryPos); + posData = (bytea *)palloc(posDataSize); + posDataSize = compress_pos(posData->vl_dat, posVec->pos, posVec->npos) + VARHDRSZ; + SET_VARSIZE(posData, posDataSize); + + (*addInfo)[i] = PointerGetDatum(posData); + (*addInfoIsNull)[i] = false; + } + else + { + (*addInfo)[i] = (Datum)0; + (*addInfoIsNull)[i] = true; + } + we++; + } + } + + PG_FREE_IF_COPY(vector, 0); + PG_RETURN_POINTER(entries); +} + +Datum +rum_extract_tsquery(PG_FUNCTION_ARGS) +{ + TSQuery query = PG_GETARG_TSQUERY(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + + /* StrategyNumber strategy = PG_GETARG_UINT16(2); */ + bool **ptr_partialmatch = (bool **) PG_GETARG_POINTER(3); + Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); + + /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */ + int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); + Datum *entries = NULL; + + *nentries = 0; + + if (query->size > 0) + { + QueryItem *item = GETQUERY(query); + int32 i, + j; + bool *partialmatch; + int *map_item_operand; + char *operand = GETOPERAND(query); + QueryOperand **operands; + + /* + * If the query doesn't have any required positive matches (for + * instance, it's something like '! foo'), we have to do a full index + * scan. + */ + if (tsquery_requires_match(item)) + *searchMode = GIN_SEARCH_MODE_DEFAULT; + else + *searchMode = GIN_SEARCH_MODE_ALL; + + *nentries = query->size; + operands = SortAndUniqItems(query, nentries); + + entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); + partialmatch = *ptr_partialmatch = (bool *) palloc(sizeof(bool) * (*nentries)); + + /* + * Make map to convert item's number to corresponding operand's (the + * same, entry's) number. 
Entry's number is used in check array in + * consistent method. We use the same map for each entry. + */ + *extra_data = (Pointer *) palloc(sizeof(Pointer) * (*nentries)); + map_item_operand = (int *) palloc0(sizeof(int) * query->size); + + for (i = 0; i < (*nentries); i++) + { + text *txt; + + txt = cstring_to_text_with_len(GETOPERAND(query) + operands[i]->distance, + operands[i]->length); + entries[i] = PointerGetDatum(txt); + partialmatch[i] = operands[i]->prefix; + (*extra_data)[i] = (Pointer) map_item_operand; + } + + /* Now rescan the VAL items and fill in the arrays */ + for (j = 0; j < query->size; j++) + { + if (item[j].type == QI_VAL) + { + QueryOperand *val = &item[j].qoperand; + bool found = false; + + for (i = 0; i < (*nentries); i++) + { + if (!tsCompareString(operand + operands[i]->distance, operands[i]->length, + operand + val->distance, val->length, + false)) + { + map_item_operand[j] = i; + found = true; + break; + } + } + + if (!found) + elog(ERROR, "Operand not found!"); + } + } + } + + PG_FREE_IF_COPY(query, 0); + + PG_RETURN_POINTER(entries); +} + +Datum +rum_tsquery_distance(PG_FUNCTION_ARGS) +{ + /* bool *check = (bool *) PG_GETARG_POINTER(0); */ + + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ + TSQuery query = PG_GETARG_TSQUERY(2); + + int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); + float8 res; + + res = 1.0 / (float8)calc_rank_pos(weights, query, + addInfo, addInfoIsNull, nkeys); + + PG_RETURN_FLOAT8(res); +} + +Datum +rum_ts_distance(PG_FUNCTION_ARGS) +{ + return DirectFunctionCall2Coll(ts_rank_tt, + PG_GET_COLLATION(), + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1)); +} + +Datum +rum_tsvector_config(PG_FUNCTION_ARGS) +{ + RumConfig *config = (RumConfig *)PG_GETARG_POINTER(0); + config->addInfoTypeOid = BYTEAOID; + PG_RETURN_VOID(); +} diff --git a/rumbtree.c b/rumbtree.c new file mode 100644 index 0000000000..e1c93cbe6a --- /dev/null +++ b/rumbtree.c @@ -0,0 +1,514 @@ +/*------------------------------------------------------------------------- + * + * rumbtree.c + * page utilities routines for the postgres inverted index access method. + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/generic_xlog.h" + +#include "rum.h" + +/* + * Locks buffer by needed method for search. 
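+ * A share lock is taken first; for a leaf page in insert mode the lock is
+ * released and re-acquired in exclusive mode, falling back to share mode
+ * if the page stopped being a leaf while unlocked.  Returns the lock mode
+ * that was actually taken.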
+ */ +static int +rumTraverseLock(Buffer buffer, bool searchMode) +{ + Page page; + int access = RUM_SHARE; + + LockBuffer(buffer, RUM_SHARE); + page = BufferGetPage(buffer); + if (RumPageIsLeaf(page)) + { + if (searchMode == FALSE) + { + /* we should relock our page */ + LockBuffer(buffer, RUM_UNLOCK); + LockBuffer(buffer, RUM_EXCLUSIVE); + + /* But root can become non-leaf during relock */ + if (!RumPageIsLeaf(page)) + { + /* restore old lock type (very rare) */ + LockBuffer(buffer, RUM_UNLOCK); + LockBuffer(buffer, RUM_SHARE); + } + else + access = RUM_EXCLUSIVE; + } + } + + return access; +} + +RumBtreeStack * +rumPrepareFindLeafPage(RumBtree btree, BlockNumber blkno) +{ + RumBtreeStack *stack = (RumBtreeStack *) palloc(sizeof(RumBtreeStack)); + + stack->blkno = blkno; + stack->buffer = ReadBuffer(btree->index, stack->blkno); + stack->parent = NULL; + stack->predictNumber = 1; + + rumTraverseLock(stack->buffer, btree->searchMode); + + return stack; +} + +/* + * Locates leaf page contained tuple + */ +RumBtreeStack * +rumReFindLeafPage(RumBtree btree, RumBtreeStack *stack) +{ + while (stack->parent) + { + RumBtreeStack *ptr; + Page page; + OffsetNumber maxoff; + + LockBuffer(stack->buffer, RUM_UNLOCK); + stack->parent->buffer = + ReleaseAndReadBuffer(stack->buffer, btree->index, stack->parent->blkno); + LockBuffer(stack->parent->buffer, RUM_SHARE); + + ptr = stack; + stack = stack->parent; + pfree(ptr); + + page = BufferGetPage(stack->buffer); + maxoff = RumPageGetOpaque(page)->maxoff; + + if (rumCompareItemPointers( + &(((PostingItem *)RumDataPageGetItem(page, maxoff - 1))->key), + btree->items + btree->curitem) >= 0) + { + break; + } + } + + stack = rumFindLeafPage(btree, stack); + return stack; +} + +/* + * Locates leaf page contained tuple + */ +RumBtreeStack * +rumFindLeafPage(RumBtree btree, RumBtreeStack *stack) +{ + bool isfirst = TRUE; + BlockNumber rootBlkno; + + if (!stack) + stack = rumPrepareFindLeafPage(btree, RUM_ROOT_BLKNO); + rootBlkno = stack->blkno; + + for (;;) + { + Page page; + BlockNumber child; + int access = RUM_SHARE; + + stack->off = InvalidOffsetNumber; + + page = BufferGetPage(stack->buffer); + + if (isfirst) + { + if (RumPageIsLeaf(page) && !btree->searchMode) + access = RUM_EXCLUSIVE; + isfirst = FALSE; + } + else + access = rumTraverseLock(stack->buffer, btree->searchMode); + + /* + * ok, page is correctly locked, we should check to move right .., + * root never has a right link, so small optimization + */ + while (btree->fullScan == FALSE && stack->blkno != rootBlkno && + btree->isMoveRight(btree, page)) + { + BlockNumber rightlink = RumPageGetOpaque(page)->rightlink; + + if (rightlink == InvalidBlockNumber) + /* rightmost page */ + break; + + stack->buffer = rumStepRight(stack->buffer, btree->index, access); + stack->blkno = rightlink; + page = BufferGetPage(stack->buffer); + } + + if (RumPageIsLeaf(page)) /* we found, return locked page */ + return stack; + + /* now we have correct buffer, try to find child */ + child = btree->findChildPage(btree, stack); + + LockBuffer(stack->buffer, RUM_UNLOCK); + Assert(child != InvalidBlockNumber); + Assert(stack->blkno != child); + + if (btree->searchMode) + { + /* in search mode we may forget path to leaf */ + RumBtreeStack *ptr = (RumBtreeStack *) palloc(sizeof(RumBtreeStack)); + Buffer buffer = ReleaseAndReadBuffer(stack->buffer, btree->index, child); + + ptr->parent = stack; + ptr->predictNumber = stack->predictNumber; + stack->buffer = InvalidBuffer; + + stack = ptr; + stack->blkno = child; + stack->buffer = 
buffer; + } + else + { + RumBtreeStack *ptr = (RumBtreeStack *) palloc(sizeof(RumBtreeStack)); + + ptr->parent = stack; + stack = ptr; + stack->blkno = child; + stack->buffer = ReadBuffer(btree->index, stack->blkno); + stack->predictNumber = 1; + } + } +} + +/* + * Step right from current page. + * + * The next page is locked first, before releasing the current page. This is + * crucial to protect from concurrent page deletion (see comment in + * rumDeletePage). + */ +Buffer +rumStepRight(Buffer buffer, Relation index, int lockmode) +{ + Buffer nextbuffer; + Page page = BufferGetPage(buffer); + bool isLeaf = RumPageIsLeaf(page); + bool isData = RumPageIsData(page); + BlockNumber blkno = RumPageGetOpaque(page)->rightlink; + + nextbuffer = ReadBuffer(index, blkno); + LockBuffer(nextbuffer, lockmode); + UnlockReleaseBuffer(buffer); + + /* Sanity check that the page we stepped to is of similar kind. */ + page = BufferGetPage(nextbuffer); + if (isLeaf != RumPageIsLeaf(page) || isData != RumPageIsData(page)) + elog(ERROR, "right sibling of RUM page is of different type"); + + /* + * Given the proper lock sequence above, we should never land on a + * deleted page. + */ + if (RumPageIsDeleted(page)) + elog(ERROR, "right sibling of RUM page was deleted"); + + return nextbuffer; +} + +void +freeRumBtreeStack(RumBtreeStack *stack) +{ + while (stack) + { + RumBtreeStack *tmp = stack->parent; + + if (stack->buffer != InvalidBuffer) + ReleaseBuffer(stack->buffer); + + pfree(stack); + stack = tmp; + } +} + +/* + * Try to find parent for current stack position, returns correct + * parent and child's offset in stack->parent. + * Function should never release root page to prevent conflicts + * with vacuum process + */ +void +rumFindParents(RumBtree btree, RumBtreeStack *stack, + BlockNumber rootBlkno) +{ + Page page; + Buffer buffer; + BlockNumber blkno, + leftmostBlkno; + OffsetNumber offset; + RumBtreeStack *root = stack->parent; + RumBtreeStack *ptr; + + if (!root) + { + /* XLog mode... */ + root = (RumBtreeStack *) palloc(sizeof(RumBtreeStack)); + root->blkno = rootBlkno; + root->buffer = ReadBuffer(btree->index, rootBlkno); + LockBuffer(root->buffer, RUM_EXCLUSIVE); + root->parent = NULL; + } + else + { + /* + * find root, we should not release root page until update is + * finished!! 
+ */ + while (root->parent) + { + ReleaseBuffer(root->buffer); + root = root->parent; + } + + Assert(root->blkno == rootBlkno); + Assert(BufferGetBlockNumber(root->buffer) == rootBlkno); + LockBuffer(root->buffer, RUM_EXCLUSIVE); + } + root->off = InvalidOffsetNumber; + + page = BufferGetPage(root->buffer); + Assert(!RumPageIsLeaf(page)); + + /* check trivial case */ + if ((root->off = btree->findChildPtr(btree, page, stack->blkno, InvalidOffsetNumber)) != InvalidOffsetNumber) + { + stack->parent = root; + return; + } + + leftmostBlkno = blkno = btree->getLeftMostPage(btree, page); + LockBuffer(root->buffer, RUM_UNLOCK); + Assert(blkno != InvalidBlockNumber); + + for (;;) + { + buffer = ReadBuffer(btree->index, blkno); + LockBuffer(buffer, RUM_EXCLUSIVE); + page = BufferGetPage(buffer); + if (RumPageIsLeaf(page)) + elog(ERROR, "Lost path"); + + leftmostBlkno = btree->getLeftMostPage(btree, page); + + while ((offset = btree->findChildPtr(btree, page, stack->blkno, InvalidOffsetNumber)) == InvalidOffsetNumber) + { + blkno = RumPageGetOpaque(page)->rightlink; + if (blkno == InvalidBlockNumber) + { + UnlockReleaseBuffer(buffer); + break; + } + buffer = rumStepRight(buffer, btree->index, RUM_EXCLUSIVE); + page = BufferGetPage(buffer); + } + + if (blkno != InvalidBlockNumber) + { + ptr = (RumBtreeStack *) palloc(sizeof(RumBtreeStack)); + ptr->blkno = blkno; + ptr->buffer = buffer; + ptr->parent = root; /* it's may be wrong, but in next call we will + * correct */ + ptr->off = offset; + stack->parent = ptr; + return; + } + + blkno = leftmostBlkno; + } +} + +/* + * Insert value (stored in RumBtree) to tree described by stack + * + * During an index build, buildStats is non-null and the counters + * it contains should be incremented as needed. + * + * NB: the passed-in stack is freed, as though by freeRumBtreeStack. 
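+ * Each iteration of the loop below either places the new item on the
+ * current page when it fits (one generic-WAL action), or splits the page
+ * and moves one level up the stack to insert the downlink.  A root split
+ * allocates a fresh left page, so the root block number never changes.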
+ */ +void +rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, + GinStatsData *buildStats) +{ + RumBtreeStack *parent; + BlockNumber rootBlkno; + Page page, + rpage, + lpage; + GenericXLogState *state; + + /* extract root BlockNumber from stack */ + Assert(stack != NULL); + parent = stack; + while (parent->parent) + parent = parent->parent; + rootBlkno = parent->blkno; + Assert(BlockNumberIsValid(rootBlkno)); + + /* this loop crawls up the stack until the insertion is complete */ + for (;;) + { + BlockNumber savedRightLink; + + page = BufferGetPage(stack->buffer); + savedRightLink = RumPageGetOpaque(page)->rightlink; + + if (btree->isEnoughSpace(btree, stack->buffer, stack->off)) + { + state = GenericXLogStart(index); + page = GenericXLogRegisterBuffer(state, stack->buffer, 0); + + btree->placeToPage(btree, page, stack->off); + GenericXLogFinish(state); + + LockBuffer(stack->buffer, RUM_UNLOCK); + freeRumBtreeStack(stack); + + return; + } + else + { + Buffer rbuffer = RumNewBuffer(btree->index); + Page newlpage; + + /* During index build, count the newly-split page */ + if (buildStats) + { + if (btree->isData) + buildStats->nDataPages++; + else + buildStats->nEntryPages++; + } + + parent = stack->parent; + + if (parent == NULL) + { + Buffer lbuffer; + + state = GenericXLogStart(index); + + page = GenericXLogRegisterBuffer(state, stack->buffer, 0); + rpage = GenericXLogRegisterBuffer(state, rbuffer, 0); + + /* + * newlpage is a pointer to memory page, it doesn't associate with + * buffer, stack->buffer should be untouched + */ + newlpage = btree->splitPage(btree, stack->buffer, rbuffer, + page, rpage, stack->off); + + /* + * split root, so we need to allocate new left page and place + * pointer on root to left and right page + */ + lbuffer = RumNewBuffer(btree->index); + lpage = GenericXLogRegisterBuffer(state, lbuffer, 0); + + RumPageGetOpaque(rpage)->rightlink = InvalidBlockNumber; + RumPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer); + + RumInitPage(page, RumPageGetOpaque(newlpage)->flags & ~RUM_LEAF, + BufferGetPageSize(stack->buffer)); + PageRestoreTempPage(newlpage, lpage); + btree->fillRoot(btree, stack->buffer, lbuffer, rbuffer, + page, lpage, rpage); + + GenericXLogFinish(state); + + UnlockReleaseBuffer(rbuffer); + UnlockReleaseBuffer(lbuffer); + LockBuffer(stack->buffer, RUM_UNLOCK); + + freeRumBtreeStack(stack); + + /* During index build, count the newly-added root page */ + if (buildStats) + { + if (btree->isData) + buildStats->nDataPages++; + else + buildStats->nEntryPages++; + } + + return; + } + else + { + /* split non-root page */ + + state = GenericXLogStart(index); + + lpage = GenericXLogRegisterBuffer(state, stack->buffer, 0); + rpage = GenericXLogRegisterBuffer(state, rbuffer, 0); + + /* + * newlpage is a pointer to memory page, it doesn't associate with + * buffer, stack->buffer should be untouched + */ + newlpage = btree->splitPage(btree, stack->buffer, rbuffer, + lpage, rpage, stack->off); + + RumPageGetOpaque(rpage)->rightlink = savedRightLink; + RumPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer); + + PageRestoreTempPage(newlpage, lpage); + + GenericXLogFinish(state); + + UnlockReleaseBuffer(rbuffer); + } + } + + btree->isDelete = FALSE; + + /* search parent to lock */ + LockBuffer(parent->buffer, RUM_EXCLUSIVE); + + /* move right if it's needed */ + page = BufferGetPage(parent->buffer); + while ((parent->off = btree->findChildPtr(btree, page, stack->blkno, parent->off)) == InvalidOffsetNumber) + { + BlockNumber 
rightlink = RumPageGetOpaque(page)->rightlink; + + if (rightlink == InvalidBlockNumber) + { + /* + * rightmost page, but we don't find parent, we should use + * plain search... + */ + LockBuffer(parent->buffer, RUM_UNLOCK); + rumFindParents(btree, stack, rootBlkno); + parent = stack->parent; + Assert(parent != NULL); + break; + } + + parent->buffer = rumStepRight(parent->buffer, btree->index, RUM_EXCLUSIVE); + parent->blkno = rightlink; + page = BufferGetPage(parent->buffer); + } + + UnlockReleaseBuffer(stack->buffer); + pfree(stack); + stack = parent; + } +} diff --git a/rumbulk.c b/rumbulk.c new file mode 100644 index 0000000000..e93a760fa8 --- /dev/null +++ b/rumbulk.c @@ -0,0 +1,291 @@ +/*------------------------------------------------------------------------- + * + * rumbulk.c + * routines for fast build of inverted index + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "utils/datum.h" +#include "utils/memutils.h" + +#include "rum.h" + +#define DEF_NENTRY 2048 /* RumEntryAccumulator allocation quantum */ +#define DEF_NPTR 5 /* ItemPointer initial allocation quantum */ + + +/* Combiner function for rbtree.c */ +static void +rumCombineData(RBNode *existing, const RBNode *newdata, void *arg) +{ + RumEntryAccumulator *eo = (RumEntryAccumulator *) existing; + const RumEntryAccumulator *en = (const RumEntryAccumulator *) newdata; + BuildAccumulator *accum = (BuildAccumulator *) arg; + + /* + * Note this code assumes that newdata contains only one itempointer. + */ + if (eo->count >= eo->maxcount) + { + accum->allocatedMemory -= GetMemoryChunkSpace(eo->list); + eo->maxcount *= 2; + eo->list = (RumEntryAccumulatorItem *) + repalloc(eo->list, sizeof(RumEntryAccumulatorItem) * eo->maxcount); + accum->allocatedMemory += GetMemoryChunkSpace(eo->list); + } + + /* If item pointers are not ordered, they will need to be sorted later */ + if (eo->shouldSort == FALSE) + { + int res; + + res = rumCompareItemPointers(&eo->list[eo->count - 1].iptr, + &en->list->iptr); + Assert(res != 0); + + if (res > 0) + eo->shouldSort = TRUE; + } + + eo->list[eo->count] = en->list[0]; + eo->count++; +} + +/* Comparator function for rbtree.c */ +static int +cmpEntryAccumulator(const RBNode *a, const RBNode *b, void *arg) +{ + const RumEntryAccumulator *ea = (const RumEntryAccumulator *) a; + const RumEntryAccumulator *eb = (const RumEntryAccumulator *) b; + BuildAccumulator *accum = (BuildAccumulator *) arg; + + return rumCompareAttEntries(accum->rumstate, + ea->attnum, ea->key, ea->category, + eb->attnum, eb->key, eb->category); +} + +/* Allocator function for rbtree.c */ +static RBNode * +rumAllocEntryAccumulator(void *arg) +{ + BuildAccumulator *accum = (BuildAccumulator *) arg; + RumEntryAccumulator *ea; + + /* + * Allocate memory by rather big chunks to decrease overhead. We have no + * need to reclaim RBNodes individually, so this costs nothing. 
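+	 * With DEF_NENTRY = 2048 this means one palloc per 2048 accumulated
+	 * keys; only the chunk as a whole is counted in accum->allocatedMemory.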
+ */ + if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY) + { + accum->entryallocator = palloc(sizeof(RumEntryAccumulator) * DEF_NENTRY); + accum->allocatedMemory += GetMemoryChunkSpace(accum->entryallocator); + accum->eas_used = 0; + } + + /* Allocate new RBNode from current chunk */ + ea = accum->entryallocator + accum->eas_used; + accum->eas_used++; + + return (RBNode *) ea; +} + +void +rumInitBA(BuildAccumulator *accum) +{ + /* accum->rumstate is intentionally not set here */ + accum->allocatedMemory = 0; + accum->entryallocator = NULL; + accum->eas_used = 0; + accum->tree = rb_create(sizeof(RumEntryAccumulator), + cmpEntryAccumulator, + rumCombineData, + rumAllocEntryAccumulator, + NULL, /* no freefunc needed */ + (void *) accum); +} + +/* + * This is basically the same as datumCopy(), but extended to count + * palloc'd space in accum->allocatedMemory. + */ +static Datum +getDatumCopy(BuildAccumulator *accum, OffsetNumber attnum, Datum value) +{ + Form_pg_attribute att = accum->rumstate->origTupdesc->attrs[attnum - 1]; + Datum res; + + if (att->attbyval) + res = value; + else + { + res = datumCopy(value, false, att->attlen); + accum->allocatedMemory += GetMemoryChunkSpace(DatumGetPointer(res)); + } + return res; +} + +/* + * Find/store one entry from indexed value. + */ +static void +rumInsertBAEntry(BuildAccumulator *accum, + ItemPointer heapptr, OffsetNumber attnum, + Datum key, Datum addInfo, bool addInfoIsNull, + RumNullCategory category) +{ + RumEntryAccumulator eatmp; + RumEntryAccumulator *ea; + bool isNew; + RumEntryAccumulatorItem item; + + /* + * For the moment, fill only the fields of eatmp that will be looked at by + * cmpEntryAccumulator or rumCombineData. + */ + eatmp.attnum = attnum; + eatmp.key = key; + eatmp.category = category; + /* temporarily set up single-entry itempointer list */ + eatmp.list = &item; + item.iptr = *heapptr; + item.addInfo = addInfo; + item.addInfoIsNull = addInfoIsNull; + + ea = (RumEntryAccumulator *) rb_insert(accum->tree, (RBNode *) &eatmp, + &isNew); + + if (isNew) + { + /* + * Finish initializing new tree entry, including making permanent + * copies of the datum (if it's not null) and itempointer. + */ + if (category == RUM_CAT_NORM_KEY) + ea->key = getDatumCopy(accum, attnum, key); + ea->maxcount = DEF_NPTR; + ea->count = 1; + ea->shouldSort = FALSE; + ea->list = + (RumEntryAccumulatorItem *) palloc(sizeof(RumEntryAccumulatorItem) * DEF_NPTR); + ea->list[0].iptr = *heapptr; + ea->list[0].addInfo = addInfo; + ea->list[0].addInfoIsNull = addInfoIsNull; + accum->allocatedMemory += GetMemoryChunkSpace(ea->list); + } + else + { + /* + * rumCombineData did everything needed. + */ + } +} + +/* + * Insert the entries for one heap pointer. + * + * Since the entries are being inserted into a balanced binary tree, you + * might think that the order of insertion wouldn't be critical, but it turns + * out that inserting the entries in sorted order results in a lot of + * rebalancing operations and is slow. To prevent this, we attempt to insert + * the nodes in an order that will produce a nearly-balanced tree if the input + * is in fact sorted. + * + * We do this as follows. First, we imagine that we have an array whose size + * is the smallest power of two greater than or equal to the actual array + * size. Second, we insert the middle entry of our virtual array into the + * tree; then, we insert the middles of each half of our virtual array, then + * middles of quarters, etc. 
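+ * For example, with nentries = 10 the virtual array size is 16 and step
+ * starts at 8, so the entries are inserted in the order
+ * 7, 3, 1, 5, 9, 0, 2, 4, 6, 8 (0-based indexes into the entries array).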
+ */ +void +rumInsertBAEntries(BuildAccumulator *accum, + ItemPointer heapptr, OffsetNumber attnum, + Datum *entries, Datum *addInfo, bool *addInfoIsNull, + RumNullCategory *categories, int32 nentries) +{ + uint32 step = nentries; + + if (nentries <= 0) + return; + + Assert(ItemPointerIsValid(heapptr) && attnum >= FirstOffsetNumber); + + /* + * step will contain largest power of 2 and <= nentries + */ + step |= (step >> 1); + step |= (step >> 2); + step |= (step >> 4); + step |= (step >> 8); + step |= (step >> 16); + step >>= 1; + step++; + + while (step > 0) + { + int i; + + for (i = step - 1; i < nentries && i >= 0; i += step << 1 /* *2 */ ) + rumInsertBAEntry(accum, heapptr, attnum, + entries[i], addInfo[i], addInfoIsNull[i], categories[i]); + + step >>= 1; /* /2 */ + } +} + +static int +qsortCompareItemPointers(const void *a, const void *b) +{ + int res = rumCompareItemPointers((ItemPointer) a, (ItemPointer) b); + + /* Assert that there are no equal item pointers being sorted */ + Assert(res != 0); + return res; +} + +/* Prepare to read out the rbtree contents using rumGetBAEntry */ +void +rumBeginBAScan(BuildAccumulator *accum) +{ + rb_begin_iterate(accum->tree, LeftRightWalk); +} + +/* + * Get the next entry in sequence from the BuildAccumulator's rbtree. + * This consists of a single key datum and a list (array) of one or more + * heap TIDs in which that key is found. The list is guaranteed sorted. + */ +RumEntryAccumulatorItem * +rumGetBAEntry(BuildAccumulator *accum, + OffsetNumber *attnum, Datum *key, RumNullCategory *category, + uint32 *n) +{ + RumEntryAccumulator *entry; + RumEntryAccumulatorItem *list; + + entry = (RumEntryAccumulator *) rb_iterate(accum->tree); + + if (entry == NULL) + return NULL; /* no more entries */ + + *attnum = entry->attnum; + *key = entry->key; + *category = entry->category; + list = entry->list; + *n = entry->count; + + Assert(list != NULL && entry->count > 0); + + if (entry->shouldSort && entry->count > 1) + qsort(list, entry->count, sizeof(RumEntryAccumulatorItem), + qsortCompareItemPointers); + + return list; +} diff --git a/rumdatapage.c b/rumdatapage.c new file mode 100644 index 0000000000..2a4f97bfb2 --- /dev/null +++ b/rumdatapage.c @@ -0,0 +1,1318 @@ +/*------------------------------------------------------------------------- + * + * rumdatapage.c + * page utilities routines for the postgres inverted index access method. + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "rum.h" + +/* Does datatype allow packing into the 1-byte-header varlena format? */ +#define TYPE_IS_PACKABLE(typlen, typstorage) \ + ((typlen) == -1 && (typstorage) != 'p') + +/* + * Increment data_length by the space needed by the datum, including any + * preceding alignment padding. 
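+ * Varlena values that fit a 1-byte header are counted with
+ * VARATT_CONVERTED_SHORT_SIZE and no alignment padding, mirroring what
+ * rumDatumWrite() emits.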
+ */ +static Size +rumComputeDatumSize(Size data_length, Datum val, bool typbyval, char typalign, + int16 typlen, char typstorage) +{ + if (TYPE_IS_PACKABLE(typlen, typstorage) && + VARATT_CAN_MAKE_SHORT(DatumGetPointer(val))) + { + /* + * we're anticipating converting to a short varlena header, so adjust + * length and don't count any alignment + */ + data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val)); + } + else + { + data_length = att_align_datum(data_length, typalign, typlen, val); + data_length = att_addlength_datum(data_length, typlen, val); + } + + return data_length; +} + +/* + * Write the given datum beginning at ptr (after advancing to correct + * alignment, if needed). Setting padding bytes to zero if needed. Return the + * pointer incremented by space used. + */ +static Pointer +rumDatumWrite(Pointer ptr, Datum datum, bool typbyval, char typalign, + int16 typlen, char typstorage) +{ + Size data_length; + Pointer prev_ptr = ptr; + + if (typbyval) + { + /* pass-by-value */ + ptr = (char *) att_align_nominal(ptr, typalign); + store_att_byval(ptr, datum, typlen); + data_length = typlen; + } + else if (typlen == -1) + { + /* varlena */ + Pointer val = DatumGetPointer(datum); + + if (VARATT_IS_EXTERNAL(val)) + { + /* + * Throw error, because we must never put a toast pointer inside a + * range object. Caller should have detoasted it. + */ + elog(ERROR, "cannot store a toast pointer inside a range"); + data_length = 0; /* keep compiler quiet */ + } + else if (VARATT_IS_SHORT(val)) + { + /* no alignment for short varlenas */ + data_length = VARSIZE_SHORT(val); + memmove(ptr, val, data_length); + } + else if (TYPE_IS_PACKABLE(typlen, typstorage) && + VARATT_CAN_MAKE_SHORT(val)) + { + /* convert to short varlena -- no alignment */ + data_length = VARATT_CONVERTED_SHORT_SIZE(val); + SET_VARSIZE_SHORT(ptr, data_length); + memmove(ptr + 1, VARDATA(val), data_length - 1); + } + else + { + /* full 4-byte header varlena */ + ptr = (char *) att_align_nominal(ptr, typalign); + data_length = VARSIZE(val); + memmove(ptr, val, data_length); + } + } + else if (typlen == -2) + { + /* cstring ... never needs alignment */ + Assert(typalign == 'c'); + data_length = strlen(DatumGetCString(datum)) + 1; + memmove(ptr, DatumGetPointer(datum), data_length); + } + else + { + /* fixed-length pass-by-reference */ + ptr = (char *) att_align_nominal(ptr, typalign); + Assert(typlen > 0); + data_length = typlen; + memmove(ptr, DatumGetPointer(datum), data_length); + } + + if (ptr != prev_ptr) + memset(prev_ptr, 0, ptr - prev_ptr); + ptr += data_length; + + return ptr; +} + +/* + * Write item pointer into leaf data page using varbyte encoding. Since + * BlockNumber is stored in incremental manner we also need a previous item + * pointer. Also store addInfoIsNull flag using one bit of OffsetNumber. + */ +char * +rumDataPageLeafWriteItemPointer(char *ptr, ItemPointer iptr, ItemPointer prev, + bool addInfoIsNull) +{ + uint32 blockNumberIncr = 0; + uint16 offset = iptr->ip_posid; + + Assert(rumCompareItemPointers(iptr, prev) > 0); + Assert(OffsetNumberIsValid(iptr->ip_posid)); + + blockNumberIncr = iptr->ip_blkid.bi_lo + (iptr->ip_blkid.bi_hi << 16) - + (prev->ip_blkid.bi_lo + (prev->ip_blkid.bi_hi << 16)); + + + while (true) + { + *ptr = (blockNumberIncr & (~HIGHBIT)) | + ((blockNumberIncr >= HIGHBIT) ? 
HIGHBIT : 0); + ptr++; + if (blockNumberIncr < HIGHBIT) + break; + blockNumberIncr >>= 7; + } + + while (true) + { + if (offset >= SEVENTHBIT) + { + *ptr = (offset & (~HIGHBIT)) | HIGHBIT; + ptr++; + offset >>= 7; + } + else + { + *ptr = offset | (addInfoIsNull ? SEVENTHBIT : 0); + ptr++; + break; + } + } + + return ptr; +} + +/** + * Place item pointer with additional information into leaf data page. + */ +Pointer +rumPlaceToDataPageLeaf(Pointer ptr, OffsetNumber attnum, + ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, + RumState *rumstate) +{ + Form_pg_attribute attr; + + ptr = rumDataPageLeafWriteItemPointer(ptr, iptr, prev, addInfoIsNull); + + if (!addInfoIsNull) + { + attr = rumstate->addAttrs[attnum - 1]; + ptr = rumDatumWrite(ptr, addInfo, attr->attbyval, attr->attalign, + attr->attlen, attr->attstorage); + } + return ptr; +} + +/* + * Calculate size of incremental varbyte encoding of item pointer. + */ +static int +rumDataPageLeafGetItemPointerSize(ItemPointer iptr, ItemPointer prev) +{ + uint32 blockNumberIncr = 0; + uint16 offset = iptr->ip_posid; + int size = 0; + + blockNumberIncr = iptr->ip_blkid.bi_lo + (iptr->ip_blkid.bi_hi << 16) - + (prev->ip_blkid.bi_lo + (prev->ip_blkid.bi_hi << 16)); + + + while (true) + { + size++; + if (blockNumberIncr < HIGHBIT) + break; + blockNumberIncr >>= 7; + } + + while (true) + { + size++; + if (offset < SEVENTHBIT) + break; + offset >>= 7; + } + + return size; +} + +/* + * Returns size of item pointers with additional information if leaf data page + * after inserting another one. + */ +Size +rumCheckPlaceToDataPageLeaf(OffsetNumber attnum, + ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, + RumState *rumstate, Size size) +{ + Form_pg_attribute attr; + + size += rumDataPageLeafGetItemPointerSize(iptr, prev); + + if (!addInfoIsNull) + { + attr = rumstate->addAttrs[attnum - 1]; + size = rumComputeDatumSize(size, addInfo, attr->attbyval, + attr->attalign, attr->attlen, attr->attstorage); + } + + return size; +} + +int +rumCompareItemPointers(ItemPointer a, ItemPointer b) +{ + BlockNumber ba = RumItemPointerGetBlockNumber(a); + BlockNumber bb = RumItemPointerGetBlockNumber(b); + + if (ba == bb) + { + OffsetNumber oa = RumItemPointerGetOffsetNumber(a); + OffsetNumber ob = RumItemPointerGetOffsetNumber(b); + + if (oa == ob) + return 0; + return (oa > ob) ? 1 : -1; + } + + return (ba > bb) ? 1 : -1; +} + +/* + * Merge two ordered arrays of itempointers, eliminating any duplicates. + * Returns the number of items in the result. + * Caller is responsible that there is enough space at *dst. 
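+ * When the same item pointer occurs in both inputs, only one copy is kept
+ * and its additional information is taken from the 'b' array.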
+ */ +uint32 +rumMergeItemPointers(ItemPointerData *dst, Datum *dstAddInfo, bool *dstAddInfoIsNull, + ItemPointerData *a, Datum *aAddInfo, bool *aAddInfoIsNull, uint32 na, + ItemPointerData *b, Datum *bAddInfo, bool *bAddInfoIsNull, uint32 nb) +{ + ItemPointerData *dptr = dst; + ItemPointerData *aptr = a, + *bptr = b; + + while (aptr - a < na && bptr - b < nb) + { + int cmp = rumCompareItemPointers(aptr, bptr); + + if (cmp > 0) + { + *dptr++ = *bptr++; + *dstAddInfo++ = *bAddInfo++; + *dstAddInfoIsNull++ = *bAddInfoIsNull++; + } + else if (cmp == 0) + { + /* we want only one copy of the identical items */ + *dptr++ = *bptr++; + *dstAddInfo++ = *bAddInfo++; + *dstAddInfoIsNull++ = *bAddInfoIsNull++; + aptr++; + aAddInfo++; + aAddInfoIsNull++; + } + else + { + *dptr++ = *aptr++; + *dstAddInfo++ = *aAddInfo++; + *dstAddInfoIsNull++ = *aAddInfoIsNull++; + } + } + + while (aptr - a < na) + { + *dptr++ = *aptr++; + *dstAddInfo++ = *aAddInfo++; + *dstAddInfoIsNull++ = *aAddInfoIsNull++; + } + + while (bptr - b < nb) + { + *dptr++ = *bptr++; + *dstAddInfo++ = *bAddInfo++; + *dstAddInfoIsNull++ = *bAddInfoIsNull++; + } + + return dptr - dst; +} + +/* + * Checks, should we move to right link... + * Compares inserting itemp pointer with right bound of current page + */ +static bool +dataIsMoveRight(RumBtree btree, Page page) +{ + ItemPointer iptr = RumDataPageGetRightBound(page); + + if (RumPageRightMost(page)) + return FALSE; + + return (rumCompareItemPointers(btree->items + btree->curitem, iptr) > 0) ? TRUE : FALSE; +} + +/* + * Find correct PostingItem in non-leaf page. It supposed that page + * correctly chosen and searching value SHOULD be on page + */ +static BlockNumber +dataLocateItem(RumBtree btree, RumBtreeStack *stack) +{ + OffsetNumber low, + high, + maxoff; + PostingItem *pitem = NULL; + int result; + Page page = BufferGetPage(stack->buffer); + + Assert(!RumPageIsLeaf(page)); + Assert(RumPageIsData(page)); + + if (btree->fullScan) + { + stack->off = FirstOffsetNumber; + stack->predictNumber *= RumPageGetOpaque(page)->maxoff; + return btree->getLeftMostPage(btree, page); + } + + low = FirstOffsetNumber; + maxoff = high = RumPageGetOpaque(page)->maxoff; + Assert(high >= low); + + high++; + + while (high > low) + { + OffsetNumber mid = low + ((high - low) / 2); + + pitem = (PostingItem *) RumDataPageGetItem(page, mid); + + if (mid == maxoff) + { + /* + * Right infinity, page already correctly chosen with a help of + * dataIsMoveRight + */ + result = -1; + } + else + { + pitem = (PostingItem *) RumDataPageGetItem(page, mid); + result = rumCompareItemPointers(btree->items + btree->curitem, &(pitem->key)); + } + + if (result == 0) + { + stack->off = mid; + return PostingItemGetBlockNumber(pitem); + } + else if (result > 0) + low = mid + 1; + else + high = mid; + } + + Assert(high >= FirstOffsetNumber && high <= maxoff); + + stack->off = high; + pitem = (PostingItem *) RumDataPageGetItem(page, high); + return PostingItemGetBlockNumber(pitem); +} + +/** + * Find item pointer in leaf data page. Returns true if given item pointer is + * found and false if it's not. Sets offset and iptrOut to last item pointer + * which is less than given one. Sets ptrOut ahead that item pointer. 
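+ * The search is two-phase: the sparse index stored at the end of the page
+ * (RumPageGetIndexes) narrows the [first, maxoff] range first, and only the
+ * items within that range are decoded sequentially.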
+ */ +static bool +findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, + ItemPointerData *iptrOut, Pointer *ptrOut) +{ + Pointer ptr = RumDataPageGetData(page); + OffsetNumber i, maxoff, first = FirstOffsetNumber; + ItemPointerData iptr = {{0,0},0}; + int cmp; + + maxoff = RumPageGetOpaque(page)->maxoff; + + /* + * At first, search index at the end of page. As the result we narrow + * [first, maxoff] range. + */ + for (i = 0; i < RumDataLeafIndexCount; i++) + { + RumDataLeafItemIndex *index = &RumPageGetIndexes(page)[i]; + if (index->offsetNumer == InvalidOffsetNumber) + break; + + cmp = rumCompareItemPointers(&index->iptr, btree->items + btree->curitem); + if (cmp < 0) + { + ptr = RumDataPageGetData(page) + index->pageOffset; + first = index->offsetNumer; + iptr = index->iptr; + } + else + { + maxoff = index->offsetNumer - 1; + break; + } + } + + /* Search page in [first, maxoff] range found by page index */ + for (i = first; i <= maxoff; i++) + { + *ptrOut = ptr; + *iptrOut = iptr; + ptr = rumDataPageLeafRead(ptr, btree->entryAttnum, &iptr, + NULL, NULL, btree->rumstate); + + cmp = rumCompareItemPointers(btree->items + btree->curitem, &iptr); + if (cmp == 0) + { + *offset = i; + return true; + } + if (cmp < 0) + { + *offset = i; + return false; + } + } + + *ptrOut = ptr; + *iptrOut = iptr; + *offset = RumPageGetOpaque(page)->maxoff + 1; + return false; +} + + +/* + * Searches correct position for value on leaf page. + * Page should be correctly chosen. + * Returns true if value found on page. + */ +static bool +dataLocateLeafItem(RumBtree btree, RumBtreeStack *stack) +{ + Page page = BufferGetPage(stack->buffer); + ItemPointerData iptr; + Pointer ptr; + + Assert(RumPageIsLeaf(page)); + Assert(RumPageIsData(page)); + + if (btree->fullScan) + { + stack->off = FirstOffsetNumber; + return TRUE; + } + + return findInLeafPage(btree, page, &stack->off, &iptr, &ptr); + +} + +/* + * Finds links to blkno on non-leaf page, returns + * offset of PostingItem + */ +static OffsetNumber +dataFindChildPtr(RumBtree btree, Page page, BlockNumber blkno, OffsetNumber storedOff) +{ + OffsetNumber i, + maxoff = RumPageGetOpaque(page)->maxoff; + PostingItem *pitem; + + Assert(!RumPageIsLeaf(page)); + Assert(RumPageIsData(page)); + + /* if page isn't changed, we return storedOff */ + if (storedOff >= FirstOffsetNumber && storedOff <= maxoff) + { + pitem = (PostingItem *) RumDataPageGetItem(page, storedOff); + if (PostingItemGetBlockNumber(pitem) == blkno) + return storedOff; + + /* + * we hope, that needed pointer goes to right. It's true if there + * wasn't a deletion + */ + for (i = storedOff + 1; i <= maxoff; i++) + { + pitem = (PostingItem *) RumDataPageGetItem(page, i); + if (PostingItemGetBlockNumber(pitem) == blkno) + return i; + } + + maxoff = storedOff - 1; + } + + /* last chance */ + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + pitem = (PostingItem *) RumDataPageGetItem(page, i); + if (PostingItemGetBlockNumber(pitem) == blkno) + return i; + } + + return InvalidOffsetNumber; +} + +/* + * returns blkno of leftmost child + */ +static BlockNumber +dataGetLeftMostPage(RumBtree btree, Page page) +{ + PostingItem *pitem; + + Assert(!RumPageIsLeaf(page)); + Assert(RumPageIsData(page)); + Assert(RumPageGetOpaque(page)->maxoff >= FirstOffsetNumber); + + pitem = (PostingItem *) RumDataPageGetItem(page, FirstOffsetNumber); + return PostingItemGetBlockNumber(pitem); +} + +/* + * add ItemPointer or PostingItem to page. data should point to + * correct value! 
depending on leaf or non-leaf page + */ +void +RumDataPageAddItem(Page page, void *data, OffsetNumber offset) +{ + OffsetNumber maxoff = RumPageGetOpaque(page)->maxoff; + char *ptr, + *nextptr; + size_t size = RumSizeOfDataPageItem(page); + + if (offset == InvalidOffsetNumber) + { + ptr = RumDataPageGetItem(page, maxoff + 1); + nextptr = ptr + size + 1; + } + else + { + ptr = RumDataPageGetItem(page, offset); + if (maxoff + 1 - offset != 0) + { + memmove(ptr + size, + ptr, + (maxoff - offset + 1) * size); + nextptr = ptr + size + (maxoff - offset + 1) * size + 1; + } + else + nextptr = ptr + size + 1; + } + memcpy(ptr, data, size); + + RumPageGetOpaque(page)->maxoff++; + /* Adjust pd_lower */ + ((PageHeader) page)->pd_lower = nextptr - page; +} + +/* + * Deletes posting item from non-leaf page + */ +void +RumPageDeletePostingItem(Page page, OffsetNumber offset) +{ + OffsetNumber maxoff = RumPageGetOpaque(page)->maxoff; + + Assert(!RumPageIsLeaf(page)); + Assert(offset >= FirstOffsetNumber && offset <= maxoff); + + if (offset != maxoff) + { + char *dstptr = RumDataPageGetItem(page, offset), + *sourceptr = RumDataPageGetItem(page, offset + 1); + memmove(dstptr, sourceptr, sizeof(PostingItem) * (maxoff - offset)); + /* Adjust pd_lower */ + ((PageHeader) page)->pd_lower = sourceptr - page; + } + + RumPageGetOpaque(page)->maxoff--; +} + +/* + * checks space to install new value, + * item pointer never deletes! + */ +static bool +dataIsEnoughSpace(RumBtree btree, Buffer buf, OffsetNumber off) +{ + Page page = BufferGetPage(buf); + + Assert(RumPageIsData(page)); + Assert(!btree->isDelete); + + if (RumPageIsLeaf(page)) + { + int n, j; + ItemPointerData iptr = {{0,0},0}; + Size size = 0; + + /* + * Calculate additional size using worst case assumption: varbyte + * encoding from zero item pointer. Also use worst case assumption about + * alignment. + */ + n = RumPageGetOpaque(page)->maxoff; + + if (RumPageRightMost(page) && off > n) + { + for (j = btree->curitem; j < btree->nitem; j++) + { + size = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], + (j == btree->curitem) ? (&iptr) : &btree->items[j - 1], + btree->rumstate, size); + } + } + else + { + j = btree->curitem; + size = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], + &iptr, btree->rumstate, size); + } + size += MAXIMUM_ALIGNOF; + + if (RumPageGetOpaque(page)->freespace >= size) + return true; + + } + else if (sizeof(PostingItem) <= RumDataPageGetFreeSpace(page)) + return true; + + return false; +} + +/* + * In case of previous split update old child blkno to + * new right page + * item pointer never deletes! + */ +static BlockNumber +dataPrepareData(RumBtree btree, Page page, OffsetNumber off) +{ + BlockNumber ret = InvalidBlockNumber; + + Assert(RumPageIsData(page)); + + if (!RumPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber) + { + PostingItem *pitem = (PostingItem *) RumDataPageGetItem(page, off); + + PostingItemSetBlockNumber(pitem, btree->rightblkno); + ret = btree->rightblkno; + } + + btree->rightblkno = InvalidBlockNumber; + + return ret; +} + +/* + * Places keys to page and fills WAL record. In case leaf page and + * build mode puts all ItemPointers to page. 
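+ * For a leaf page the insert position is located through the page index and
+ * only the tail of the page from that position on is re-encoded (the varbyte
+ * deltas after the new items change); updateItemIndexes() then refreshes the
+ * page index and free space.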
+ */ +static void +dataPlaceToPage(RumBtree btree, Page page, OffsetNumber off) +{ + Assert(RumPageIsData(page)); + + dataPrepareData(btree, page, off); + + if (RumPageIsLeaf(page)) + { + int i = 0, j, max_j; + Pointer ptr = RumDataPageGetData(page), + copy_ptr = NULL; + ItemPointerData iptr = {{0,0},0}, copy_iptr; + char pageCopy[BLCKSZ]; + Datum addInfo = 0; + bool addInfoIsNull = false; + int maxoff = RumPageGetOpaque(page)->maxoff; + int freespace; + + /* + * We're going to prevent var-byte re-encoding of whole page. + * Find position in page using page indexes. + */ + findInLeafPage(btree, page, &off, &iptr, &ptr); + + freespace = RumDataPageFreeSpacePre(page,ptr); + Assert(freespace >= 0); + + if (off <= maxoff) + { + /* + * Read next item-pointer with additional information: we'll have + * to re-encode it. Copy previous part of page. + */ + memcpy(pageCopy, page, BLCKSZ); + copy_ptr = pageCopy + (ptr - page); + copy_iptr = iptr; + } + + /* Check how many items we're going to add */ + if (RumPageRightMost(page) && off > maxoff) + max_j = btree->nitem; + else + max_j = btree->curitem + 1; + + /* Place items to the page while we have enough of space */ + i = 0; + for (j = btree->curitem; j < max_j; j++) + { + Pointer ptr2; + + ptr2 = page + rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], + &iptr, btree->rumstate, ptr - page); + + freespace = RumDataPageFreeSpacePre(page, ptr2); + if (freespace < 0) + break; + + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, + &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], + &iptr, btree->rumstate); + freespace = RumDataPageFreeSpacePre(page,ptr); + Assert(freespace >= 0); + + iptr = btree->items[j]; + btree->curitem++; + i++; + } + + /* Place rest of the page back */ + if (off <= maxoff) + { + for (j = off; j <= maxoff; j++) + { + copy_ptr = rumDataPageLeafRead(copy_ptr, btree->entryAttnum, + ©_iptr, &addInfo, &addInfoIsNull, btree->rumstate); + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, + ©_iptr, addInfo, addInfoIsNull, + &iptr, btree->rumstate); + + freespace = RumDataPageFreeSpacePre(page,ptr); + Assert(freespace >= 0); + + iptr = copy_iptr; + } + } + + RumPageGetOpaque(page)->maxoff += i; + + freespace = RumDataPageFreeSpacePre(page,ptr); + if (freespace < 0) + elog(ERROR, "Not enough of space in leaf page!"); + + /* Update indexes in the end of page */ + updateItemIndexes(page, btree->entryAttnum, btree->rumstate); + } + else + { + RumDataPageAddItem(page, &(btree->pitem), off); + } +} + +/* Macro for leaf data page split: switch to right page if needed. */ +#define CHECK_SWITCH_TO_RPAGE \ + do { \ + if (ptr - RumDataPageGetData(page) > \ + totalsize / 2 && page == newlPage) \ + { \ + maxLeftIptr = iptr; \ + prevIptr.ip_blkid.bi_hi = 0; \ + prevIptr.ip_blkid.bi_lo = 0; \ + prevIptr.ip_posid = 0; \ + RumPageGetOpaque(newlPage)->maxoff = j; \ + page = rPage; \ + ptr = RumDataPageGetData(rPage); \ + j = FirstOffsetNumber; \ + } \ + else \ + { \ + j++; \ + } \ + } while (0) + + + +/* + * Place tuple and split page, original buffer(lbuf) leaves untouched, + * returns shadow page of lbuf filled new data. + * Item pointers with additional information are distributed between pages by + * equal size on its, not an equal number! 
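+ * A first pass computes the total encoded size with
+ * rumCheckPlaceToDataPageLeaf(); the second pass writes the items and
+ * switches to the right page (CHECK_SWITCH_TO_RPAGE) once roughly half of
+ * that size has been emitted.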
+ */ +static Page +dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, + Page lPage, Page rPage, OffsetNumber off) +{ + OffsetNumber i, j, + maxoff; + Size totalsize = 0, prevTotalsize; + Pointer ptr, copyPtr; + Page page; + Page newlPage = PageGetTempPageCopy(lPage); + Size pageSize = PageGetPageSize(newlPage); + Size maxItemSize = 0; + Datum addInfo = 0; + bool addInfoIsNull; + ItemPointerData iptr, prevIptr, maxLeftIptr; + int totalCount = 0; + int maxItemIndex = btree->curitem; + int freespace; + + static char lpageCopy[BLCKSZ]; + + dataPrepareData(btree, newlPage, off); + maxoff = RumPageGetOpaque(newlPage)->maxoff; + + /* Copy original data of the page */ + memcpy(lpageCopy, newlPage, BLCKSZ); + + /* Reinitialize pages */ + RumInitPage(rPage, RumPageGetOpaque(newlPage)->flags, pageSize); + RumInitPage(newlPage, RumPageGetOpaque(rPage)->flags, pageSize); + + RumPageGetOpaque(newlPage)->maxoff = 0; + RumPageGetOpaque(rPage)->maxoff = 0; + + /* Calculate the whole size we're going to place */ + copyPtr = RumDataPageGetData(lpageCopy); + iptr.ip_blkid.bi_hi = 0; + iptr.ip_blkid.bi_lo = 0; + iptr.ip_posid = 0; + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + if (i == off) + { + prevIptr = iptr; + iptr = btree->items[maxItemIndex]; + + prevTotalsize = totalsize; + totalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &iptr, btree->addInfo[maxItemIndex], + btree->addInfoIsNull[maxItemIndex], + &prevIptr, btree->rumstate, totalsize); + + maxItemIndex++; + totalCount++; + maxItemSize = Max(maxItemSize, totalsize - prevTotalsize); + } + + prevIptr = iptr; + copyPtr = rumDataPageLeafRead(copyPtr, btree->entryAttnum, + &iptr, &addInfo, &addInfoIsNull, btree->rumstate); + + prevTotalsize = totalsize; + totalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &iptr, addInfo, addInfoIsNull, + &prevIptr, btree->rumstate, totalsize); + + totalCount++; + maxItemSize = Max(maxItemSize, totalsize - prevTotalsize); + } + + if (off == maxoff + 1) + { + prevIptr = iptr; + iptr = btree->items[maxItemIndex]; + if (RumPageRightMost(newlPage)) + { + Size newTotalsize; + + /* + * Found how many new item pointer we're going to add using + * worst case assumptions about odd placement and alignment. + */ + while (maxItemIndex < btree->nitem && + (newTotalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &iptr, btree->addInfo[maxItemIndex], + btree->addInfoIsNull[maxItemIndex], + &prevIptr, btree->rumstate, totalsize)) < + 2 * RumDataPageSize - 2 * maxItemSize - 2 * MAXIMUM_ALIGNOF + ) + { + maxItemIndex++; + totalCount++; + maxItemSize = Max(maxItemSize, newTotalsize - totalsize); + totalsize = newTotalsize; + + prevIptr = iptr; + if (maxItemIndex < btree->nitem) + iptr = btree->items[maxItemIndex]; + } + } + else + { + prevTotalsize = totalsize; + totalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &iptr, btree->addInfo[maxItemIndex], + btree->addInfoIsNull[maxItemIndex], + &prevIptr, btree->rumstate, totalsize); + maxItemIndex++; + + totalCount++; + maxItemSize = Max(maxItemSize, totalsize - prevTotalsize); + } + } + + /* + * Place item pointers with additional information to the pages using + * previous calculations. 
+ */ + ptr = RumDataPageGetData(newlPage); + page = newlPage; + j = FirstOffsetNumber; + iptr.ip_blkid.bi_hi = 0; + iptr.ip_blkid.bi_lo = 0; + iptr.ip_posid = 0; + prevIptr = iptr; + copyPtr = RumDataPageGetData(lpageCopy); + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + if (i == off) + { + while (btree->curitem < maxItemIndex) + { + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, + &btree->items[btree->curitem], + btree->addInfo[btree->curitem], + btree->addInfoIsNull[btree->curitem], + &prevIptr, btree->rumstate); + freespace = RumDataPageFreeSpacePre(page, ptr); + Assert(freespace >= 0); + + prevIptr = btree->items[btree->curitem]; + btree->curitem++; + + CHECK_SWITCH_TO_RPAGE; + } + } + + copyPtr = rumDataPageLeafRead(copyPtr, btree->entryAttnum, + &iptr, &addInfo, &addInfoIsNull, btree->rumstate); + + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, &iptr, + addInfo, addInfoIsNull, &prevIptr, btree->rumstate); + freespace = RumDataPageFreeSpacePre(page, ptr); + Assert(freespace >= 0); + + prevIptr = iptr; + + CHECK_SWITCH_TO_RPAGE; + } + + if (off == maxoff + 1) + { + while (btree->curitem < maxItemIndex) + { + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, + &btree->items[btree->curitem], + btree->addInfo[btree->curitem], + btree->addInfoIsNull[btree->curitem], + &prevIptr, btree->rumstate); + freespace = RumDataPageFreeSpacePre(page, ptr); + Assert(freespace >= 0); + + prevIptr = btree->items[btree->curitem]; + btree->curitem++; + + CHECK_SWITCH_TO_RPAGE; + } + } + + RumPageGetOpaque(rPage)->maxoff = j - 1; + + PostingItemSetBlockNumber(&(btree->pitem), BufferGetBlockNumber(lbuf)); + btree->pitem.key = maxLeftIptr; + btree->rightblkno = BufferGetBlockNumber(rbuf); + + *RumDataPageGetRightBound(rPage) = *RumDataPageGetRightBound(lpageCopy); + *RumDataPageGetRightBound(newlPage) = maxLeftIptr; + + /* Fill indexes at the end of pages */ + updateItemIndexes(newlPage, btree->entryAttnum, btree->rumstate); + updateItemIndexes(rPage, btree->entryAttnum, btree->rumstate); + + return newlPage; +} + +/* + * split page and fills WAL record. original buffer(lbuf) leaves untouched, + * returns shadow page of lbuf filled new data. In leaf page and build mode puts all + * ItemPointers to pages. 
Also, in build mode splits data by way to full fulled + * left page + */ +static Page +dataSplitPageInternal(RumBtree btree, Buffer lbuf, Buffer rbuf, + Page lPage, Page rPage, OffsetNumber off) +{ + char *ptr; + OffsetNumber separator; + ItemPointer bound; + Page newlPage = PageGetTempPageCopy(BufferGetPage(lbuf)); + ItemPointerData oldbound = *RumDataPageGetRightBound(newlPage); + int sizeofitem = RumSizeOfDataPageItem(newlPage); + OffsetNumber maxoff = RumPageGetOpaque(newlPage)->maxoff; + Size pageSize = PageGetPageSize(newlPage); + Size freeSpace; + uint32 nCopied = 1; + + static char vector[2 * BLCKSZ]; + + RumInitPage(rPage, RumPageGetOpaque(newlPage)->flags, pageSize); + freeSpace = RumDataPageGetFreeSpace(rPage); + dataPrepareData(btree, newlPage, off); + + memcpy(vector, RumDataPageGetItem(newlPage, FirstOffsetNumber), + maxoff * sizeofitem); + + if (RumPageIsLeaf(newlPage) && RumPageRightMost(newlPage) && + off > RumPageGetOpaque(newlPage)->maxoff) + { + nCopied = 0; + while (btree->curitem < btree->nitem && + maxoff * sizeof(ItemPointerData) < 2 * (freeSpace - sizeof(ItemPointerData))) + { + memcpy(vector + maxoff * sizeof(ItemPointerData), + btree->items + btree->curitem, + sizeof(ItemPointerData)); + maxoff++; + nCopied++; + btree->curitem++; + } + } + else + { + ptr = vector + (off - 1) * sizeofitem; + if (maxoff + 1 - off != 0) + memmove(ptr + sizeofitem, ptr, (maxoff - off + 1) * sizeofitem); + if (RumPageIsLeaf(newlPage)) + { + memcpy(ptr, btree->items + btree->curitem, sizeofitem); + btree->curitem++; + } + else + memcpy(ptr, &(btree->pitem), sizeofitem); + + maxoff++; + } + + /* + * we suppose that during index creation table scaned from begin to end, + * so ItemPointers are monotonically increased.. + */ + if (btree->isBuild && RumPageRightMost(newlPage)) + separator = freeSpace / sizeofitem; + else + separator = maxoff / 2; + + RumInitPage(rPage, RumPageGetOpaque(newlPage)->flags, pageSize); + RumInitPage(newlPage, RumPageGetOpaque(rPage)->flags, pageSize); + + ptr = RumDataPageGetItem(newlPage, FirstOffsetNumber); + memcpy(ptr, vector, separator * sizeofitem); + RumPageGetOpaque(newlPage)->maxoff = separator; + /* Adjust pd_lower */ + ((PageHeader) newlPage)->pd_lower = (ptr + separator * sizeofitem + 1) - + newlPage; + + ptr = RumDataPageGetItem(rPage, FirstOffsetNumber); + memcpy(ptr, vector + separator * sizeofitem, + (maxoff - separator) * sizeofitem); + RumPageGetOpaque(rPage)->maxoff = maxoff - separator; + /* Adjust pd_lower */ + ((PageHeader) rPage)->pd_lower = (ptr + + (maxoff - separator) * sizeofitem + 1) - + rPage; + + PostingItemSetBlockNumber(&(btree->pitem), BufferGetBlockNumber(lbuf)); + if (RumPageIsLeaf(newlPage)) + btree->pitem.key = *(ItemPointerData *) RumDataPageGetItem(newlPage, + RumPageGetOpaque(newlPage)->maxoff); + else + btree->pitem.key = ((PostingItem *) RumDataPageGetItem(newlPage, + RumPageGetOpaque(newlPage)->maxoff))->key; + btree->rightblkno = BufferGetBlockNumber(rbuf); + + /* set up right bound for left page */ + bound = RumDataPageGetRightBound(newlPage); + *bound = btree->pitem.key; + + /* set up right bound for right page */ + bound = RumDataPageGetRightBound(rPage); + *bound = oldbound; + + return newlPage; +} + +/* + * Split page of posting tree. Calls relevant function of internal of leaf page + * because they are handled very different. 
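+ * Leaf data pages keep varbyte-encoded item pointers of variable width,
+ * while internal pages keep fixed-size PostingItems, hence the separate
+ * code paths.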
+ */ +static Page +dataSplitPage(RumBtree btree, Buffer lbuf, Buffer rbuf, + Page lpage, Page rpage, OffsetNumber off) +{ + if (RumPageIsLeaf(BufferGetPage(lbuf))) + return dataSplitPageLeaf(btree, lbuf, rbuf, lpage, rpage, off); + else + return dataSplitPageInternal(btree, lbuf, rbuf, lpage, rpage, off); +} + +/* + * Updates indexes in the end of leaf page which are used for faster search. + * Also updates freespace opaque field of page. Returns last item pointer of + * page. + */ +ItemPointerData +updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate) +{ + Pointer ptr; + ItemPointerData iptr; + int j = 0, maxoff, i; + + /* Iterate over page */ + + maxoff = RumPageGetOpaque(page)->maxoff; + ptr = RumDataPageGetData(page); + iptr.ip_blkid.bi_lo = 0; + iptr.ip_blkid.bi_hi = 0; + iptr.ip_posid = 0; + + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + /* Place next page index entry if it's time to */ + if (i * (RumDataLeafIndexCount + 1) > (j + 1) * maxoff) + { + RumPageGetIndexes(page)[j].iptr = iptr; + RumPageGetIndexes(page)[j].offsetNumer = i; + RumPageGetIndexes(page)[j].pageOffset = ptr - RumDataPageGetData(page); + j++; + } + ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, rumstate); + } + /* Fill rest of page indexes with InvalidOffsetNumber if any */ + for (; j < RumDataLeafIndexCount; j++) + { + RumPageGetIndexes(page)[j].offsetNumer = InvalidOffsetNumber; + } + /* Update freespace of page */ + RumPageGetOpaque(page)->freespace = RumDataPageFreeSpacePre(page, ptr); + /* Adjust pd_lower */ + ((PageHeader) page)->pd_lower = ptr - page; + return iptr; +} + +/* + * Fills new root by right bound values from child. + * Also called from rumxlog, should not use btree + */ +void +rumDataFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, + Page page, Page lpage, Page rpage) +{ + PostingItem li, + ri; + + li.key = *RumDataPageGetRightBound(lpage); + PostingItemSetBlockNumber(&li, BufferGetBlockNumber(lbuf)); + RumDataPageAddItem(page, &li, InvalidOffsetNumber); + + ri.key = *RumDataPageGetRightBound(rpage); + PostingItemSetBlockNumber(&ri, BufferGetBlockNumber(rbuf)); + RumDataPageAddItem(page, &ri, InvalidOffsetNumber); +} + +void +rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState *rumstate) +{ + memset(btree, 0, sizeof(RumBtreeData)); + + btree->index = index; + btree->rumstate = rumstate; + + btree->findChildPage = dataLocateItem; + btree->isMoveRight = dataIsMoveRight; + btree->findItem = dataLocateLeafItem; + btree->findChildPtr = dataFindChildPtr; + btree->getLeftMostPage = dataGetLeftMostPage; + btree->isEnoughSpace = dataIsEnoughSpace; + btree->placeToPage = dataPlaceToPage; + btree->splitPage = dataSplitPage; + btree->fillRoot = rumDataFillRoot; + + btree->isData = TRUE; + btree->searchMode = FALSE; + btree->isDelete = FALSE; + btree->fullScan = FALSE; + btree->isBuild = FALSE; + + btree->entryAttnum = attnum; +} + +RumPostingTreeScan * +rumPrepareScanPostingTree(Relation index, BlockNumber rootBlkno, + bool searchMode, OffsetNumber attnum, RumState *rumstate) +{ + RumPostingTreeScan *gdi = (RumPostingTreeScan *) palloc0(sizeof(RumPostingTreeScan)); + + rumPrepareDataScan(&gdi->btree, index, attnum, rumstate); + + gdi->btree.searchMode = searchMode; + gdi->btree.fullScan = searchMode; + + gdi->stack = rumPrepareFindLeafPage(&gdi->btree, rootBlkno); + + return gdi; +} + +/* + * Inserts array of item pointers, may execute several tree scan (very rare) + */ +void +rumInsertItemPointers(RumState *rumstate, + 
OffsetNumber attnum, + RumPostingTreeScan *gdi, + ItemPointerData *items, + Datum *addInfo, + bool *addInfoIsNull, + uint32 nitem, + GinStatsData *buildStats) +{ + BlockNumber rootBlkno = gdi->stack->blkno; + + + gdi->btree.items = items; + gdi->btree.addInfo = addInfo; + gdi->btree.addInfoIsNull = addInfoIsNull; + + gdi->btree.nitem = nitem; + gdi->btree.curitem = 0; + + while (gdi->btree.curitem < gdi->btree.nitem) + { + if (!gdi->stack) + gdi->stack = rumPrepareFindLeafPage(&gdi->btree, rootBlkno); + + gdi->stack = rumFindLeafPage(&gdi->btree, gdi->stack); + + if (gdi->btree.findItem(&(gdi->btree), gdi->stack)) + { + /* + * gdi->btree.items[gdi->btree.curitem] already exists in index + */ + gdi->btree.curitem++; + LockBuffer(gdi->stack->buffer, RUM_UNLOCK); + freeRumBtreeStack(gdi->stack); + } + else + rumInsertValue(rumstate->index, &(gdi->btree), gdi->stack, buildStats); + + gdi->stack = NULL; + } +} + +Buffer +rumScanBeginPostingTree(RumPostingTreeScan *gdi) +{ + gdi->stack = rumFindLeafPage(&gdi->btree, gdi->stack); + return gdi->stack->buffer; +} diff --git a/rumentrypage.c b/rumentrypage.c new file mode 100644 index 0000000000..51fb16d34f --- /dev/null +++ b/rumentrypage.c @@ -0,0 +1,552 @@ +/*------------------------------------------------------------------------- + * + * rumentrypage.c + * page utilities routines for the postgres inverted index access method. + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "rum.h" + +/* + * Read item pointers with additional information from leaf data page. + * Information is stored in the same manner as in leaf data pages. + */ +void +rumReadTuple(RumState *rumstate, OffsetNumber attnum, + IndexTuple itup, ItemPointerData *ipd, Datum *addInfo, bool *addInfoIsNull) +{ + Pointer ptr; + int nipd = RumGetNPosting(itup), i; + ItemPointerData ip = {{0,0},0}; + + ptr = RumGetPosting(itup); + + if (addInfo && addInfoIsNull) + { + for (i = 0; i < nipd; i++) + { + ptr = rumDataPageLeafRead(ptr, attnum, &ip, &addInfo[i], + &addInfoIsNull[i], rumstate); + ipd[i] = ip; + } + } + else + { + for (i = 0; i < nipd; i++) + { + ptr = rumDataPageLeafRead(ptr, attnum, &ip, NULL, NULL, rumstate); + ipd[i] = ip; + } + } +} + +/* + * Form a non-leaf entry tuple by copying the key data from the given tuple, + * which can be either a leaf or non-leaf entry tuple. + * + * Any posting list in the source tuple is not copied. The specified child + * block number is inserted into t_tid. + */ +static IndexTuple +RumFormInteriorTuple(IndexTuple itup, Page page, BlockNumber childblk) +{ + IndexTuple nitup; + + if (RumPageIsLeaf(page) && !RumIsPostingTree(itup)) + { + /* Tuple contains a posting list, just copy stuff before that */ + uint32 origsize = RumGetPostingOffset(itup); + + origsize = MAXALIGN(origsize); + nitup = (IndexTuple) palloc(origsize); + memcpy(nitup, itup, origsize); + /* ... be sure to fix the size header field ... 
*/ + nitup->t_info &= ~INDEX_SIZE_MASK; + nitup->t_info |= origsize; + } + else + { + /* Copy the tuple as-is */ + nitup = (IndexTuple) palloc(IndexTupleSize(itup)); + memcpy(nitup, itup, IndexTupleSize(itup)); + } + + /* Now insert the correct downlink */ + RumSetDownlink(nitup, childblk); + + return nitup; +} + +/* + * Entry tree is a "static", ie tuple never deletes from it, + * so we don't use right bound, we use rightmost key instead. + */ +static IndexTuple +getRightMostTuple(Page page) +{ + OffsetNumber maxoff = PageGetMaxOffsetNumber(page); + + return (IndexTuple) PageGetItem(page, PageGetItemId(page, maxoff)); +} + +static bool +entryIsMoveRight(RumBtree btree, Page page) +{ + IndexTuple itup; + OffsetNumber attnum; + Datum key; + RumNullCategory category; + + if (RumPageRightMost(page)) + return FALSE; + + itup = getRightMostTuple(page); + attnum = rumtuple_get_attrnum(btree->rumstate, itup); + key = rumtuple_get_key(btree->rumstate, itup, &category); + + if (rumCompareAttEntries(btree->rumstate, + btree->entryAttnum, btree->entryKey, btree->entryCategory, + attnum, key, category) > 0) + return TRUE; + + return FALSE; +} + +/* + * Find correct tuple in non-leaf page. It supposed that + * page correctly chosen and searching value SHOULD be on page + */ +static BlockNumber +entryLocateEntry(RumBtree btree, RumBtreeStack *stack) +{ + OffsetNumber low, + high, + maxoff; + IndexTuple itup = NULL; + int result; + Page page = BufferGetPage(stack->buffer); + + Assert(!RumPageIsLeaf(page)); + Assert(!RumPageIsData(page)); + + if (btree->fullScan) + { + stack->off = FirstOffsetNumber; + stack->predictNumber *= PageGetMaxOffsetNumber(page); + return btree->getLeftMostPage(btree, page); + } + + low = FirstOffsetNumber; + maxoff = high = PageGetMaxOffsetNumber(page); + Assert(high >= low); + + high++; + + while (high > low) + { + OffsetNumber mid = low + ((high - low) / 2); + + if (mid == maxoff && RumPageRightMost(page)) + { + /* Right infinity */ + result = -1; + } + else + { + OffsetNumber attnum; + Datum key; + RumNullCategory category; + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, mid)); + attnum = rumtuple_get_attrnum(btree->rumstate, itup); + key = rumtuple_get_key(btree->rumstate, itup, &category); + result = rumCompareAttEntries(btree->rumstate, + btree->entryAttnum, + btree->entryKey, + btree->entryCategory, + attnum, key, category); + } + + if (result == 0) + { + stack->off = mid; + Assert(RumGetDownlink(itup) != RUM_ROOT_BLKNO); + return RumGetDownlink(itup); + } + else if (result > 0) + low = mid + 1; + else + high = mid; + } + + Assert(high >= FirstOffsetNumber && high <= maxoff); + + stack->off = high; + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, high)); + Assert(RumGetDownlink(itup) != RUM_ROOT_BLKNO); + return RumGetDownlink(itup); +} + +/* + * Searches correct position for value on leaf page. + * Page should be correctly chosen. + * Returns true if value found on page. 
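+ *
+ * On a miss, stack->off is set to the offset at which the value would
+ * have to be inserted into this page.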
+ */ +static bool +entryLocateLeafEntry(RumBtree btree, RumBtreeStack *stack) +{ + Page page = BufferGetPage(stack->buffer); + OffsetNumber low, + high; + + Assert(RumPageIsLeaf(page)); + Assert(!RumPageIsData(page)); + + if (btree->fullScan) + { + stack->off = FirstOffsetNumber; + return TRUE; + } + + low = FirstOffsetNumber; + high = PageGetMaxOffsetNumber(page); + + if (high < low) + { + stack->off = FirstOffsetNumber; + return false; + } + + high++; + + while (high > low) + { + OffsetNumber mid = low + ((high - low) / 2); + IndexTuple itup; + OffsetNumber attnum; + Datum key; + RumNullCategory category; + int result; + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, mid)); + attnum = rumtuple_get_attrnum(btree->rumstate, itup); + key = rumtuple_get_key(btree->rumstate, itup, &category); + result = rumCompareAttEntries(btree->rumstate, + btree->entryAttnum, + btree->entryKey, + btree->entryCategory, + attnum, key, category); + if (result == 0) + { + stack->off = mid; + return true; + } + else if (result > 0) + low = mid + 1; + else + high = mid; + } + + stack->off = high; + return false; +} + +static OffsetNumber +entryFindChildPtr(RumBtree btree, Page page, BlockNumber blkno, OffsetNumber storedOff) +{ + OffsetNumber i, + maxoff = PageGetMaxOffsetNumber(page); + IndexTuple itup; + + Assert(!RumPageIsLeaf(page)); + Assert(!RumPageIsData(page)); + + /* if page isn't changed, we returns storedOff */ + if (storedOff >= FirstOffsetNumber && storedOff <= maxoff) + { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, storedOff)); + if (RumGetDownlink(itup) == blkno) + return storedOff; + + /* + * we hope, that needed pointer goes to right. It's true if there + * wasn't a deletion + */ + for (i = storedOff + 1; i <= maxoff; i++) + { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); + if (RumGetDownlink(itup) == blkno) + return i; + } + maxoff = storedOff - 1; + } + + /* last chance */ + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); + if (RumGetDownlink(itup) == blkno) + return i; + } + + return InvalidOffsetNumber; +} + +static BlockNumber +entryGetLeftMostPage(RumBtree btree, Page page) +{ + IndexTuple itup; + + Assert(!RumPageIsLeaf(page)); + Assert(!RumPageIsData(page)); + Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber); + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber)); + return RumGetDownlink(itup); +} + +static bool +entryIsEnoughSpace(RumBtree btree, Buffer buf, OffsetNumber off) +{ + Size itupsz = 0; + Page page = BufferGetPage(buf); + + Assert(btree->entry); + Assert(!RumPageIsData(page)); + + if (btree->isDelete) + { + IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off)); + + itupsz = MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData); + } + + if (PageGetFreeSpace(page) + itupsz >= MAXALIGN(IndexTupleSize(btree->entry)) + sizeof(ItemIdData)) + return true; + + return false; +} + +/* + * Delete tuple on leaf page if tuples existed and we + * should update it, update old child blkno to new right page + * if child split occurred + */ +static BlockNumber +entryPreparePage(RumBtree btree, Page page, OffsetNumber off) +{ + BlockNumber ret = InvalidBlockNumber; + + Assert(btree->entry); + Assert(!RumPageIsData(page)); + + if (btree->isDelete) + { + Assert(RumPageIsLeaf(page)); + PageIndexTupleDelete(page, off); + } + + if (!RumPageIsLeaf(page) && btree->rightblkno != InvalidBlockNumber) + { + IndexTuple itup = (IndexTuple) 
PageGetItem(page, PageGetItemId(page, off)); + + RumSetDownlink(itup, btree->rightblkno); + ret = btree->rightblkno; + } + + btree->rightblkno = InvalidBlockNumber; + + return ret; +} + +/* + * Place tuple on page and fills WAL record + */ +static void +entryPlaceToPage(RumBtree btree, Page page, OffsetNumber off) +{ + OffsetNumber placed; + + entryPreparePage(btree, page, off); + + placed = PageAddItem(page, (Item) btree->entry, IndexTupleSize(btree->entry), off, false, false); + if (placed != off) + elog(ERROR, "failed to add item to index page in \"%s\"", + RelationGetRelationName(btree->index)); + + btree->entry = NULL; +} + +/* + * Place tuple and split page, original buffer(lbuf) leaves untouched, + * returns shadow page of lbuf filled new data. + * Tuples are distributed between pages by equal size on its, not + * an equal number! + */ +static Page +entrySplitPage(RumBtree btree, Buffer lbuf, Buffer rbuf, + Page lPage, Page rPage, OffsetNumber off) +{ + OffsetNumber i, + maxoff, + separator = InvalidOffsetNumber; + Size totalsize = 0; + Size lsize = 0, + size; + char *ptr; + IndexTuple itup, + leftrightmost = NULL; + Page page; + Page newlPage = PageGetTempPageCopy(lPage); + Size pageSize = PageGetPageSize(newlPage); + + static char tupstore[2 * BLCKSZ]; + + entryPreparePage(btree, newlPage, off); + + maxoff = PageGetMaxOffsetNumber(newlPage); + ptr = tupstore; + + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + if (i == off) + { + size = MAXALIGN(IndexTupleSize(btree->entry)); + memcpy(ptr, btree->entry, size); + ptr += size; + totalsize += size + sizeof(ItemIdData); + } + + itup = (IndexTuple) PageGetItem(newlPage, PageGetItemId(newlPage, i)); + size = MAXALIGN(IndexTupleSize(itup)); + memcpy(ptr, itup, size); + ptr += size; + totalsize += size + sizeof(ItemIdData); + } + + if (off == maxoff + 1) + { + size = MAXALIGN(IndexTupleSize(btree->entry)); + memcpy(ptr, btree->entry, size); + ptr += size; + totalsize += size + sizeof(ItemIdData); + } + + RumInitPage(rPage, RumPageGetOpaque(newlPage)->flags, pageSize); + RumInitPage(newlPage, RumPageGetOpaque(rPage)->flags, pageSize); + + ptr = tupstore; + maxoff++; + lsize = 0; + + page = newlPage; + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + itup = (IndexTuple) ptr; + + if (lsize > totalsize / 2) + { + if (separator == InvalidOffsetNumber) + separator = i - 1; + page = rPage; + } + else + { + leftrightmost = itup; + lsize += MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData); + } + + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false, false) == InvalidOffsetNumber) + elog(ERROR, "failed to add item to index page in \"%s\"", + RelationGetRelationName(btree->index)); + ptr += MAXALIGN(IndexTupleSize(itup)); + } + + btree->entry = RumFormInteriorTuple(leftrightmost, newlPage, + BufferGetBlockNumber(lbuf)); + + btree->rightblkno = BufferGetBlockNumber(rbuf); + + return newlPage; +} + +/* + * return newly allocated rightmost tuple + */ +IndexTuple +rumPageGetLinkItup(Buffer buf, Page page) +{ + IndexTuple itup, + nitup; + + itup = getRightMostTuple(page); + nitup = RumFormInteriorTuple(itup, page, BufferGetBlockNumber(buf)); + + return nitup; +} + +/* + * Fills new root by rightest values from child. 
+ * Also called from rumxlog, should not use btree + */ +void +rumEntryFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, + Page page, Page lpage, Page rpage) +{ + IndexTuple itup; + + itup = rumPageGetLinkItup(lbuf, lpage); + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false, false) == InvalidOffsetNumber) + elog(ERROR, "failed to add item to index root page"); + pfree(itup); + + itup = rumPageGetLinkItup(rbuf, rpage); + if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), InvalidOffsetNumber, false, false) == InvalidOffsetNumber) + elog(ERROR, "failed to add item to index root page"); + pfree(itup); +} + +/* + * Set up RumBtree for entry page access + * + * Note: during WAL recovery, there may be no valid data in rumstate + * other than a faked-up Relation pointer; the key datum is bogus too. + */ +void +rumPrepareEntryScan(RumBtree btree, OffsetNumber attnum, + Datum key, RumNullCategory category, + RumState *rumstate) +{ + memset(btree, 0, sizeof(RumBtreeData)); + + btree->index = rumstate->index; + btree->rumstate = rumstate; + + btree->findChildPage = entryLocateEntry; + btree->isMoveRight = entryIsMoveRight; + btree->findItem = entryLocateLeafEntry; + btree->findChildPtr = entryFindChildPtr; + btree->getLeftMostPage = entryGetLeftMostPage; + btree->isEnoughSpace = entryIsEnoughSpace; + btree->placeToPage = entryPlaceToPage; + btree->splitPage = entrySplitPage; + btree->fillRoot = rumEntryFillRoot; + + btree->isData = FALSE; + btree->searchMode = FALSE; + btree->fullScan = FALSE; + btree->isBuild = FALSE; + + btree->entryAttnum = attnum; + btree->entryKey = key; + btree->entryCategory = category; + btree->isDelete = FALSE; +} diff --git a/rumfast.c b/rumfast.c new file mode 100644 index 0000000000..e05d268d10 --- /dev/null +++ b/rumfast.c @@ -0,0 +1,948 @@ +/*------------------------------------------------------------------------- + * + * rumfast.c + * Fast insert routines for the Postgres inverted index access method. + * Pending entries are stored in linear list of pages. Later on + * (typically during VACUUM), rumInsertCleanup() will be invoked to + * transfer pending entries into the regular index structure. This + * wins because bulk insertion is much more efficient than retail. + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/generic_xlog.h" +#include "access/htup_details.h" +#include "commands/vacuum.h" +#include "miscadmin.h" +#include "utils/memutils.h" +#include "utils/datum.h" + +#include "rum.h" + +#define RUM_PAGE_FREESIZE \ + ( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(RumPageOpaqueData)) ) + +typedef struct KeyArray +{ + Datum *keys; /* expansible array of keys */ + Datum *addInfo; /* expansible array of additional information */ + bool *addInfoIsNull; /* expansible array of NULL flag of additional information */ + RumNullCategory *categories; /* another expansible array */ + int32 nvalues; /* current number of valid entries */ + int32 maxvalues; /* allocated size of arrays */ +} KeyArray; + + +/* + * Build a pending-list page from the given array of tuples, and write it out. + * + * Returns amount of free space left on the page. 
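+ *
+ * Note that on pending-list pages the opaque maxoff field counts heap
+ * rows rather than index tuples: a tail page (rightlink set to
+ * InvalidBlockNumber) is marked as holding one whole row, while pages
+ * whose contents continue on a following page are set to zero.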
+ */ +static int32 +writeListPage(Relation index, Buffer buffer, + IndexTuple *tuples, int32 ntuples, BlockNumber rightlink) +{ + Page page; + int32 i, + freesize; + OffsetNumber l, + off; + GenericXLogState *state; + + state = GenericXLogStart(index); + + page = GenericXLogRegisterBuffer(state, buffer, 0); + RumInitPage(page, RUM_LIST, BufferGetPageSize(buffer)); + + off = FirstOffsetNumber; + + for (i = 0; i < ntuples; i++) + { + int this_size = IndexTupleSize(tuples[i]); + + l = PageAddItem(page, (Item) tuples[i], this_size, off, false, false); + + if (l == InvalidOffsetNumber) + elog(ERROR, "failed to add item to index page in \"%s\"", + RelationGetRelationName(index)); + + off++; + } + + RumPageGetOpaque(page)->rightlink = rightlink; + + /* + * tail page may contain only whole row(s) or final part of row placed on + * previous pages (a "row" here meaning all the index tuples generated for + * one heap tuple) + */ + if (rightlink == InvalidBlockNumber) + { + RumPageSetFullRow(page); + RumPageGetOpaque(page)->maxoff = 1; + } + else + { + RumPageGetOpaque(page)->maxoff = 0; + } + + /* get free space before releasing buffer */ + freesize = PageGetExactFreeSpace(page); + GenericXLogFinish(state); + UnlockReleaseBuffer(buffer); + + return freesize; +} + +static void +makeSublist(Relation index, IndexTuple *tuples, int32 ntuples, + RumMetaPageData *res) +{ + Buffer curBuffer = InvalidBuffer; + Buffer prevBuffer = InvalidBuffer; + int i, + size = 0, + tupsize; + int startTuple = 0; + + Assert(ntuples > 0); + + /* + * Split tuples into pages + */ + for (i = 0; i < ntuples; i++) + { + if (curBuffer == InvalidBuffer) + { + curBuffer = RumNewBuffer(index); + + if (prevBuffer != InvalidBuffer) + { + res->nPendingPages++; + writeListPage(index, prevBuffer, + tuples + startTuple, + i - startTuple, + BufferGetBlockNumber(curBuffer)); + } + else + { + res->head = BufferGetBlockNumber(curBuffer); + } + + prevBuffer = curBuffer; + startTuple = i; + size = 0; + } + + tupsize = MAXALIGN(IndexTupleSize(tuples[i])) + sizeof(ItemIdData); + + if (size + tupsize > RumListPageSize) + { + /* won't fit, force a new page and reprocess */ + i--; + curBuffer = InvalidBuffer; + } + else + { + size += tupsize; + } + } + + /* + * Write last page + */ + res->tail = BufferGetBlockNumber(curBuffer); + res->tailFreeSize = writeListPage(index, curBuffer, + tuples + startTuple, + ntuples - startTuple, + InvalidBlockNumber); + res->nPendingPages++; + /* that was only one heap tuple */ + res->nPendingHeapTuples = 1; +} + +/* + * Write the index tuples contained in *collector into the index's + * pending list. 
+ * + * Function guarantees that all these tuples will be inserted consecutively, + * preserving order + */ +void +rumHeapTupleFastInsert(RumState *rumstate, RumTupleCollector *collector) +{ + Relation index = rumstate->index; + Buffer metabuffer; + Page metapage; + RumMetaPageData *metadata = NULL; + Buffer buffer = InvalidBuffer; + Page page = NULL; + bool separateList = false; + bool needCleanup = false; + GenericXLogState *state; + + if (collector->ntuples == 0) + return; + + state = GenericXLogStart(rumstate->index); + metabuffer = ReadBuffer(index, RUM_METAPAGE_BLKNO); + + if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > RumListPageSize) + { + /* + * Total size is greater than one page => make sublist + */ + separateList = true; + } + else + { + LockBuffer(metabuffer, RUM_EXCLUSIVE); + metadata = RumPageGetMeta(BufferGetPage(metabuffer)); + + if (metadata->head == InvalidBlockNumber || + collector->sumsize + collector->ntuples * sizeof(ItemIdData) > metadata->tailFreeSize) + { + /* + * Pending list is empty or total size is greater than freespace + * on tail page => make sublist + * + * We unlock metabuffer to keep high concurrency + */ + separateList = true; + LockBuffer(metabuffer, RUM_UNLOCK); + } + } + + if (separateList) + { + /* + * We should make sublist separately and append it to the tail + */ + RumMetaPageData sublist; + + memset(&sublist, 0, sizeof(RumMetaPageData)); + makeSublist(index, collector->tuples, collector->ntuples, &sublist); + + /* + * metapage was unlocked, see above + */ + LockBuffer(metabuffer, RUM_EXCLUSIVE); + metapage = GenericXLogRegisterBuffer(state, metabuffer, 0); + metadata = RumPageGetMeta(metapage); + + if (metadata->head == InvalidBlockNumber) + { + /* + * Main list is empty, so just insert sublist as main list + */ + metadata->head = sublist.head; + metadata->tail = sublist.tail; + metadata->tailFreeSize = sublist.tailFreeSize; + + metadata->nPendingPages = sublist.nPendingPages; + metadata->nPendingHeapTuples = sublist.nPendingHeapTuples; + } + else + { + /* + * Merge lists + */ + + buffer = ReadBuffer(index, metadata->tail); + LockBuffer(buffer, RUM_EXCLUSIVE); + page = GenericXLogRegisterBuffer(state, buffer, 0); + + Assert(RumPageGetOpaque(page)->rightlink == InvalidBlockNumber); + + RumPageGetOpaque(page)->rightlink = sublist.head; + + metadata->tail = sublist.tail; + metadata->tailFreeSize = sublist.tailFreeSize; + + metadata->nPendingPages += sublist.nPendingPages; + metadata->nPendingHeapTuples += sublist.nPendingHeapTuples; + } + } + else + { + /* + * Insert into tail page. Metapage is already locked + */ + OffsetNumber l, + off; + int i, + tupsize; + + metapage = GenericXLogRegisterBuffer(state, metabuffer, 0); + metadata = RumPageGetMeta(metapage); + + buffer = ReadBuffer(index, metadata->tail); + LockBuffer(buffer, RUM_EXCLUSIVE); + page = GenericXLogRegisterBuffer(state, buffer, 0); + + off = (PageIsEmpty(page)) ? 
FirstOffsetNumber : + OffsetNumberNext(PageGetMaxOffsetNumber(page)); + + /* + * Increase counter of heap tuples + */ + Assert(RumPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples); + RumPageGetOpaque(page)->maxoff++; + metadata->nPendingHeapTuples++; + + for (i = 0; i < collector->ntuples; i++) + { + tupsize = IndexTupleSize(collector->tuples[i]); + l = PageAddItem(page, (Item) collector->tuples[i], tupsize, off, false, false); + + if (l == InvalidOffsetNumber) + { + GenericXLogAbort(state); + elog(ERROR, "failed to add item to index page in \"%s\"", + RelationGetRelationName(index)); + } + + off++; + } + + metadata->tailFreeSize = PageGetExactFreeSpace(page); + } + + /* + * Force pending list cleanup when it becomes too long. And, + * rumInsertCleanup could take significant amount of time, so we prefer to + * call it when it can do all the work in a single collection cycle. In + * non-vacuum mode, it shouldn't require maintenance_work_mem, so fire it + * while pending list is still small enough to fit into work_mem. + * + * rumInsertCleanup() should not be called inside our CRIT_SECTION. + */ + if (metadata->nPendingPages * RUM_PAGE_FREESIZE > work_mem * 1024L) + needCleanup = true; + + GenericXLogFinish(state); + + if (buffer != InvalidBuffer) + UnlockReleaseBuffer(buffer); + + UnlockReleaseBuffer(metabuffer); + + if (needCleanup) + rumInsertCleanup(rumstate, false, NULL); +} + +static IndexTuple +RumFastFormTuple(RumState *rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + Datum addInfo, + bool addInfoIsNull) +{ + Datum datums[3]; + bool isnull[3]; + IndexTuple itup; + uint32 newsize; + + /* Build the basic tuple: optional column number, plus key datum */ + + if (rumstate->oneCol) + { + datums[0] = key; + isnull[0] = (category != RUM_CAT_NORM_KEY); + datums[1] = addInfo; + isnull[1] = addInfoIsNull; + } + else + { + datums[0] = UInt16GetDatum(attnum); + isnull[0] = false; + datums[1] = key; + isnull[1] = (category != RUM_CAT_NORM_KEY); + datums[2] = addInfo; + isnull[2] = addInfoIsNull; + } + + itup = index_form_tuple(rumstate->tupdesc[attnum - 1], datums, isnull); + + /* + * Place category to the last byte of index tuple extending it's size if + * needed + */ + newsize = IndexTupleSize(itup); + + if (category != RUM_CAT_NORM_KEY) + { + uint32 minsize; + + Assert(IndexTupleHasNulls(itup)); + minsize = IndexInfoFindDataOffset(itup->t_info) + + heap_compute_data_size(rumstate->tupdesc[attnum - 1], datums, isnull) + + sizeof(RumNullCategory); + newsize = Max(newsize, minsize); + } + + newsize = MAXALIGN(newsize); + + if (newsize > Min(INDEX_SIZE_MASK, RumMaxItemSize)) + { + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", + (unsigned long) newsize, + (unsigned long) Min(INDEX_SIZE_MASK, + RumMaxItemSize), + RelationGetRelationName(rumstate->index)))); + pfree(itup); + return NULL; + } + + /* + * Resize tuple if needed + */ + if (newsize != IndexTupleSize(itup)) + { + itup = repalloc(itup, newsize); + + /* set new size in tuple header */ + itup->t_info &= ~INDEX_SIZE_MASK; + itup->t_info |= newsize; + } + + /* + * Insert category byte, if needed + */ + if (category != RUM_CAT_NORM_KEY) + { + Assert(IndexTupleHasNulls(itup)); + RumSetNullCategory(itup, rumstate, category); + } + + return itup; +} + + +/* + * Create temporary index tuples for a single indexable item (one index column + * for the heap tuple specified by ht_ctid), and append them to the array + * in *collector. 
They will subsequently be written out using + * rumHeapTupleFastInsert. Note that to guarantee consistent state, all + * temp tuples for a given heap tuple must be written in one call to + * rumHeapTupleFastInsert. + */ +void +rumHeapTupleFastCollect(RumState *rumstate, + RumTupleCollector *collector, + OffsetNumber attnum, Datum value, bool isNull, + ItemPointer ht_ctid) +{ + Datum *entries; + RumNullCategory *categories; + int32 i, + nentries; + Datum *addInfo; + bool *addInfoIsNull; + + /* + * Extract the key values that need to be inserted in the index + */ + entries = rumExtractEntries(rumstate, attnum, value, isNull, + &nentries, &categories, &addInfo, &addInfoIsNull); + + /* + * Allocate/reallocate memory for storing collected tuples + */ + if (collector->tuples == NULL) + { + collector->lentuples = nentries * rumstate->origTupdesc->natts; + collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples); + } + + while (collector->ntuples + nentries > collector->lentuples) + { + collector->lentuples *= 2; + collector->tuples = (IndexTuple *) repalloc(collector->tuples, + sizeof(IndexTuple) * collector->lentuples); + } + + /* + * Build an index tuple for each key value, and add to array. In pending + * tuples we just stick the heap TID into t_tid. + */ + for (i = 0; i < nentries; i++) + { + IndexTuple itup; + + itup = RumFastFormTuple(rumstate, attnum, entries[i], categories[i], addInfo[i], addInfoIsNull[i]); + itup->t_tid = *ht_ctid; + collector->tuples[collector->ntuples++] = itup; + collector->sumsize += IndexTupleSize(itup); + } +} + +/* + * Deletes pending list pages up to (not including) newHead page. + * If newHead == InvalidBlockNumber then function drops the whole list. + * + * metapage is pinned and exclusive-locked throughout this function. 
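+ *
+ * The metapage update and the RUM_DELETED flag set on each removed page
+ * are written as separate generic WAL records.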
+ * + * Returns true if another cleanup process is running concurrently + * (if so, we can just abandon our own efforts) + */ +static bool +shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, + IndexBulkDeleteResult *stats) +{ + Page metapage; + RumMetaPageData *metadata; + BlockNumber blknoToDelete; + GenericXLogState *metastate; + + metastate = GenericXLogStart(index); + metapage = GenericXLogRegisterBuffer(metastate, metabuffer, + GENERIC_XLOG_FULL_IMAGE); + metadata = RumPageGetMeta(metapage); + blknoToDelete = metadata->head; + + do + { + Page page; + int i; + int64 nDeletedHeapTuples = 0; + rumxlogDeleteListPages data; + Buffer buffers[RUM_NDELETE_AT_ONCE]; + GenericXLogState *state; + + data.ndeleted = 0; + while (data.ndeleted < RUM_NDELETE_AT_ONCE && blknoToDelete != newHead) + { + data.toDelete[data.ndeleted] = blknoToDelete; + buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete); + LockBuffer(buffers[data.ndeleted], RUM_EXCLUSIVE); + + page = BufferGetPage(buffers[data.ndeleted]); + + data.ndeleted++; + + if (RumPageIsDeleted(page)) + { + GenericXLogAbort(metastate); + /* concurrent cleanup process is detected */ + for (i = 0; i < data.ndeleted; i++) + UnlockReleaseBuffer(buffers[i]); + + return true; + } + + nDeletedHeapTuples += RumPageGetOpaque(page)->maxoff; + blknoToDelete = RumPageGetOpaque(page)->rightlink; + } + + if (stats) + stats->pages_deleted += data.ndeleted; + + metadata->head = blknoToDelete; + + Assert(metadata->nPendingPages >= data.ndeleted); + metadata->nPendingPages -= data.ndeleted; + Assert(metadata->nPendingHeapTuples >= nDeletedHeapTuples); + metadata->nPendingHeapTuples -= nDeletedHeapTuples; + + if (blknoToDelete == InvalidBlockNumber) + { + metadata->tail = InvalidBlockNumber; + metadata->tailFreeSize = 0; + metadata->nPendingPages = 0; + metadata->nPendingHeapTuples = 0; + } + + MarkBufferDirty(metabuffer); + + for (i = 0; i < data.ndeleted; i++) + { + state = GenericXLogStart(index); + page = GenericXLogRegisterBuffer(state, buffers[i], 0); + + RumPageGetOpaque(page)->flags = RUM_DELETED; + GenericXLogFinish(state); + } + + for (i = 0; i < data.ndeleted; i++) + UnlockReleaseBuffer(buffers[i]); + } while (blknoToDelete != newHead); + + GenericXLogFinish(metastate); + + return false; +} + +/* Initialize empty KeyArray */ +static void +initKeyArray(KeyArray *keys, int32 maxvalues) +{ + keys->keys = (Datum *) palloc(sizeof(Datum) * maxvalues); + keys->addInfo = (Datum *) palloc(sizeof(Datum) * maxvalues); + keys->addInfoIsNull = (bool *) palloc(sizeof(bool) * maxvalues); + keys->categories = (RumNullCategory *) + palloc(sizeof(RumNullCategory) * maxvalues); + keys->nvalues = 0; + keys->maxvalues = maxvalues; +} + +/* Add datum to KeyArray, resizing if needed */ +static void +addDatum(KeyArray *keys, Datum datum, Datum addInfo, bool addInfoIsNull, RumNullCategory category) +{ + if (keys->nvalues >= keys->maxvalues) + { + keys->maxvalues *= 2; + keys->keys = (Datum *) + repalloc(keys->keys, sizeof(Datum) * keys->maxvalues); + keys->addInfo = (Datum *) + repalloc(keys->addInfo, sizeof(Datum) * keys->maxvalues); + keys->addInfoIsNull = (bool *) + repalloc(keys->addInfoIsNull, sizeof(bool) * keys->maxvalues); + keys->categories = (RumNullCategory *) + repalloc(keys->categories, sizeof(RumNullCategory) * keys->maxvalues); + } + + keys->keys[keys->nvalues] = datum; + keys->categories[keys->nvalues] = category; + keys->addInfo[keys->nvalues] = addInfo; + keys->addInfoIsNull[keys->nvalues] = addInfoIsNull; + keys->nvalues++; +} + +/* + * 
Collect data from a pending-list page in preparation for insertion into + * the main index. + * + * Go through all tuples >= startoff on page and collect values in accum + * + * Note that ka is just workspace --- it does not carry any state across + * calls. + */ +static void +processPendingPage(BuildAccumulator *accum, KeyArray *ka, + Page page, OffsetNumber startoff) +{ + ItemPointerData heapptr; + OffsetNumber i, + maxoff; + OffsetNumber attrnum; + + /* reset *ka to empty */ + ka->nvalues = 0; + + maxoff = PageGetMaxOffsetNumber(page); + Assert(maxoff >= FirstOffsetNumber); + ItemPointerSetInvalid(&heapptr); + attrnum = 0; + + for (i = startoff; i <= maxoff; i = OffsetNumberNext(i)) + { + IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); + OffsetNumber curattnum; + Datum curkey, addInfo = 0; + bool addInfoIsNull = true; + RumNullCategory curcategory; + + /* Check for change of heap TID or attnum */ + curattnum = rumtuple_get_attrnum(accum->rumstate, itup); + + if (OidIsValid(accum->rumstate->addInfoTypeOid[curattnum - 1])) + { + Form_pg_attribute attr = accum->rumstate->addAttrs[curattnum - 1]; + if (accum->rumstate->oneCol) + addInfo = index_getattr(itup, 2, + accum->rumstate->tupdesc[curattnum - 1], &addInfoIsNull); + else + addInfo = index_getattr(itup, 3, + accum->rumstate->tupdesc[curattnum - 1], &addInfoIsNull); + addInfo = datumCopy(addInfo, attr->attbyval, attr->attlen); + } + + if (!ItemPointerIsValid(&heapptr)) + { + heapptr = itup->t_tid; + attrnum = curattnum; + } + else if (!(ItemPointerEquals(&heapptr, &itup->t_tid) && + curattnum == attrnum)) + { + /* + * rumInsertBAEntries can insert several datums per call, but only + * for one heap tuple and one column. So call it at a boundary, + * and reset ka. + */ + rumInsertBAEntries(accum, &heapptr, attrnum, + ka->keys, ka->addInfo, ka->addInfoIsNull, ka->categories, ka->nvalues); + ka->nvalues = 0; + heapptr = itup->t_tid; + attrnum = curattnum; + } + + /* Add key to KeyArray */ + curkey = rumtuple_get_key(accum->rumstate, itup, &curcategory); + addDatum(ka, curkey, addInfo, addInfoIsNull, curcategory); + } + + /* Dump out all remaining keys */ + rumInsertBAEntries(accum, &heapptr, attrnum, + ka->keys, ka->addInfo, ka->addInfoIsNull, ka->categories, ka->nvalues); +} + +/* + * Move tuples from pending pages into regular RUM structure. + * + * This can be called concurrently by multiple backends, so it must cope. + * On first glance it looks completely not concurrent-safe and not crash-safe + * either. The reason it's okay is that multiple insertion of the same entry + * is detected and treated as a no-op by ruminsert.c. If we crash after + * posting entries to the main index and before removing them from the + * pending list, it's okay because when we redo the posting later on, nothing + * bad will happen. Likewise, if two backends simultaneously try to post + * a pending entry into the main index, one will succeed and one will do + * nothing. We try to notice when someone else is a little bit ahead of + * us in the process, but that's just to avoid wasting cycles. Only the + * action of removing a page from the pending list really needs exclusive + * lock. + * + * vac_delay indicates that rumInsertCleanup is called from vacuum process, + * so call vacuum_delay_point() periodically. + * If stats isn't null, we count deleted pending pages into the counts. 
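+ *
+ * The overall flow is: walk the pending list page by page, collecting
+ * entries into a BuildAccumulator; when the end of the list is reached,
+ * or a full row has been read and memory is getting tight, flush the
+ * accumulated entries into the main tree with rumEntryInsert() and then
+ * call shiftList() to unlink the pages that have already been processed.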
+ */ +void +rumInsertCleanup(RumState *rumstate, + bool vac_delay, IndexBulkDeleteResult *stats) +{ + Relation index = rumstate->index; + Buffer metabuffer, + buffer; + Page metapage, + page; + RumMetaPageData *metadata; + MemoryContext opCtx, + oldCtx; + BuildAccumulator accum; + KeyArray datums; + BlockNumber blkno; + + metabuffer = ReadBuffer(index, RUM_METAPAGE_BLKNO); + LockBuffer(metabuffer, RUM_SHARE); + + metapage = BufferGetPage(metabuffer); + metadata = RumPageGetMeta(metapage); + + if (metadata->head == InvalidBlockNumber) + { + /* Nothing to do */ + UnlockReleaseBuffer(metabuffer); + return; + } + + /* + * Read and lock head of pending list + */ + blkno = metadata->head; + buffer = ReadBuffer(index, blkno); + LockBuffer(buffer, RUM_SHARE); + page = BufferGetPage(buffer); + + LockBuffer(metabuffer, RUM_UNLOCK); + + /* + * Initialize. All temporary space will be in opCtx + */ + opCtx = AllocSetContextCreate(CurrentMemoryContext, + "RUM insert cleanup temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + oldCtx = MemoryContextSwitchTo(opCtx); + + initKeyArray(&datums, 128); + rumInitBA(&accum); + accum.rumstate = rumstate; + + /* + * At the top of this loop, we have pin and lock on the current page of + * the pending list. However, we'll release that before exiting the loop. + * Note we also have pin but not lock on the metapage. + */ + for (;;) + { + if (RumPageIsDeleted(page)) + { + /* another cleanup process is running concurrently */ + UnlockReleaseBuffer(buffer); + break; + } + + /* + * read page's datums into accum + */ + processPendingPage(&accum, &datums, page, FirstOffsetNumber); + + vacuum_delay_point(); + + /* + * Is it time to flush memory to disk? Flush if we are at the end of + * the pending list, or if we have a full row and memory is getting + * full. + * + * XXX using up maintenance_work_mem here is probably unreasonably + * much, since vacuum might already be using that much. + */ + if (RumPageGetOpaque(page)->rightlink == InvalidBlockNumber || + (RumPageHasFullRow(page) && + (accum.allocatedMemory >= maintenance_work_mem * 1024L))) + { + RumEntryAccumulatorItem *list; + uint32 nlist; + Datum key; + RumNullCategory category; + OffsetNumber maxoff, + attnum; + + /* + * Unlock current page to increase performance. Changes of page + * will be checked later by comparing maxoff after completion of + * memory flush. + */ + maxoff = PageGetMaxOffsetNumber(page); + LockBuffer(buffer, RUM_UNLOCK); + + /* + * Moving collected data into regular structure can take + * significant amount of time - so, run it without locking pending + * list. 
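+ *
+ * The arrays palloc'd for each entry below are not pfree'd individually;
+ * they live in opCtx and are reclaimed when that context is reset or
+ * deleted.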
+ */ + rumBeginBAScan(&accum); + while ((list = rumGetBAEntry(&accum, + &attnum, &key, &category, &nlist)) != NULL) + { + ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); + Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); + bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); + int i; + + for (i = 0; i < nlist; i++) + { + iptrs[i] = list[i].iptr; + addInfo[i] = list[i].addInfo; + addInfoIsNull[i] = list[i].addInfoIsNull; + } + rumEntryInsert(rumstate, attnum, key, category, + iptrs, addInfo, addInfoIsNull, nlist, NULL); + vacuum_delay_point(); + } + + /* + * Lock the whole list to remove pages + */ + LockBuffer(metabuffer, RUM_EXCLUSIVE); + LockBuffer(buffer, RUM_SHARE); + + if (RumPageIsDeleted(page)) + { + /* another cleanup process is running concurrently */ + UnlockReleaseBuffer(buffer); + LockBuffer(metabuffer, RUM_UNLOCK); + break; + } + + /* + * While we left the page unlocked, more stuff might have gotten + * added to it. If so, process those entries immediately. There + * shouldn't be very many, so we don't worry about the fact that + * we're doing this with exclusive lock. Insertion algorithm + * guarantees that inserted row(s) will not continue on next page. + * NOTE: intentionally no vacuum_delay_point in this loop. + */ + if (PageGetMaxOffsetNumber(page) != maxoff) + { + rumInitBA(&accum); + processPendingPage(&accum, &datums, page, maxoff + 1); + + rumBeginBAScan(&accum); + while ((list = rumGetBAEntry(&accum, + &attnum, &key, &category, &nlist)) != NULL) + { + ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); + Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); + bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); + int i; + + for (i = 0; i < nlist; i++) + { + iptrs[i] = list[i].iptr; + addInfo[i] = list[i].addInfo; + addInfoIsNull[i] = list[i].addInfoIsNull; + } + + rumEntryInsert(rumstate, attnum, key, category, + iptrs, addInfo, addInfoIsNull, nlist, NULL); + } + } + + /* + * Remember next page - it will become the new list head + */ + blkno = RumPageGetOpaque(page)->rightlink; + UnlockReleaseBuffer(buffer); /* shiftList will do exclusive + * locking */ + + /* + * remove read pages from pending list, at this point all + * content of read pages is in regular structure + */ + if (shiftList(index, metabuffer, blkno, stats)) + { + /* another cleanup process is running concurrently */ + LockBuffer(metabuffer, RUM_UNLOCK); + break; + } + + Assert(blkno == metadata->head); + LockBuffer(metabuffer, RUM_UNLOCK); + + /* + * if we removed the whole pending list just exit + */ + if (blkno == InvalidBlockNumber) + break; + + /* + * release memory used so far and reinit state + */ + MemoryContextReset(opCtx); + initKeyArray(&datums, datums.maxvalues); + rumInitBA(&accum); + } + else + { + blkno = RumPageGetOpaque(page)->rightlink; + UnlockReleaseBuffer(buffer); + } + + /* + * Read next page in pending list + */ + vacuum_delay_point(); + buffer = ReadBuffer(index, blkno); + LockBuffer(buffer, RUM_SHARE); + page = BufferGetPage(buffer); + } + + ReleaseBuffer(metabuffer); + + /* Clean up temporary space */ + MemoryContextSwitchTo(oldCtx); + MemoryContextDelete(opCtx); +} diff --git a/rumget.c b/rumget.c new file mode 100644 index 0000000000..bb88485727 --- /dev/null +++ b/rumget.c @@ -0,0 +1,2248 @@ +/*------------------------------------------------------------------------- + * + * rumget.c + * fetch tuples from a RUM scan. 
+ * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "rumsort.h" + +#include "access/relscan.h" +#include "miscadmin.h" +#include "utils/datum.h" +#include "utils/memutils.h" + +#include "rum.h" + +/* GUC parameter */ +int RumFuzzySearchLimit = 0; + +typedef struct pendingPosition +{ + Buffer pendingBuffer; + OffsetNumber firstOffset; + OffsetNumber lastOffset; + ItemPointerData item; + bool *hasMatchKey; +} pendingPosition; + +static bool scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, + Page page, bool equalOk); +static void insertScanItem(RumScanOpaque so, bool recheck); +static int scan_entry_cmp(const void *p1, const void *p2); +static void entryGetItem(RumState *rumstate, RumScanEntry entry); + + +/* + * Convenience function for invoking a key's consistentFn + */ +static bool +callConsistentFn(RumState *rumstate, RumScanKey key) +{ + /* + * If we're dealing with a dummy EVERYTHING key, we don't want to call the + * consistentFn; just claim it matches. + */ + if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING) + { + key->recheckCurItem = false; + return true; + } + + /* + * Initialize recheckCurItem in case the consistentFn doesn't know it + * should set it. The safe assumption in that case is to force recheck. + */ + key->recheckCurItem = true; + + return DatumGetBool(FunctionCall10Coll(&rumstate->consistentFn[key->attnum - 1], + rumstate->supportCollation[key->attnum - 1], + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(&key->recheckCurItem), + PointerGetDatum(key->queryValues), + PointerGetDatum(key->queryCategories), + PointerGetDatum(key->addInfo), + PointerGetDatum(key->addInfoIsNull) + )); +} + +/* + * Tries to refind previously taken ItemPointer on a posting page. 
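+ *
+ * Returns false if the page was deleted by a concurrent vacuum or if all
+ * items on it compare lower than the sought pointer; otherwise *off is
+ * set to the first offset holding an item greater than or equal to it.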
+ */ +static bool +findItemInPostingPage(Page page, ItemPointer item, OffsetNumber *off, + OffsetNumber attnum, RumState *rumstate) +{ + OffsetNumber maxoff = RumPageGetOpaque(page)->maxoff; + int res; + Pointer ptr; + ItemPointerData iptr = {{0, 0}, 0}; + + if (RumPageGetOpaque(page)->flags & RUM_DELETED) + /* page was deleted by concurrent vacuum */ + return false; + + ptr = RumDataPageGetData(page); + /* + * scan page to find equal or first greater value + */ + for (*off = FirstOffsetNumber; *off <= maxoff; (*off)++) + { + ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, rumstate); + + res = rumCompareItemPointers(item, &iptr); + if (res <= 0) + return true; + } + + return false; +} + +/* + * Goes to the next page if current offset is outside of bounds + */ +static bool +moveRightIfItNeeded(RumBtreeData *btree, RumBtreeStack *stack) +{ + Page page = BufferGetPage(stack->buffer); + + if (stack->off > PageGetMaxOffsetNumber(page)) + { + /* + * We scanned the whole page, so we should take right page + */ + if (RumPageRightMost(page)) + return false; /* no more pages */ + + stack->buffer = rumStepRight(stack->buffer, btree->index, RUM_SHARE); + stack->blkno = BufferGetBlockNumber(stack->buffer); + stack->off = FirstOffsetNumber; + } + + return true; +} + +/* + * Scan all pages of a posting tree and save all its heap ItemPointers + * in scanEntry->matchBitmap + */ +static void +scanPostingTree(Relation index, RumScanEntry scanEntry, + BlockNumber rootPostingTree, OffsetNumber attnum, RumState *rumstate) +{ + RumPostingTreeScan *gdi; + Buffer buffer; + Page page; + + /* Descend to the leftmost leaf page */ + gdi = rumPrepareScanPostingTree(index, rootPostingTree, TRUE, attnum, rumstate); + + buffer = rumScanBeginPostingTree(gdi); + IncrBufferRefCount(buffer); /* prevent unpin in freeRumBtreeStack */ + + freeRumBtreeStack(gdi->stack); + pfree(gdi); + + /* + * Loop iterates through all leaf pages of posting tree + */ + for (;;) + { + OffsetNumber maxoff, i; + + page = BufferGetPage(buffer); + maxoff = RumPageGetOpaque(page)->maxoff; + + if ((RumPageGetOpaque(page)->flags & RUM_DELETED) == 0 && + maxoff >= FirstOffsetNumber) + { + ItemPointerData iptr = {{0, 0}, 0}; + Pointer ptr; + + ptr = RumDataPageGetData(page); + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, + rumstate); + tbm_add_tuples(scanEntry->matchBitmap, &iptr, 1, false); + } + + scanEntry->predictNumberResult += maxoff; + } + + if (RumPageRightMost(page)) + break; /* no more pages */ + + buffer = rumStepRight(buffer, index, RUM_SHARE); + } + + UnlockReleaseBuffer(buffer); +} + +/* + * Collects TIDs into scanEntry->matchBitmap for all heap tuples that + * match the search entry. This supports three different match modes: + * + * 1. Partial-match support: scan from current point until the + * comparePartialFn says we're done. + * 2. SEARCH_MODE_ALL: scan from current point (which should be first + * key for the current attnum) until we hit null items or end of attnum + * 3. 
SEARCH_MODE_EVERYTHING: scan from current point (which should be first + * key for the current attnum) until we hit end of attnum + * + * Returns true if done, false if it's necessary to restart scan from scratch + */ +static bool +collectMatchBitmap(RumBtreeData *btree, RumBtreeStack *stack, + RumScanEntry scanEntry) +{ + OffsetNumber attnum; + Form_pg_attribute attr; + + /* Initialize empty bitmap result */ + scanEntry->matchBitmap = tbm_create(work_mem * 1024L); + + /* Null query cannot partial-match anything */ + if (scanEntry->isPartialMatch && + scanEntry->queryCategory != RUM_CAT_NORM_KEY) + return true; + + /* Locate tupdesc entry for key column (for attbyval/attlen data) */ + attnum = scanEntry->attnum; + attr = btree->rumstate->origTupdesc->attrs[attnum - 1]; + + for (;;) + { + Page page; + IndexTuple itup; + Datum idatum; + RumNullCategory icategory; + + /* + * stack->off points to the interested entry, buffer is already locked + */ + if (moveRightIfItNeeded(btree, stack) == false) + return true; + + page = BufferGetPage(stack->buffer); + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off)); + + /* + * If tuple stores another attribute then stop scan + */ + if (rumtuple_get_attrnum(btree->rumstate, itup) != attnum) + return true; + + /* Safe to fetch attribute value */ + idatum = rumtuple_get_key(btree->rumstate, itup, &icategory); + + /* + * Check for appropriate scan stop conditions + */ + if (scanEntry->isPartialMatch) + { + int32 cmp; + + /* + * In partial match, stop scan at any null (including + * placeholders); partial matches never match nulls + */ + if (icategory != RUM_CAT_NORM_KEY) + return true; + + /*---------- + * Check of partial match. + * case cmp == 0 => match + * case cmp > 0 => not match and finish scan + * case cmp < 0 => not match and continue scan + *---------- + */ + cmp = DatumGetInt32(FunctionCall4Coll(&btree->rumstate->comparePartialFn[attnum - 1], + btree->rumstate->supportCollation[attnum - 1], + scanEntry->queryKey, + idatum, + UInt16GetDatum(scanEntry->strategy), + PointerGetDatum(scanEntry->extra_data))); + + if (cmp > 0) + return true; + else if (cmp < 0) + { + stack->off++; + continue; + } + } + else if (scanEntry->searchMode == GIN_SEARCH_MODE_ALL) + { + /* + * In ALL mode, we are not interested in null items, so we can + * stop if we get to a null-item placeholder (which will be the + * last entry for a given attnum). We do want to include NULL_KEY + * and EMPTY_ITEM entries, though. + */ + if (icategory == RUM_CAT_NULL_ITEM) + return true; + } + + /* + * OK, we want to return the TIDs listed in this entry. + */ + if (RumIsPostingTree(itup)) + { + BlockNumber rootPostingTree = RumGetPostingTree(itup); + + /* + * We should unlock current page (but not unpin) during tree scan + * to prevent deadlock with vacuum processes. + * + * We save current entry value (idatum) to be able to re-find our + * tuple after re-locking + */ + if (icategory == RUM_CAT_NORM_KEY) + idatum = datumCopy(idatum, attr->attbyval, attr->attlen); + + LockBuffer(stack->buffer, RUM_UNLOCK); + + /* Collect all the TIDs in this entry's posting tree */ + scanPostingTree(btree->index, scanEntry, rootPostingTree, attnum, btree->rumstate); + + /* + * We lock again the entry page and while it was unlocked insert + * might have occurred, so we need to re-find our position. + */ + LockBuffer(stack->buffer, RUM_SHARE); + page = BufferGetPage(stack->buffer); + if (!RumPageIsLeaf(page)) + { + /* + * Root page becomes non-leaf while we unlock it. 
We will + * start again, this situation doesn't occur often - root can + * became a non-leaf only once per life of index. + */ + return false; + } + + /* Search forward to re-find idatum */ + for (;;) + { + Datum newDatum; + RumNullCategory newCategory; + + if (moveRightIfItNeeded(btree, stack) == false) + elog(ERROR, "lost saved point in index"); /* must not happen !!! */ + + page = BufferGetPage(stack->buffer); + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off)); + + if (rumtuple_get_attrnum(btree->rumstate, itup) != attnum) + elog(ERROR, "lost saved point in index"); /* must not happen !!! */ + newDatum = rumtuple_get_key(btree->rumstate, itup, + &newCategory); + + if (rumCompareEntries(btree->rumstate, attnum, + newDatum, newCategory, + idatum, icategory) == 0) + break; /* Found! */ + + stack->off++; + } + + if (icategory == RUM_CAT_NORM_KEY && !attr->attbyval) + pfree(DatumGetPointer(idatum)); + } + else + { + ItemPointerData *ipd = (ItemPointerData *)palloc( + sizeof(ItemPointerData) * RumGetNPosting(itup)); + rumReadTuple(btree->rumstate, scanEntry->attnum, + itup, ipd, NULL, NULL); + + tbm_add_tuples(scanEntry->matchBitmap, + ipd, RumGetNPosting(itup), false); + scanEntry->predictNumberResult += RumGetNPosting(itup); + } + + /* + * Done with this entry, go to the next + */ + stack->off++; + } +} + +/* + * Start* functions setup beginning state of searches: finds correct buffer and pins it. + */ +static void +startScanEntry(RumState *rumstate, RumScanEntry entry) +{ + RumBtreeData btreeEntry; + RumBtreeStack *stackEntry; + Page page; + bool needUnlock; + +restartScanEntry: + entry->buffer = InvalidBuffer; + ItemPointerSetMin(&entry->curItem); + entry->curAddInfo = (Datum) 0; + entry->curAddInfoIsNull = true; + entry->offset = InvalidOffsetNumber; + entry->list = NULL; + entry->gdi = NULL; + entry->nlist = 0; + entry->matchBitmap = NULL; + entry->matchResult = NULL; + entry->reduceResult = FALSE; + entry->predictNumberResult = 0; + + /* + * we should find entry, and begin scan of posting tree or just store + * posting list in memory + */ + rumPrepareEntryScan(&btreeEntry, entry->attnum, + entry->queryKey, entry->queryCategory, + rumstate); + btreeEntry.searchMode = TRUE; + stackEntry = rumFindLeafPage(&btreeEntry, NULL); + page = BufferGetPage(stackEntry->buffer); + needUnlock = TRUE; + + entry->isFinished = TRUE; + + if (entry->isPartialMatch || + entry->queryCategory == RUM_CAT_EMPTY_QUERY) + { + /* + * btreeEntry.findItem locates the first item >= given search key. + * (For RUM_CAT_EMPTY_QUERY, it will find the leftmost index item + * because of the way the RUM_CAT_EMPTY_QUERY category code is + * assigned.) We scan forward from there and collect all TIDs needed + * for the entry type. + */ + btreeEntry.findItem(&btreeEntry, stackEntry); + if (collectMatchBitmap(&btreeEntry, stackEntry, entry) == false) + { + /* + * RUM tree was seriously restructured, so we will cleanup all + * found data and rescan. 
See comments near 'return false' in + * collectMatchBitmap() + */ + if (entry->matchBitmap) + { + if (entry->matchIterator) + tbm_end_iterate(entry->matchIterator); + entry->matchIterator = NULL; + tbm_free(entry->matchBitmap); + entry->matchBitmap = NULL; + } + LockBuffer(stackEntry->buffer, RUM_UNLOCK); + freeRumBtreeStack(stackEntry); + goto restartScanEntry; + } + + if (entry->matchBitmap && !tbm_is_empty(entry->matchBitmap)) + { + entry->matchIterator = tbm_begin_iterate(entry->matchBitmap); + entry->isFinished = FALSE; + } + } + else if (btreeEntry.findItem(&btreeEntry, stackEntry)) + { + IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off)); + + if (RumIsPostingTree(itup)) + { + BlockNumber rootPostingTree = RumGetPostingTree(itup); + RumPostingTreeScan *gdi; + Page page; + OffsetNumber maxoff, i; + Pointer ptr; + ItemPointerData iptr = {{0,0},0}; + + /* + * We should unlock entry page before touching posting tree to + * prevent deadlocks with vacuum processes. Because entry is never + * deleted from page and posting tree is never reduced to the + * posting list, we can unlock page after getting BlockNumber of + * root of posting tree. + */ + LockBuffer(stackEntry->buffer, RUM_UNLOCK); + needUnlock = FALSE; + gdi = rumPrepareScanPostingTree(rumstate->index, rootPostingTree, TRUE, entry->attnum, rumstate); + + entry->buffer = rumScanBeginPostingTree(gdi); + entry->gdi = gdi; + entry->context = AllocSetContextCreate(CurrentMemoryContext, + "GiST temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + /* + * We keep buffer pinned because we need to prevent deletion of + * page during scan. See RUM's vacuum implementation. RefCount is + * increased to keep buffer pinned after freeRumBtreeStack() call. 
+ */ + page = BufferGetPage(entry->buffer); + entry->predictNumberResult = gdi->stack->predictNumber * RumPageGetOpaque(page)->maxoff; + + /* + * Keep page content in memory to prevent durable page locking + */ + entry->list = (ItemPointerData *) palloc(BLCKSZ * sizeof(ItemPointerData)); + entry->addInfo = (Datum *) palloc(BLCKSZ * sizeof(Datum)); + entry->addInfoIsNull = (bool *) palloc(BLCKSZ * sizeof(bool)); + maxoff = RumPageGetOpaque(page)->maxoff; + entry->nlist = maxoff; + + ptr = RumDataPageGetData(page); + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + ptr = rumDataPageLeafRead(ptr, entry->attnum, &iptr, + &entry->addInfo[i - FirstOffsetNumber], + &entry->addInfoIsNull[i - FirstOffsetNumber], rumstate); + entry->list[i - FirstOffsetNumber] = iptr; + } + + LockBuffer(entry->buffer, RUM_UNLOCK); + entry->isFinished = FALSE; + } + else if (RumGetNPosting(itup) > 0) + { + entry->nlist = RumGetNPosting(itup); + entry->predictNumberResult = entry->nlist; + entry->list = (ItemPointerData *) palloc(sizeof(ItemPointerData) * entry->nlist); + entry->addInfo = (Datum *) palloc(sizeof(Datum) * entry->nlist); + entry->addInfoIsNull = (bool *) palloc(sizeof(bool) * entry->nlist); + + rumReadTuple(rumstate, entry->attnum, itup, + entry->list, entry->addInfo, entry->addInfoIsNull); + + entry->isFinished = FALSE; + } + } + + if (needUnlock) + LockBuffer(stackEntry->buffer, RUM_UNLOCK); + freeRumBtreeStack(stackEntry); +} + +static void +startScanKey(RumState *rumstate, RumScanKey key) +{ + ItemPointerSetMin(&key->curItem); + key->curItemMatches = false; + key->recheckCurItem = false; + key->isFinished = false; +} + +static int +cmpEntries(RumScanEntry e1, RumScanEntry e2) +{ + if (e1->isFinished == TRUE) + { + if (e2->isFinished == TRUE) + return 0; + else + return 1; + } + if (e2->isFinished) + return -1; + return rumCompareItemPointers(&e1->curItem, &e2->curItem); +} + +static int +scan_entry_cmp(const void *p1, const void *p2) +{ + RumScanEntry e1 = *((RumScanEntry *)p1); + RumScanEntry e2 = *((RumScanEntry *)p2); + + return -cmpEntries(e1, e2); +} + +static void +startScan(IndexScanDesc scan) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + RumState *rumstate = &so->rumstate; + uint32 i; + bool useFastScan = false; + + for (i = 0; i < so->totalentries; i++) + { + startScanEntry(rumstate, so->entries[i]); + } + + if (RumFuzzySearchLimit > 0) + { + /* + * If all of keys more than threshold we will try to reduce result, we + * hope (and only hope, for intersection operation of array our + * supposition isn't true), that total result will not more than + * minimal predictNumberResult. 
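+ *
+ * For example, with RumFuzzySearchLimit = 1000 and two entries each
+ * predicting 1,000,000 matches, both estimates are divided by the number
+ * of entries (giving 500,000) and the dropItem() macro below then keeps
+ * each item with probability 1000 / 500,000 = 0.002, so every entry
+ * returns only a few thousand of its items.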
+ */ + bool reduce = true; + + for (i = 0; i < so->totalentries; i++) + { + if (so->entries[i]->predictNumberResult <= so->totalentries * RumFuzzySearchLimit) + { + reduce = false; + break; + } + } + if (reduce) + { + for (i = 0; i < so->totalentries; i++) + { + so->entries[i]->predictNumberResult /= so->totalentries; + so->entries[i]->reduceResult = TRUE; + } + } + } + + for (i = 0; i < so->nkeys; i++) + startScanKey(rumstate, so->keys + i); + + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = &so->keys[i]; + + if (so->rumstate.canPreConsistent[key->attnum - 1]) + { + useFastScan = true; + break; + } + } + + if (useFastScan) + { + for (i = 0; i < so->totalentries; i++) + { + RumScanEntry entry = so->entries[i]; + if (entry->isPartialMatch) + { + useFastScan = false; + break; + } + } + } + + ItemPointerSetMin(&so->iptr); + + if (useFastScan) + { + so->sortedEntries = (RumScanEntry *)palloc(sizeof(RumScanEntry) * + so->totalentries); + memcpy(so->sortedEntries, so->entries, sizeof(RumScanEntry) * + so->totalentries); + for (i = 0; i < so->totalentries; i++) + { + if (!so->sortedEntries[i]->isFinished) + entryGetItem(&so->rumstate, so->sortedEntries[i]); + } + qsort(so->sortedEntries, so->totalentries, sizeof(RumScanEntry), + scan_entry_cmp); + } + + so->useFastScan = useFastScan; +} + +/* + * Gets next ItemPointer from PostingTree. Note, that we copy + * page into RumScanEntry->list array and unlock page, but keep it pinned + * to prevent interference with vacuum + */ +static void +entryGetNextItem(RumState *rumstate, RumScanEntry entry) +{ + Page page; + + for (;;) + { + if (entry->offset < entry->nlist) + { + entry->curItem = entry->list[entry->offset]; + entry->curAddInfo = entry->addInfo[entry->offset]; + entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset]; + entry->offset++; + return; + } + + LockBuffer(entry->buffer, RUM_SHARE); + page = BufferGetPage(entry->buffer); + + if (scanPage(rumstate, entry, &entry->curItem, + BufferGetPage(entry->buffer), + false)) + { + LockBuffer(entry->buffer, RUM_UNLOCK); + return; + } + + for (;;) + { + /* + * It's needed to go by right link. 
During that we should refind + * first ItemPointer greater that stored + */ + if (RumPageRightMost(page)) + { + UnlockReleaseBuffer(entry->buffer); + ItemPointerSetInvalid(&entry->curItem); + + entry->buffer = InvalidBuffer; + entry->isFinished = TRUE; + entry->gdi->stack->buffer = InvalidBuffer; + return; + } + + entry->buffer = rumStepRight(entry->buffer, + rumstate->index, + RUM_SHARE); + entry->gdi->stack->buffer = entry->buffer; + entry->gdi->stack->blkno = BufferGetBlockNumber(entry->buffer); + page = BufferGetPage(entry->buffer); + + entry->offset = InvalidOffsetNumber; + if (!ItemPointerIsValid(&entry->curItem) || + findItemInPostingPage(page, &entry->curItem, &entry->offset, + entry->attnum, rumstate)) + { + OffsetNumber maxoff, i; + Pointer ptr; + ItemPointerData iptr = {{0,0},0}; + + /* + * Found position equal to or greater than stored + */ + maxoff = RumPageGetOpaque(page)->maxoff; + entry->nlist = maxoff; + + ptr = RumDataPageGetData(page); + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + ptr = rumDataPageLeafRead(ptr, entry->attnum, &iptr, + &entry->addInfo[i - FirstOffsetNumber], &entry->addInfoIsNull[i - FirstOffsetNumber], rumstate); + entry->list[i - FirstOffsetNumber] = iptr; + } + + LockBuffer(entry->buffer, RUM_UNLOCK); + + if (!ItemPointerIsValid(&entry->curItem) || + rumCompareItemPointers(&entry->curItem, + entry->list + entry->offset - 1) == 0) + { + /* + * First pages are deleted or empty, or we found exact + * position, so break inner loop and continue outer one. + */ + break; + } + + /* + * Find greater than entry->curItem position, store it. + */ + entry->curItem = entry->list[entry->offset - 1]; + entry->curAddInfo = entry->addInfo[entry->offset - 1]; + entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset - 1]; + + return; + } + } + } +} + +#define rum_rand() (((double) random()) / ((double) MAX_RANDOM_VALUE)) +#define dropItem(e) ( rum_rand() > ((double)RumFuzzySearchLimit)/((double)((e)->predictNumberResult)) ) + +/* + * Sets entry->curItem to next heap item pointer for one entry of one scan key, + * or sets entry->isFinished to TRUE if there are no more. + * + * Item pointers must be returned in ascending order. + * + * Note: this can return a "lossy page" item pointer, indicating that the + * entry potentially matches all items on that heap page. However, it is + * not allowed to return both a lossy page pointer and exact (regular) + * item pointers for the same page. (Doing so would break the key-combination + * logic in keyGetItem and scanGetItem; see comment in scanGetItem.) In the + * current implementation this is guaranteed by the behavior of tidbitmaps. + */ +static void +entryGetItem(RumState *rumstate, RumScanEntry entry) +{ + Assert(!entry->isFinished); + + if (entry->matchBitmap) + { + do + { + if (entry->matchResult == NULL || + entry->offset >= entry->matchResult->ntuples) + { + entry->matchResult = tbm_iterate(entry->matchIterator); + + if (entry->matchResult == NULL) + { + ItemPointerSetInvalid(&entry->curItem); + tbm_end_iterate(entry->matchIterator); + entry->matchIterator = NULL; + entry->isFinished = TRUE; + break; + } + + /* + * Reset counter to the beginning of entry->matchResult. Note: + * entry->offset is still greater than matchResult->ntuples if + * matchResult is lossy. So, on next call we will get next + * result from TIDBitmap. 
+ */ + entry->offset = 0; + } + + if (entry->matchResult->ntuples < 0) + { + /* + * lossy result, so we need to check the whole page + */ + ItemPointerSetLossyPage(&entry->curItem, + entry->matchResult->blockno); + + /* + * We might as well fall out of the loop; we could not + * estimate number of results on this page to support correct + * reducing of result even if it's enabled + */ + break; + } + + ItemPointerSet(&entry->curItem, + entry->matchResult->blockno, + entry->matchResult->offsets[entry->offset]); + entry->offset++; + } while (entry->reduceResult == TRUE && dropItem(entry)); + } + else if (!BufferIsValid(entry->buffer)) + { + entry->offset++; + if (entry->offset <= entry->nlist) + { + entry->curItem = entry->list[entry->offset - 1]; + entry->curAddInfo = entry->addInfo[entry->offset - 1]; + entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset - 1]; + } + else + { + ItemPointerSetInvalid(&entry->curItem); + entry->isFinished = TRUE; + } + } + else + { + do + { + entryGetNextItem(rumstate, entry); + } while (entry->isFinished == FALSE && + entry->reduceResult == TRUE && + dropItem(entry)); + } +} + +/* + * Identify the "current" item among the input entry streams for this scan key, + * and test whether it passes the scan key qual condition. + * + * The current item is the smallest curItem among the inputs. key->curItem + * is set to that value. key->curItemMatches is set to indicate whether that + * TID passes the consistentFn test. If so, key->recheckCurItem is set true + * iff recheck is needed for this item pointer (including the case where the + * item pointer is a lossy page pointer). + * + * If all entry streams are exhausted, sets key->isFinished to TRUE. + * + * Item pointers must be returned in ascending order. + * + * Note: this can return a "lossy page" item pointer, indicating that the + * key potentially matches all items on that heap page. However, it is + * not allowed to return both a lossy page pointer and exact (regular) + * item pointers for the same page. (Doing so would break the key-combination + * logic in scanGetItem.) + */ +static void +keyGetItem(RumState *rumstate, MemoryContext tempCtx, RumScanKey key) +{ + ItemPointerData minItem; + ItemPointerData curPageLossy; + uint32 i; + uint32 lossyEntry; + bool haveLossyEntry; + RumScanEntry entry; + bool res; + MemoryContext oldCtx; + + Assert(!key->isFinished); + + /* + * Find the minimum of the active entry curItems. + * + * Note: a lossy-page entry is encoded by a ItemPointer with max value for + * offset (0xffff), so that it will sort after any exact entries for the + * same page. So we'll prefer to return exact pointers not lossy + * pointers, which is good. + */ + ItemPointerSetMax(&minItem); + + for (i = 0; i < key->nentries; i++) + { + entry = key->scanEntry[i]; + if (entry->isFinished == FALSE && + rumCompareItemPointers(&entry->curItem, &minItem) < 0) + minItem = entry->curItem; + } + + if (ItemPointerIsMax(&minItem)) + { + /* all entries are finished */ + key->isFinished = TRUE; + return; + } + + /* + * We might have already tested this item; if so, no need to repeat work. + * (Note: the ">" case can happen, if minItem is exact but we previously + * had to set curItem to a lossy-page pointer.) + */ + if (rumCompareItemPointers(&key->curItem, &minItem) >= 0) + return; + + /* + * OK, advance key->curItem and perform consistentFn test. 
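+ * Besides entryRes[], the addInfo[] and addInfoIsNull[] arrays are
+ * refilled below before every consistentFn call.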
+ */ + key->curItem = minItem; + + /* + * Lossy-page entries pose a problem, since we don't know the correct + * entryRes state to pass to the consistentFn, and we also don't know what + * its combining logic will be (could be AND, OR, or even NOT). If the + * logic is OR then the consistentFn might succeed for all items in the + * lossy page even when none of the other entries match. + * + * If we have a single lossy-page entry then we check to see if the + * consistentFn will succeed with only that entry TRUE. If so, we return + * a lossy-page pointer to indicate that the whole heap page must be + * checked. (On subsequent calls, we'll do nothing until minItem is past + * the page altogether, thus ensuring that we never return both regular + * and lossy pointers for the same page.) + * + * This idea could be generalized to more than one lossy-page entry, but + * ideally lossy-page entries should be infrequent so it would seldom be + * the case that we have more than one at once. So it doesn't seem worth + * the extra complexity to optimize that case. If we do find more than + * one, we just punt and return a lossy-page pointer always. + * + * Note that only lossy-page entries pointing to the current item's page + * should trigger this processing; we might have future lossy pages in the + * entry array, but they aren't relevant yet. + */ + ItemPointerSetLossyPage(&curPageLossy, + RumItemPointerGetBlockNumber(&key->curItem)); + + lossyEntry = 0; + haveLossyEntry = false; + for (i = 0; i < key->nentries; i++) + { + entry = key->scanEntry[i]; + if (entry->isFinished == FALSE && + rumCompareItemPointers(&entry->curItem, &curPageLossy) == 0) + { + if (haveLossyEntry) + { + /* Multiple lossy entries, punt */ + key->curItem = curPageLossy; + key->curItemMatches = true; + key->recheckCurItem = true; + return; + } + lossyEntry = i; + haveLossyEntry = true; + } + } + + /* prepare for calling consistentFn in temp context */ + oldCtx = MemoryContextSwitchTo(tempCtx); + + if (haveLossyEntry) + { + /* Single lossy-page entry, so see if whole page matches */ + for (i = 0; i < key->nentries; i++) + { + key->addInfo[i] = (Datum) 0; + key->addInfoIsNull[i] = true; + } + memset(key->entryRes, FALSE, key->nentries); + key->entryRes[lossyEntry] = TRUE; + + if (callConsistentFn(rumstate, key)) + { + /* Yes, so clean up ... */ + MemoryContextSwitchTo(oldCtx); + MemoryContextReset(tempCtx); + + /* and return lossy pointer for whole page */ + key->curItem = curPageLossy; + key->curItemMatches = true; + key->recheckCurItem = true; + return; + } + } + + /* + * At this point we know that we don't need to return a lossy whole-page + * pointer, but we might have matches for individual exact item pointers, + * possibly in combination with a lossy pointer. Our strategy if there's + * a lossy pointer is to try the consistentFn both ways and return a hit + * if it accepts either one (forcing the hit to be marked lossy so it will + * be rechecked). An exception is that we don't need to try it both ways + * if the lossy pointer is in a "hidden" entry, because the consistentFn's + * result can't depend on that. + * + * Prepare entryRes array to be passed to consistentFn. 
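+ * ("Hidden" entries are the extra ones appended past key->nuserentries
+ * for non-default search modes; the lossyEntry < key->nuserentries test
+ * below implements this exception.)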
+ */ + for (i = 0; i < key->nentries; i++) + { + entry = key->scanEntry[i]; + if (entry->isFinished == FALSE && + rumCompareItemPointers(&entry->curItem, &key->curItem) == 0) + { + key->entryRes[i] = TRUE; + key->addInfo[i] = entry->curAddInfo; + key->addInfoIsNull[i] = entry->curAddInfoIsNull; + } + else + { + key->entryRes[i] = FALSE; + key->addInfo[i] = (Datum) 0; + key->addInfoIsNull[i] = true; + } + } + if (haveLossyEntry) + { + key->entryRes[lossyEntry] = TRUE; + key->addInfo[lossyEntry] = (Datum) 0; + key->addInfoIsNull[lossyEntry] = true; + } + + res = callConsistentFn(rumstate, key); + + if (!res && haveLossyEntry && lossyEntry < key->nuserentries) + { + /* try the other way for the lossy item */ + key->entryRes[lossyEntry] = FALSE; + key->addInfo[lossyEntry] = (Datum) 0; + key->addInfoIsNull[lossyEntry] = true; + + res = callConsistentFn(rumstate, key); + } + + key->curItemMatches = res; + /* If we matched a lossy entry, force recheckCurItem = true */ + if (haveLossyEntry) + key->recheckCurItem = true; + + /* clean up after consistentFn calls */ + MemoryContextSwitchTo(oldCtx); + MemoryContextReset(tempCtx); +} + +/* + * Get next heap item pointer (after advancePast) from scan. + * Returns true if anything found. + * On success, *item and *recheck are set. + * + * Note: this is very nearly the same logic as in keyGetItem(), except + * that we know the keys are to be combined with AND logic, whereas in + * keyGetItem() the combination logic is known only to the consistentFn. + */ +static bool +scanGetItemRegular(IndexScanDesc scan, ItemPointer advancePast, + ItemPointerData *item, bool *recheck) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + RumState *rumstate = &so->rumstate; + ItemPointerData myAdvancePast = *advancePast; + uint32 i; + bool allFinished; + bool match; + + for (;;) + { + /* + * Advance any entries that are <= myAdvancePast. In particular, + * since entry->curItem was initialized with ItemPointerSetMin, this + * ensures we fetch the first item for each entry on the first call. + */ + allFinished = TRUE; + + for (i = 0; i < so->totalentries; i++) + { + RumScanEntry entry = so->entries[i]; + + while (entry->isFinished == FALSE && + rumCompareItemPointers(&entry->curItem, + &myAdvancePast) <= 0) + entryGetItem(rumstate, entry); + + if (entry->isFinished == FALSE) + allFinished = FALSE; + } + + if (allFinished) + { + /* all entries exhausted, so we're done */ + return false; + } + + /* + * Perform the consistentFn test for each scan key. If any key + * reports isFinished, meaning its subset of the entries is exhausted, + * we can stop. Otherwise, set *item to the minimum of the key + * curItems. + */ + ItemPointerSetMax(item); + + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + if (key->orderBy) + continue; + + keyGetItem(&so->rumstate, so->tempCtx, key); + + if (key->isFinished) + return false; /* finished one of keys */ + + if (rumCompareItemPointers(&key->curItem, item) < 0) + *item = key->curItem; + } + + Assert(!ItemPointerIsMax(item)); + + /*---------- + * Now *item contains first ItemPointer after previous result. + * + * The item is a valid hit only if all the keys succeeded for either + * that exact TID, or a lossy reference to the same page. + * + * This logic works only if a keyGetItem stream can never contain both + * exact and lossy pointers for the same page. Else we could have a + * case like + * + * stream 1 stream 2 + * ... ... + * 42/6 42/7 + * 50/1 42/0xffff + * ... ... 
+ * + * We would conclude that 42/6 is not a match and advance stream 1, + * thus never detecting the match to the lossy pointer in stream 2. + * (keyGetItem has a similar problem versus entryGetItem.) + *---------- + */ + match = true; + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + if (key->orderBy) + continue; + + if (key->curItemMatches) + { + if (rumCompareItemPointers(item, &key->curItem) == 0) + continue; + if (ItemPointerIsLossyPage(&key->curItem) && + RumItemPointerGetBlockNumber(&key->curItem) == + RumItemPointerGetBlockNumber(item)) + continue; + } + match = false; + break; + } + + if (match) + break; + + /* + * No hit. Update myAdvancePast to this TID, so that on the next pass + * we'll move to the next possible entry. + */ + myAdvancePast = *item; + } + + /* + * We must return recheck = true if any of the keys are marked recheck. + */ + *recheck = false; + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + if (key->orderBy) + continue; + + if (key->recheckCurItem) + { + *recheck = true; + break; + } + } + + return TRUE; +} + +static bool +scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, Page page, bool equalOk) +{ + int j; + ItemPointerData iptr = {{0,0},0}; + Pointer ptr; + OffsetNumber first = FirstOffsetNumber, i, maxoff; + bool found; + int cmp; + + if (!RumPageRightMost(page)) + { + cmp = rumCompareItemPointers(RumDataPageGetRightBound(page), item); + if (cmp < 0 || (cmp <= 0 && !equalOk)) + return false; + } + + ptr = RumDataPageGetData(page); + maxoff = RumPageGetOpaque(page)->maxoff; + for (j = 0; j < RumDataLeafIndexCount; j++) + { + RumDataLeafItemIndex *index = &RumPageGetIndexes(page)[j]; + if (index->offsetNumer == InvalidOffsetNumber) + break; + + cmp = rumCompareItemPointers(&index->iptr, item); + if (cmp < 0 || (cmp <= 0 && !equalOk)) + { + ptr = RumDataPageGetData(page) + index->pageOffset; + first = index->offsetNumer; + iptr = index->iptr; + } + else + { + maxoff = index->offsetNumer - 1; + break; + } + } + + entry->nlist = maxoff - first + 1; + entry->offset = InvalidOffsetNumber; + found = false; + for (i = first; i <= maxoff; i++) + { + ptr = rumDataPageLeafRead(ptr, entry->attnum, &iptr, + &entry->addInfo[i - first], + &entry->addInfoIsNull[i - first], + rumstate); + entry->list[i - first] = iptr; + cmp = rumCompareItemPointers(item, &iptr); + if ((cmp < 0 || (cmp <= 0 && equalOk))&& entry->offset == InvalidOffsetNumber) + { + found = true; + entry->offset = i - first + 1; + } + } + if (!found) + return false; + + entry->curItem = entry->list[entry->offset - 1]; + entry->curAddInfo = entry->addInfo[entry->offset - 1]; + entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset - 1]; + return true; +} + +static void +entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) +{ + Page page = NULL; + + if (entry->nlist == 0) + { + entry->isFinished = TRUE; + return; + } + + if (rumCompareItemPointers(&entry->list[entry->nlist - 1], item) >= 0) + { + if (rumCompareItemPointers(&entry->curItem, item) >= 0) + return; + while (entry->offset < entry->nlist) + { + if (rumCompareItemPointers(&entry->list[entry->offset], item) >= 0) + { + entry->curItem = entry->list[entry->offset]; + entry->curAddInfo = entry->addInfo[entry->offset]; + entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset]; + entry->offset++; + return; + } + entry->offset++; + } + } + + + if (!BufferIsValid(entry->buffer)) + { + entry->isFinished = TRUE; + return; + } + + LockBuffer(entry->buffer, 
RUM_SHARE); + + if (scanPage(rumstate, entry, item, + BufferGetPage(entry->buffer), + true)) + { + LockBuffer(entry->buffer, RUM_UNLOCK); + return; + } + + entry->gdi->btree.items = item; + entry->gdi->btree.curitem = 0; + + entry->gdi->stack->buffer = entry->buffer; + entry->gdi->stack = rumReFindLeafPage(&entry->gdi->btree, entry->gdi->stack); + entry->buffer = entry->gdi->stack->buffer; + + page = BufferGetPage(entry->buffer); + + if (scanPage(rumstate, entry, item, + BufferGetPage(entry->buffer), + true)) + { + LockBuffer(entry->buffer, RUM_UNLOCK); + return; + } + + for (;;) + { + /* + * It's needed to go by right link. During that we should refind + * first ItemPointer greater that stored + */ + BlockNumber blkno; + + blkno = RumPageGetOpaque(page)->rightlink; + + LockBuffer(entry->buffer, RUM_UNLOCK); + if (blkno == InvalidBlockNumber) + { + ReleaseBuffer(entry->buffer); + ItemPointerSetInvalid(&entry->curItem); + entry->buffer = InvalidBuffer; + entry->gdi->stack->buffer = InvalidBuffer; + entry->isFinished = TRUE; + return; + } + + entry->buffer = ReleaseAndReadBuffer(entry->buffer, + rumstate->index, + blkno); + entry->gdi->stack->buffer = entry->buffer; + entry->gdi->stack->blkno = blkno; + LockBuffer(entry->buffer, RUM_SHARE); + page = BufferGetPage(entry->buffer); + + if (scanPage(rumstate, entry, item, + BufferGetPage(entry->buffer), + true)) + { + LockBuffer(entry->buffer, RUM_UNLOCK); + return; + } + } +} + +static bool +preConsistentCheck(RumScanOpaque so) +{ + RumState *rumstate = &so->rumstate; + int i, j; + bool recheck; + + for (j = 0; j < so->nkeys; j++) + { + RumScanKey key = &so->keys[j]; + bool hasFalse = false; + + if (key->orderBy) + continue; + + if (!so->rumstate.canPreConsistent[key->attnum - 1]) + continue; + + for (i = 0; i < key->nentries; i++) + { + RumScanEntry entry = key->scanEntry[i]; + key->entryRes[i] = entry->preValue; + if (!entry->preValue) + hasFalse = true; + } + + if (!hasFalse) + continue; + + if (!DatumGetBool(FunctionCall8Coll(&rumstate->preConsistentFn[key->attnum - 1], + rumstate->supportCollation[key->attnum - 1], + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(&recheck), + PointerGetDatum(key->queryValues), + PointerGetDatum(key->queryCategories) + + ))) + return false; + } + return true; +} + +static void +entryShift(int i, RumScanOpaque so, bool find) +{ + int minIndex = -1, j; + uint32 minPredictNumberResult = 0; + RumState *rumstate = &so->rumstate; + + for (j = i; j < so->totalentries; j++) + { + if (minIndex < 0 || so->sortedEntries[j]->predictNumberResult < minPredictNumberResult) + { + minIndex = j; + minPredictNumberResult = so->sortedEntries[j]->predictNumberResult; + } + } + + if (find) + entryFindItem(rumstate, so->sortedEntries[minIndex], &so->sortedEntries[i - 1]->curItem); + else if (!so->sortedEntries[minIndex]->isFinished) + entryGetItem(rumstate, so->sortedEntries[minIndex]); + + while (minIndex > 0 && + cmpEntries(so->sortedEntries[minIndex], so->sortedEntries[minIndex - 1]) > 0) + { + RumScanEntry tmp; + tmp = so->sortedEntries[minIndex]; + so->sortedEntries[minIndex] = so->sortedEntries[minIndex - 1]; + so->sortedEntries[minIndex - 1] = tmp; + minIndex--; + } +} + +static bool +scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, + ItemPointerData *item, bool *recheck) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + int i, j, k; + bool preConsistentFalse, consistentFalse; + + if 
(so->entriesIncrIndex >= 0) + { + for (k = so->entriesIncrIndex; k < so->totalentries; k++) + entryShift(k, so, false); + } + + for (;;) + { + preConsistentFalse = false; + j = 0; + k = 0; + for (i = 0; i < so->totalentries; i++) + so->sortedEntries[i]->preValue = true; + for (i = 1; i < so->totalentries; i++) + { + if (cmpEntries(so->sortedEntries[i], so->sortedEntries[i - 1]) < 0) + { + k = i; + for (; j < i; j++) + so->sortedEntries[j]->preValue = false; + + if (!preConsistentCheck(so)) + { + preConsistentFalse = true; + break; + } + } + } + + if (so->sortedEntries[i - 1]->isFinished == TRUE) + return false; + + if (preConsistentFalse) + { + entryShift(i, so, true); + continue; + } + + consistentFalse = false; + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + if (key->orderBy) + continue; + + for (j = 0; j < key->nentries; j++) + { + RumScanEntry entry = key->scanEntry[j]; + if (entry->isFinished == FALSE && + rumCompareItemPointers(&entry->curItem, + &so->sortedEntries[so->totalentries - 1]->curItem) == 0) + { + key->entryRes[j] = TRUE; + key->addInfo[j] = entry->curAddInfo; + key->addInfoIsNull[j] = entry->curAddInfoIsNull; + } + else + { + key->entryRes[j] = FALSE; + key->addInfo[j] = (Datum) 0; + key->addInfoIsNull[j] = true; + } + } + if (!callConsistentFn(&so->rumstate, key)) + { + consistentFalse = true; + entryShift(k, so, false); + continue; + } + } + + if (consistentFalse) + continue; + + *recheck = false; + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + if (key->orderBy) + continue; + + if (key->recheckCurItem) + { + *recheck = true; + break; + } + } + + *item = so->sortedEntries[so->totalentries - 1]->curItem; + so->entriesIncrIndex = k; + + return true; + } + return false; +} + +static bool +scanGetItem(IndexScanDesc scan, ItemPointer advancePast, + ItemPointerData *item, bool *recheck) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + + if (so->useFastScan) + return scanGetItemFast(scan, advancePast, item, recheck); + else + return scanGetItemRegular(scan, advancePast, item, recheck); +} + + +/* + * Functions for scanning the pending list + */ + + +/* + * Get ItemPointer of next heap row to be checked from pending list. + * Returns false if there are no more. On pages with several heap rows + * it returns each row separately, on page with part of heap row returns + * per page data. pos->firstOffset and pos->lastOffset are set to identify + * the range of pending-list tuples belonging to this heap row. + * + * The pendingBuffer is presumed pinned and share-locked on entry, and is + * pinned and share-locked on success exit. On failure exit it's released. + */ +static bool +scanGetCandidate(IndexScanDesc scan, pendingPosition *pos) +{ + OffsetNumber maxoff; + Page page; + IndexTuple itup; + + ItemPointerSetInvalid(&pos->item); + for (;;) + { + page = BufferGetPage(pos->pendingBuffer); + + maxoff = PageGetMaxOffsetNumber(page); + if (pos->firstOffset > maxoff) + { + BlockNumber blkno = RumPageGetOpaque(page)->rightlink; + + if (blkno == InvalidBlockNumber) + { + UnlockReleaseBuffer(pos->pendingBuffer); + pos->pendingBuffer = InvalidBuffer; + + return false; + } + else + { + /* + * Here we must prevent deletion of next page by insertcleanup + * process, which may be trying to obtain exclusive lock on + * current page. 
So, we lock next page before releasing the + * current one + */ + Buffer tmpbuf = ReadBuffer(scan->indexRelation, blkno); + + LockBuffer(tmpbuf, RUM_SHARE); + UnlockReleaseBuffer(pos->pendingBuffer); + + pos->pendingBuffer = tmpbuf; + pos->firstOffset = FirstOffsetNumber; + } + } + else + { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, pos->firstOffset)); + pos->item = itup->t_tid; + if (RumPageHasFullRow(page)) + { + /* + * find itempointer to the next row + */ + for (pos->lastOffset = pos->firstOffset + 1; pos->lastOffset <= maxoff; pos->lastOffset++) + { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, pos->lastOffset)); + if (!ItemPointerEquals(&pos->item, &itup->t_tid)) + break; + } + } + else + { + /* + * All itempointers are the same on this page + */ + pos->lastOffset = maxoff + 1; + } + + /* + * Now pos->firstOffset points to the first tuple of current heap + * row, pos->lastOffset points to the first tuple of next heap row + * (or to the end of page) + */ + break; + } + } + + return true; +} + +/* + * Scan pending-list page from current tuple (off) up till the first of: + * - match is found (then returns true) + * - no later match is possible + * - tuple's attribute number is not equal to entry's attrnum + * - reach end of page + * + * datum[]/category[]/datumExtracted[] arrays are used to cache the results + * of rumtuple_get_key() on the current page. + */ +static bool +matchPartialInPendingList(RumState *rumstate, Page page, + OffsetNumber off, OffsetNumber maxoff, + RumScanEntry entry, + Datum *datum, RumNullCategory *category, + bool *datumExtracted) +{ + IndexTuple itup; + int32 cmp; + + /* Partial match to a null is not possible */ + if (entry->queryCategory != RUM_CAT_NORM_KEY) + return false; + + while (off < maxoff) + { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off)); + + if (rumtuple_get_attrnum(rumstate, itup) != entry->attnum) + return false; + + if (datumExtracted[off - 1] == false) + { + datum[off - 1] = rumtuple_get_key(rumstate, itup, + &category[off - 1]); + datumExtracted[off - 1] = true; + } + + /* Once we hit nulls, no further match is possible */ + if (category[off - 1] != RUM_CAT_NORM_KEY) + return false; + + /*---------- + * Check partial match. + * case cmp == 0 => match + * case cmp > 0 => not match and end scan (no later match possible) + * case cmp < 0 => not match and continue scan + *---------- + */ + cmp = DatumGetInt32(FunctionCall4Coll(&rumstate->comparePartialFn[entry->attnum - 1], + rumstate->supportCollation[entry->attnum - 1], + entry->queryKey, + datum[off - 1], + UInt16GetDatum(entry->strategy), + PointerGetDatum(entry->extra_data))); + if (cmp == 0) + return true; + else if (cmp > 0) + return false; + + off++; + } + + return false; +} + +/* + * Set up the entryRes array for each key by looking at + * every entry for current heap row in pending list. + * + * Returns true if each scan key has at least one entryRes match. + * This corresponds to the situations where the normal index search will + * try to apply the key's consistentFn. (A tuple not meeting that requirement + * cannot be returned by the normal search since no entry stream will + * source its TID.) + * + * The pendingBuffer is presumed pinned and share-locked on entry. 
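+ * On success the per-key entryRes[], addInfo[] and addInfoIsNull[] arrays
+ * describe the matches found for this heap row; scanPendingInsert then
+ * feeds them to the consistentFn.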
+ */ +static bool +collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + OffsetNumber attrnum; + Page page; + IndexTuple itup; + int i, + j; + + /* + * Reset all entryRes and hasMatchKey flags + */ + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + memset(key->entryRes, FALSE, key->nentries); + memset(key->addInfo, FALSE, sizeof(Datum) * key->nentries); + memset(key->addInfoIsNull, TRUE, key->nentries); + } + memset(pos->hasMatchKey, FALSE, so->nkeys); + + /* + * Outer loop iterates over multiple pending-list pages when a single heap + * row has entries spanning those pages. + */ + for (;;) + { + Datum datum[BLCKSZ / sizeof(IndexTupleData)]; + RumNullCategory category[BLCKSZ / sizeof(IndexTupleData)]; + bool datumExtracted[BLCKSZ / sizeof(IndexTupleData)]; + + Assert(pos->lastOffset > pos->firstOffset); + memset(datumExtracted + pos->firstOffset - 1, 0, + sizeof(bool) * (pos->lastOffset - pos->firstOffset)); + + page = BufferGetPage(pos->pendingBuffer); + + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + for (j = 0; j < key->nentries; j++) + { + RumScanEntry entry = key->scanEntry[j]; + OffsetNumber StopLow = pos->firstOffset, + StopHigh = pos->lastOffset, + StopMiddle; + + /* If already matched on earlier page, do no extra work */ + if (key->entryRes[j]) + continue; + + /* + * Interesting tuples are from pos->firstOffset to + * pos->lastOffset and they are ordered by (attnum, Datum) as + * it's done in entry tree. So we can use binary search to + * avoid linear scanning. + */ + while (StopLow < StopHigh) + { + int res; + + StopMiddle = StopLow + ((StopHigh - StopLow) >> 1); + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, StopMiddle)); + + attrnum = rumtuple_get_attrnum(&so->rumstate, itup); + + if (key->attnum < attrnum) + { + StopHigh = StopMiddle; + continue; + } + if (key->attnum > attrnum) + { + StopLow = StopMiddle + 1; + continue; + } + + if (datumExtracted[StopMiddle - 1] == false) + { + datum[StopMiddle - 1] = + rumtuple_get_key(&so->rumstate, itup, + &category[StopMiddle - 1]); + datumExtracted[StopMiddle - 1] = true; + } + + if (entry->queryCategory == RUM_CAT_EMPTY_QUERY) + { + /* special behavior depending on searchMode */ + if (entry->searchMode == GIN_SEARCH_MODE_ALL) + { + /* match anything except NULL_ITEM */ + if (category[StopMiddle - 1] == RUM_CAT_NULL_ITEM) + res = -1; + else + res = 0; + } + else + { + /* match everything */ + res = 0; + } + } + else + { + res = rumCompareEntries(&so->rumstate, + entry->attnum, + entry->queryKey, + entry->queryCategory, + datum[StopMiddle - 1], + category[StopMiddle - 1]); + } + + if (res == 0) + { + /* + * Found exact match (there can be only one, except in + * EMPTY_QUERY mode). + * + * If doing partial match, scan forward from here to + * end of page to check for matches. + * + * See comment above about tuple's ordering. + */ + if (entry->isPartialMatch) + key->entryRes[j] = + matchPartialInPendingList(&so->rumstate, + page, + StopMiddle, + pos->lastOffset, + entry, + datum, + category, + datumExtracted); + else + { + key->entryRes[j] = true; + if (OidIsValid(so->rumstate.addInfoTypeOid[i])) + key->addInfo[j] = index_getattr(itup, + so->rumstate.oneCol ? 
2 : 3, + so->rumstate.tupdesc[attrnum - 1], + &key->addInfoIsNull[j]); + } + + /* done with binary search */ + break; + } + else if (res < 0) + StopHigh = StopMiddle; + else + StopLow = StopMiddle + 1; + } + + if (StopLow >= StopHigh && entry->isPartialMatch) + { + /* + * No exact match on this page. If doing partial match, + * scan from the first tuple greater than target value to + * end of page. Note that since we don't remember whether + * the comparePartialFn told us to stop early on a + * previous page, we will uselessly apply comparePartialFn + * to the first tuple on each subsequent page. + */ + key->entryRes[j] = + matchPartialInPendingList(&so->rumstate, + page, + StopHigh, + pos->lastOffset, + entry, + datum, + category, + datumExtracted); + } + + pos->hasMatchKey[i] |= key->entryRes[j]; + } + } + + /* Advance firstOffset over the scanned tuples */ + pos->firstOffset = pos->lastOffset; + + if (RumPageHasFullRow(page)) + { + /* + * We have examined all pending entries for the current heap row. + * Break out of loop over pages. + */ + break; + } + else + { + /* + * Advance to next page of pending entries for the current heap + * row. Complain if there isn't one. + */ + ItemPointerData item = pos->item; + + if (scanGetCandidate(scan, pos) == false || + !ItemPointerEquals(&pos->item, &item)) + elog(ERROR, "could not find additional pending pages for same heap tuple"); + } + } + + /* + * Now return "true" if all scan keys have at least one matching datum + */ + for (i = 0; i < so->nkeys; i++) + { + if (pos->hasMatchKey[i] == false) + return false; + } + + return true; +} + +/* + * Collect all matched rows from pending list into bitmap + */ +static int64 +scanPendingInsert(IndexScanDesc scan) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + MemoryContext oldCtx; + bool recheck, + match; + int i; + pendingPosition pos; + Buffer metabuffer = ReadBuffer(scan->indexRelation, RUM_METAPAGE_BLKNO); + BlockNumber blkno; + int64 ntids = 0; + TIDBitmap *tbm = so->tbm; + + LockBuffer(metabuffer, RUM_SHARE); + blkno = RumPageGetMeta(BufferGetPage(metabuffer))->head; + + /* + * fetch head of list before unlocking metapage. head page must be pinned + * to prevent deletion by vacuum process + */ + if (blkno == InvalidBlockNumber) + { + /* No pending list, so proceed with normal scan */ + UnlockReleaseBuffer(metabuffer); + return ntids; + } + + pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno); + LockBuffer(pos.pendingBuffer, RUM_SHARE); + pos.firstOffset = FirstOffsetNumber; + UnlockReleaseBuffer(metabuffer); + pos.hasMatchKey = palloc(sizeof(bool) * so->nkeys); + + /* + * loop for each heap row. scanGetCandidate returns full row or row's + * tuples from first page. + */ + while (scanGetCandidate(scan, &pos)) + { + /* + * Check entries in tuple and set up entryRes array. + * + * If pending tuples belonging to the current heap row are spread + * across several pages, collectMatchesForHeapRow will read all of + * those pages. + */ + if (!collectMatchesForHeapRow(scan, &pos)) + continue; + + /* + * Matching of entries of one row is finished, so check row using + * consistent functions. 
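+ * The row is kept only if every key's consistentFn accepts it, with
+ * recheck ORed across the keys; matching rows go either into the caller's
+ * bitmap or, for ordered scans, into the sort state via insertScanItem.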
+ */ + oldCtx = MemoryContextSwitchTo(so->tempCtx); + recheck = false; + match = true; + + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + if (!callConsistentFn(&so->rumstate, key)) + { + match = false; + break; + } + recheck |= key->recheckCurItem; + } + + MemoryContextSwitchTo(oldCtx); + MemoryContextReset(so->tempCtx); + + if (match) + { + if (tbm) + { + tbm_add_tuples(tbm, &pos.item, 1, recheck); + } + else + { + so->iptr = pos.item; + insertScanItem(so, recheck); + } + ntids++; + } + } + + pfree(pos.hasMatchKey); + return ntids; +} + + +#define RumIsNewKey(s) ( ((RumScanOpaque) scan->opaque)->keys == NULL ) +#define RumIsVoidRes(s) ( ((RumScanOpaque) scan->opaque)->isVoidRes ) + +int64 +rumgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) +{ + RumScanOpaque so = (RumScanOpaque)scan->opaque; + int64 ntids; + bool recheck; + + /* + * Set up the scan keys, and check for unsatisfiable query. + */ + if (RumIsNewKey(scan)) + rumNewScanKey(scan); + + if (RumIsVoidRes(scan)) + return 0; + + ntids = 0; + + /* + * First, scan the pending list and collect any matching entries into the + * bitmap. After we scan a pending item, some other backend could post it + * into the main index, and so we might visit it a second time during the + * main scan. This is okay because we'll just re-set the same bit in the + * bitmap. (The possibility of duplicate visits is a major reason why RUM + * can't support the amgettuple API, however.) Note that it would not do + * to scan the main index before the pending list, since concurrent + * cleanup could then make us miss entries entirely. + */ + so->tbm = tbm; + so->entriesIncrIndex = -1; + ntids = scanPendingInsert(scan); + + /* + * Now scan the main index. + */ + startScan(scan); + + for (;;) + { + CHECK_FOR_INTERRUPTS(); + + if (!scanGetItem(scan, &so->iptr, &so->iptr, &recheck)) + break; + + if (ItemPointerIsLossyPage(&so->iptr)) + tbm_add_page(tbm, ItemPointerGetBlockNumber(&so->iptr)); + else + tbm_add_tuples(tbm, &so->iptr, 1, recheck); + ntids++; + } + + return ntids; +} + +static float8 +keyGetOrdering(RumState *rumstate, MemoryContext tempCtx, RumScanKey key, + ItemPointer iptr) +{ + RumScanEntry entry; + int i; + + for (i = 0; i < key->nentries; i++) + { + entry = key->scanEntry[i]; + if (entry->isFinished == FALSE && + rumCompareItemPointers(&entry->curItem, iptr) == 0) + { + key->addInfo[i] = entry->curAddInfo; + key->addInfoIsNull[i] = entry->curAddInfoIsNull; + key->entryRes[i] = true; + } + else + { + key->addInfo[i] = (Datum) 0; + key->addInfoIsNull[i] = true; + key->entryRes[i] = false; + } + } + + return DatumGetFloat8(FunctionCall10Coll(&rumstate->orderingFn[key->attnum - 1], + rumstate->supportCollation[key->attnum - 1], + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(&key->recheckCurItem), + PointerGetDatum(key->queryValues), + PointerGetDatum(key->queryCategories), + PointerGetDatum(key->addInfo), + PointerGetDatum(key->addInfoIsNull) + )); +} + +static void +insertScanItem(RumScanOpaque so, bool recheck) +{ + RumSortItem *item; + int i, j = 0; + + item = (RumSortItem *)palloc(RumSortItemSize(so->norderbys)); + item->iptr = so->iptr; + item->recheck = recheck; + for (i = 0; i < so->nkeys; i++) + { + if (!so->keys[i].orderBy) + continue; + item->data[j] = keyGetOrdering(&so->rumstate, so->tempCtx, &so->keys[i], &so->iptr); + j++; + } + rum_tuplesort_putrum(so->sortstate, item); +} + +bool 
+rumgettuple(IndexScanDesc scan, ScanDirection direction) +{ + bool recheck; + RumScanOpaque so = (RumScanOpaque)scan->opaque; + RumSortItem *item; + bool should_free; + + if (so->firstCall) + { + so->norderbys = scan->numberOfOrderBys; + + /* + * Set up the scan keys, and check for unsatisfiable query. + */ + if (RumIsNewKey(scan)) + rumNewScanKey(scan); + + if (RumIsVoidRes(scan)) + PG_RETURN_INT64(0); + + so->tbm = NULL; + so->entriesIncrIndex = -1; + so->firstCall = false; + so->sortstate = rum_tuplesort_begin_rum(work_mem, so->norderbys, false); + + scanPendingInsert(scan); + + /* + * Now scan the main index. + */ + startScan(scan); + + while (scanGetItem(scan, &so->iptr, &so->iptr, &recheck)) + { + insertScanItem(so, recheck); + } + rum_tuplesort_performsort(so->sortstate); + } + + item = rum_tuplesort_getrum(so->sortstate, true, &should_free); + if (item) + { + int i, j = 0; + + scan->xs_ctup.t_self = item->iptr; + scan->xs_recheck = item->recheck; + scan->xs_recheckorderby = false; + + for (i = 0; i < so->nkeys; i++) + { + if (!so->keys[i].orderBy) + continue; + scan->xs_orderbyvals[j] = Float8GetDatum(item->data[j]); + scan->xs_orderbynulls[j] = false; + + j++; + } + + if (should_free) + pfree(item); + PG_RETURN_BOOL(true); + } + else + { + PG_RETURN_BOOL(false); + } +} diff --git a/ruminsert.c b/ruminsert.c new file mode 100644 index 0000000000..4334dc2cf8 --- /dev/null +++ b/ruminsert.c @@ -0,0 +1,798 @@ +/*------------------------------------------------------------------------- + * + * ruminsert.c + * insert routines for the postgres inverted index access method. + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/generic_xlog.h" +#include "catalog/index.h" +#include "miscadmin.h" +#include "utils/memutils.h" +#include "utils/datum.h" + +#include "rum.h" + +typedef struct +{ + RumState rumstate; + double indtuples; + GinStatsData buildStats; + MemoryContext tmpCtx; + MemoryContext funcCtx; + BuildAccumulator accum; +} RumBuildState; + +/* + * Creates new posting tree with one page, containing the given TIDs. + * Returns the page number (which will be the root of this posting tree). + * + * items[] must be in sorted order with no duplicates. 
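+ * The caller must pass only as many items as fit on a single leaf data
+ * page; this function merely Asserts that enough free space was left.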
+ */ +static BlockNumber +createPostingTree(RumState *rumstate, OffsetNumber attnum, Relation index, + ItemPointerData *items, Datum *addInfo, bool *addInfoIsNull, uint32 nitems) +{ + BlockNumber blkno; + Buffer buffer = RumNewBuffer(index); + Page page; + int i, + freespace; + Pointer ptr; + ItemPointerData prev_iptr = {{0,0},0}; + GenericXLogState *state; + + state = GenericXLogStart(index); + + page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE); + RumInitPage(page, RUM_DATA | RUM_LEAF, BufferGetPageSize(buffer)); + + blkno = BufferGetBlockNumber(buffer); + + RumPageGetOpaque(page)->maxoff = nitems; + ptr = RumDataPageGetData(page); + for (i = 0; i < nitems; i++) + { + if (i > 0) + prev_iptr = items[i - 1]; + ptr = rumPlaceToDataPageLeaf(ptr, attnum, &items[i], addInfo[i], + addInfoIsNull[i], &prev_iptr, rumstate); + } + freespace = RumDataPageFreeSpacePre(page, ptr); + Assert(freespace >= 0); + updateItemIndexes(page, attnum, rumstate); + + GenericXLogFinish(state); + + UnlockReleaseBuffer(buffer); + + return blkno; +} + +/* + * Form a tuple for entry tree. + * + * If the tuple would be too big to be stored, function throws a suitable + * error if errorTooBig is TRUE, or returns NULL if errorTooBig is FALSE. + * + * See src/backend/access/gin/README for a description of the index tuple + * format that is being built here. We build on the assumption that we + * are making a leaf-level key entry containing a posting list of nipd items. + * If the caller is actually trying to make a posting-tree entry, non-leaf + * entry, or pending-list entry, it should pass nipd = 0 and then overwrite + * the t_tid fields as necessary. In any case, ipd can be NULL to skip + * copying any itempointers into the posting list; the caller is responsible + * for filling the posting list afterwards, if ipd = NULL and nipd > 0. + */ +static IndexTuple +RumFormTuple(RumState *rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + ItemPointerData *ipd, + Datum *addInfo, + bool *addInfoIsNull, + uint32 nipd, + bool errorTooBig) +{ + Datum datums[3]; + bool isnull[3]; + IndexTuple itup; + uint32 newsize; + int i; + ItemPointerData nullItemPointer = {{0,0},0}; + + /* Build the basic tuple: optional column number, plus key datum */ + if (rumstate->oneCol) + { + datums[0] = key; + isnull[0] = (category != RUM_CAT_NORM_KEY); + isnull[1] = true; + } + else + { + datums[0] = UInt16GetDatum(attnum); + isnull[0] = false; + datums[1] = key; + isnull[1] = (category != RUM_CAT_NORM_KEY); + isnull[2] = true; + } + + itup = index_form_tuple(rumstate->tupdesc[attnum - 1], datums, isnull); + + /* + * Determine and store offset to the posting list, making sure there is + * room for the category byte if needed. + * + * Note: because index_form_tuple MAXALIGNs the tuple size, there may well + * be some wasted pad space. Is it worth recomputing the data length to + * prevent that? That would also allow us to Assert that the real data + * doesn't overlap the RumNullCategory byte, which this code currently + * takes on faith. + */ + newsize = IndexTupleSize(itup); + + RumSetPostingOffset(itup, newsize); + + RumSetNPosting(itup, nipd); + + /* + * Add space needed for posting list, if any. Then check that the tuple + * won't be too big to store. 
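+ * Each item's contribution is computed incrementally with
+ * rumCheckPlaceToDataPageLeaf, relative to the previous item pointer and
+ * including its addInfo, mirroring how the data is actually placed below.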
+ */ + + if (nipd > 0) + { + newsize = rumCheckPlaceToDataPageLeaf(attnum, &ipd[0], addInfo[0], + addInfoIsNull[0], &nullItemPointer, rumstate, newsize); + for (i = 1; i < nipd; i++) + { + newsize = rumCheckPlaceToDataPageLeaf(attnum, &ipd[i], addInfo[i], + addInfoIsNull[i], &ipd[i - 1], rumstate, newsize); + } + } + + if (category != RUM_CAT_NORM_KEY) + { + Assert(IndexTupleHasNulls(itup)); + newsize = newsize + sizeof(RumNullCategory); + } + newsize = MAXALIGN(newsize); + + if (newsize > Min(INDEX_SIZE_MASK, RumMaxItemSize)) + { + if (errorTooBig) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", + (unsigned long) newsize, + (unsigned long) Min(INDEX_SIZE_MASK, + RumMaxItemSize), + RelationGetRelationName(rumstate->index)))); + pfree(itup); + return NULL; + } + + /* + * Resize tuple if needed + */ + if (newsize != IndexTupleSize(itup)) + { + itup = repalloc(itup, newsize); + + /* set new size in tuple header */ + itup->t_info &= ~INDEX_SIZE_MASK; + itup->t_info |= newsize; + } + + /* + * Copy in the posting list, if provided + */ + if (nipd > 0) + { + char *ptr = RumGetPosting(itup); + ptr = rumPlaceToDataPageLeaf(ptr, attnum, &ipd[0], addInfo[0], + addInfoIsNull[0], &nullItemPointer, rumstate); + for (i = 1; i < nipd; i++) + { + ptr = rumPlaceToDataPageLeaf(ptr, attnum, &ipd[i], addInfo[i], + addInfoIsNull[i], &ipd[i-1], rumstate); + } + } + + /* + * Insert category byte, if needed + */ + if (category != RUM_CAT_NORM_KEY) + { + Assert(IndexTupleHasNulls(itup)); + RumSetNullCategory(itup, rumstate, category); + } + return itup; +} + +/* + * Adds array of item pointers to tuple's posting list, or + * creates posting tree and tuple pointing to tree in case + * of not enough space. Max size of tuple is defined in + * RumFormTuple(). Returns a new, modified index tuple. + * items[] must be in sorted order with no duplicates. 
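+ * The old tuple itself is left untouched; the caller (rumEntryInsert)
+ * marks it for deletion via btree.isDelete and inserts the returned
+ * tuple in its place.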
+ */ +static IndexTuple +addItemPointersToLeafTuple(RumState *rumstate, + IndexTuple old, + ItemPointerData *items, Datum *addInfo, + bool *addInfoIsNull, uint32 nitem, + GinStatsData *buildStats) +{ + OffsetNumber attnum; + Datum key; + RumNullCategory category; + IndexTuple res; + Datum *oldAddInfo, *newAddInfo; + bool *oldAddInfoIsNull, *newAddInfoIsNull; + ItemPointerData *newItems, *oldItems; + int oldNPosting, newNPosting; + + Assert(!RumIsPostingTree(old)); + + attnum = rumtuple_get_attrnum(rumstate, old); + key = rumtuple_get_key(rumstate, old, &category); + + oldNPosting = RumGetNPosting(old); + + oldItems = (ItemPointerData *)palloc(sizeof(ItemPointerData) * oldNPosting); + oldAddInfo = (Datum *)palloc(sizeof(Datum) * oldNPosting); + oldAddInfoIsNull = (bool *)palloc(sizeof(bool) * oldNPosting); + + newNPosting = oldNPosting + nitem; + + newItems = (ItemPointerData *)palloc(sizeof(ItemPointerData) * newNPosting); + newAddInfo = (Datum *)palloc(sizeof(Datum) * newNPosting); + newAddInfoIsNull = (bool *)palloc(sizeof(bool) * newNPosting); + + rumReadTuple(rumstate, attnum, old, oldItems, oldAddInfo, oldAddInfoIsNull); + + newNPosting = rumMergeItemPointers(newItems, newAddInfo, newAddInfoIsNull, + items, addInfo, addInfoIsNull, nitem, + oldItems, oldAddInfo, oldAddInfoIsNull, oldNPosting); + + + /* try to build tuple with room for all the items */ + res = RumFormTuple(rumstate, attnum, key, category, + newItems, newAddInfo, newAddInfoIsNull, newNPosting, + false); + + if (!res) + { + /* posting list would be too big, convert to posting tree */ + BlockNumber postingRoot; + RumPostingTreeScan *gdi; + + /* + * Initialize posting tree with the old tuple's posting list. It's + * surely small enough to fit on one posting-tree page, and should + * already be in order with no duplicates. + */ + postingRoot = createPostingTree(rumstate, + attnum, + rumstate->index, + oldItems, + oldAddInfo, + oldAddInfoIsNull, + oldNPosting); + + /* During index build, count the newly-added data page */ + if (buildStats) + buildStats->nDataPages++; + + /* Now insert the TIDs-to-be-added into the posting tree */ + gdi = rumPrepareScanPostingTree(rumstate->index, postingRoot, FALSE, attnum, rumstate); + gdi->btree.isBuild = (buildStats != NULL); + + rumInsertItemPointers(rumstate, attnum, gdi, items, addInfo, addInfoIsNull, nitem, buildStats); + + pfree(gdi); + + /* And build a new posting-tree-only result tuple */ + res = RumFormTuple(rumstate, attnum, key, category, NULL, NULL, NULL, 0, true); + RumSetPostingTree(res, postingRoot); + } + + return res; +} + +/* + * Build a fresh leaf tuple, either posting-list or posting-tree format + * depending on whether the given items list will fit. + * items[] must be in sorted order with no duplicates. + * + * This is basically the same logic as in addItemPointersToLeafTuple, + * but working from slightly different input. 
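+ * Here there is no pre-existing posting list: if the items do not fit
+ * into a single index tuple, as many of them as fit on one data page
+ * seed the posting tree and the remainder is inserted through
+ * rumInsertItemPointers.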
+ */ +static IndexTuple +buildFreshLeafTuple(RumState *rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + ItemPointerData *items, Datum *addInfo, + bool *addInfoIsNull, uint32 nitem, + GinStatsData *buildStats) +{ + IndexTuple res; + + /* try to build tuple with room for all the items */ + res = RumFormTuple(rumstate, attnum, key, category, + items, addInfo, addInfoIsNull, nitem, false); + + if (!res) + { + /* posting list would be too big, build posting tree */ + BlockNumber postingRoot; + ItemPointerData prevIptr = {{0,0},0}; + Size size = 0; + int itemsCount = 0; + + do + { + size = rumCheckPlaceToDataPageLeaf(attnum, &items[itemsCount], + addInfo[itemsCount], addInfoIsNull[itemsCount], &prevIptr, + rumstate, size); + prevIptr = items[itemsCount]; + itemsCount++; + } + while (itemsCount < nitem && size < RumDataPageSize); + itemsCount--; + + + /* + * Build posting-tree-only result tuple. We do this first so as to + * fail quickly if the key is too big. + */ + res = RumFormTuple(rumstate, attnum, key, category, NULL, NULL, NULL, 0, true); + + /* + * Initialize posting tree with as many TIDs as will fit on the first + * page. + */ + postingRoot = createPostingTree(rumstate, + attnum, + rumstate->index, + items, + addInfo, + addInfoIsNull, + itemsCount); + + /* During index build, count the newly-added data page */ + if (buildStats) + buildStats->nDataPages++; + + /* Add any remaining TIDs to the posting tree */ + if (nitem > itemsCount) + { + RumPostingTreeScan *gdi; + + gdi = rumPrepareScanPostingTree(rumstate->index, postingRoot, FALSE, attnum, rumstate); + gdi->btree.isBuild = (buildStats != NULL); + + rumInsertItemPointers(rumstate, + attnum, + gdi, + items + itemsCount, + addInfo + itemsCount, + addInfoIsNull + itemsCount, + nitem - itemsCount, + buildStats); + + pfree(gdi); + } + + /* And save the root link in the result tuple */ + RumSetPostingTree(res, postingRoot); + } + + return res; +} + +/* + * Insert one or more heap TIDs associated with the given key value. + * This will either add a single key entry, or enlarge a pre-existing entry. + * + * During an index build, buildStats is non-null and the counters + * it contains should be incremented as needed. 
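+ * addInfo and addInfoIsNull may be passed as NULL; in that case all-null
+ * placeholder arrays are allocated below.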
+ */ +void +rumEntryInsert(RumState *rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + ItemPointerData *items, + Datum *addInfo, + bool *addInfoIsNull, + uint32 nitem, + GinStatsData *buildStats) +{ + RumBtreeData btree; + RumBtreeStack *stack; + IndexTuple itup; + Page page; + int i; + + if (!addInfoIsNull || !addInfo) + { + addInfoIsNull = (bool *)palloc(sizeof(bool) * nitem); + addInfo = (Datum *)palloc(sizeof(Datum) * nitem); + for (i = 0; i < nitem; i++) + { + addInfoIsNull[i] = true; + addInfo[i] = (Datum) 0; + } + } + + /* During index build, count the to-be-inserted entry */ + if (buildStats) + buildStats->nEntries++; + + rumPrepareEntryScan(&btree, attnum, key, category, rumstate); + + stack = rumFindLeafPage(&btree, NULL); + page = BufferGetPage(stack->buffer); + + if (btree.findItem(&btree, stack)) + { + /* found pre-existing entry */ + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off)); + + if (RumIsPostingTree(itup)) + { + /* add entries to existing posting tree */ + BlockNumber rootPostingTree = RumGetPostingTree(itup); + RumPostingTreeScan *gdi; + + /* release all stack */ + LockBuffer(stack->buffer, RUM_UNLOCK); + freeRumBtreeStack(stack); + + /* insert into posting tree */ + gdi = rumPrepareScanPostingTree(rumstate->index, rootPostingTree, FALSE, attnum, rumstate); + gdi->btree.isBuild = (buildStats != NULL); + rumInsertItemPointers(rumstate, attnum, gdi, items, addInfo, addInfoIsNull, nitem, buildStats); + pfree(gdi); + + return; + } + + /* modify an existing leaf entry */ + itup = addItemPointersToLeafTuple(rumstate, itup, + items, addInfo, addInfoIsNull, nitem, buildStats); + + btree.isDelete = TRUE; + } + else + { + /* no match, so construct a new leaf entry */ + itup = buildFreshLeafTuple(rumstate, attnum, key, category, + items, addInfo, addInfoIsNull, nitem, buildStats); + } + + /* Insert the new or modified leaf tuple */ + btree.entry = itup; + rumInsertValue(rumstate->index, &btree, stack, buildStats); + pfree(itup); +} + +/* + * Extract index entries for a single indexable item, and add them to the + * BuildAccumulator's state. + * + * This function is used only during initial index creation. 
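+ * addInfo datums are copied out of the short-lived funcCtx with
+ * datumCopy so that they remain valid after the per-value context is
+ * reset at the end of this function.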
+ */ +static void +rumHeapTupleBulkInsert(RumBuildState *buildstate, OffsetNumber attnum, + Datum value, bool isNull, + ItemPointer heapptr) +{ + Datum *entries; + RumNullCategory *categories; + int32 nentries; + MemoryContext oldCtx; + Datum *addInfo; + bool *addInfoIsNull; + int i; + Form_pg_attribute attr = buildstate->rumstate.addAttrs[attnum - 1]; + + oldCtx = MemoryContextSwitchTo(buildstate->funcCtx); + entries = rumExtractEntries(buildstate->accum.rumstate, attnum, + value, isNull, + &nentries, &categories, + &addInfo, &addInfoIsNull); + MemoryContextSwitchTo(oldCtx); + for (i = 0; i < nentries; i++) + { + if (!addInfoIsNull[i]) + { + addInfo[i] = datumCopy(addInfo[i], attr->attbyval, attr->attlen); + } + } + + rumInsertBAEntries(&buildstate->accum, heapptr, attnum, + entries, addInfo, addInfoIsNull, categories, nentries); + + buildstate->indtuples += nentries; + + MemoryContextReset(buildstate->funcCtx); +} + +static void +rumBuildCallback(Relation index, HeapTuple htup, Datum *values, + bool *isnull, bool tupleIsAlive, void *state) +{ + RumBuildState *buildstate = (RumBuildState *) state; + MemoryContext oldCtx; + int i; + + oldCtx = MemoryContextSwitchTo(buildstate->tmpCtx); + + for (i = 0; i < buildstate->rumstate.origTupdesc->natts; i++) + rumHeapTupleBulkInsert(buildstate, (OffsetNumber) (i + 1), + values[i], isnull[i], + &htup->t_self); + + /* If we've maxed out our available memory, dump everything to the index */ + if (buildstate->accum.allocatedMemory >= maintenance_work_mem * 1024L) + { + RumEntryAccumulatorItem *list; + Datum key; + RumNullCategory category; + uint32 nlist; + OffsetNumber attnum; + + rumBeginBAScan(&buildstate->accum); + while ((list = rumGetBAEntry(&buildstate->accum, + &attnum, &key, &category, &nlist)) != NULL) + { + ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); + Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); + bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); + int i; + + for (i = 0; i < nlist; i++) + { + iptrs[i] = list[i].iptr; + addInfo[i] = list[i].addInfo; + addInfoIsNull[i] = list[i].addInfoIsNull; + } + + + /* there could be many entries, so be willing to abort here */ + CHECK_FOR_INTERRUPTS(); + rumEntryInsert(&buildstate->rumstate, attnum, key, category, + iptrs, addInfo, addInfoIsNull, nlist, &buildstate->buildStats); + } + + MemoryContextReset(buildstate->tmpCtx); + rumInitBA(&buildstate->accum); + } + + MemoryContextSwitchTo(oldCtx); +} + +IndexBuildResult * +rumbuild(Relation heap, Relation index, struct IndexInfo *indexInfo) +{ + IndexBuildResult *result; + double reltuples; + RumBuildState buildstate; + Buffer RootBuffer, + MetaBuffer; + RumEntryAccumulatorItem *list; + Datum key; + RumNullCategory category; + uint32 nlist; + MemoryContext oldCtx; + OffsetNumber attnum; + GenericXLogState *state; + + if (RelationGetNumberOfBlocks(index) != 0) + elog(ERROR, "index \"%s\" already contains data", + RelationGetRelationName(index)); + + initRumState(&buildstate.rumstate, index); + buildstate.indtuples = 0; + memset(&buildstate.buildStats, 0, sizeof(GinStatsData)); + + state = GenericXLogStart(index); + + /* initialize the meta page */ + MetaBuffer = RumNewBuffer(index); + /* initialize the root page */ + RootBuffer = RumNewBuffer(index); + + RumInitMetabuffer(state, MetaBuffer); + RumInitBuffer(state, RootBuffer, RUM_LEAF); + + GenericXLogFinish(state); + + UnlockReleaseBuffer(MetaBuffer); + UnlockReleaseBuffer(RootBuffer); + + /* count the root as first entry page */ + 
buildstate.buildStats.nEntryPages++; + + /* + * create a temporary memory context that is reset once for each tuple + * inserted into the index + */ + buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext, + "Rum build temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + buildstate.funcCtx = AllocSetContextCreate(CurrentMemoryContext, + "Rum build temporary context for user-defined function", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + buildstate.accum.rumstate = &buildstate.rumstate; + rumInitBA(&buildstate.accum); + + /* + * Do the heap scan. We disallow sync scan here because dataPlaceToPage + * prefers to receive tuples in TID order. + */ + reltuples = IndexBuildHeapScan(heap, index, indexInfo, false, + rumBuildCallback, (void *) &buildstate); + + /* dump remaining entries to the index */ + oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx); + rumBeginBAScan(&buildstate.accum); + while ((list = rumGetBAEntry(&buildstate.accum, + &attnum, &key, &category, &nlist)) != NULL) + { + ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); + Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); + bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); + int i; + + for (i = 0; i < nlist; i++) + { + iptrs[i] = list[i].iptr; + addInfo[i] = list[i].addInfo; + addInfoIsNull[i] = list[i].addInfoIsNull; + } + + /* there could be many entries, so be willing to abort here */ + CHECK_FOR_INTERRUPTS(); + rumEntryInsert(&buildstate.rumstate, attnum, key, category, + iptrs, addInfo, addInfoIsNull, nlist, &buildstate.buildStats); + } + MemoryContextSwitchTo(oldCtx); + + MemoryContextDelete(buildstate.funcCtx); + MemoryContextDelete(buildstate.tmpCtx); + + /* + * Update metapage stats + */ + buildstate.buildStats.nTotalPages = RelationGetNumberOfBlocks(index); + rumUpdateStats(index, &buildstate.buildStats); + + /* + * Return statistics + */ + result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult)); + + result->heap_tuples = reltuples; + result->index_tuples = buildstate.indtuples; + + return result; +} + +/* + * rumbuildempty() -- build an empty rum index in the initialization fork + */ +void +rumbuildempty(Relation index) +{ + Buffer RootBuffer, + MetaBuffer; + GenericXLogState *state; + + state = GenericXLogStart(index); + + /* An empty RUM index has two pages. */ + MetaBuffer = + ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL); + LockBuffer(MetaBuffer, BUFFER_LOCK_EXCLUSIVE); + RootBuffer = + ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL); + LockBuffer(RootBuffer, BUFFER_LOCK_EXCLUSIVE); + + /* Initialize and xlog metabuffer and root buffer. */ + RumInitMetabuffer(state, MetaBuffer); + RumInitBuffer(state, RootBuffer, RUM_LEAF); + + GenericXLogFinish(state); + + /* Unlock and release the buffers. 
*/ + UnlockReleaseBuffer(MetaBuffer); + UnlockReleaseBuffer(RootBuffer); + + return ; +} + +/* + * Insert index entries for a single indexable item during "normal" + * (non-fast-update) insertion + */ +static void +rumHeapTupleInsert(RumState *rumstate, OffsetNumber attnum, + Datum value, bool isNull, + ItemPointer item) +{ + Datum *entries; + RumNullCategory *categories; + int32 i, + nentries; + Datum *addInfo; + bool *addInfoIsNull; + + entries = rumExtractEntries(rumstate, attnum, value, isNull, + &nentries, &categories, &addInfo, &addInfoIsNull); + + for (i = 0; i < nentries; i++) + rumEntryInsert(rumstate, attnum, entries[i], categories[i], + item, &addInfo[i], &addInfoIsNull[i], 1, NULL); +} + +bool +ruminsert(Relation index, Datum *values, bool *isnull, + ItemPointer ht_ctid, Relation heapRel, + IndexUniqueCheck checkUnique) +{ + RumState rumstate; + MemoryContext oldCtx; + MemoryContext insertCtx; + int i; + + insertCtx = AllocSetContextCreate(CurrentMemoryContext, + "Rum insert temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + oldCtx = MemoryContextSwitchTo(insertCtx); + + initRumState(&rumstate, index); + + if (RumGetUseFastUpdate(index)) + { + RumTupleCollector collector; + + memset(&collector, 0, sizeof(RumTupleCollector)); + + for (i = 0; i < rumstate.origTupdesc->natts; i++) + rumHeapTupleFastCollect(&rumstate, &collector, + (OffsetNumber) (i + 1), + values[i], isnull[i], + ht_ctid); + + rumHeapTupleFastInsert(&rumstate, &collector); + } + else + { + for (i = 0; i < rumstate.origTupdesc->natts; i++) + rumHeapTupleInsert(&rumstate, (OffsetNumber) (i + 1), + values[i], isnull[i], + ht_ctid); + } + + MemoryContextSwitchTo(oldCtx); + MemoryContextDelete(insertCtx); + + return false; +} diff --git a/rumscan.c b/rumscan.c new file mode 100644 index 0000000000..f7ed8f6a9e --- /dev/null +++ b/rumscan.c @@ -0,0 +1,486 @@ +/*------------------------------------------------------------------------- + * + * rumscan.c + * routines to manage scans of inverted index relations + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/relscan.h" +#include "pgstat.h" +#include "utils/memutils.h" + +#include "rum.h" + +IndexScanDesc +rumbeginscan(Relation rel, int nkeys, int norderbys) +{ + IndexScanDesc scan; + RumScanOpaque so; + + scan = RelationGetIndexScan(rel, nkeys, norderbys); + + /* allocate private workspace */ + so = (RumScanOpaque) palloc(sizeof(RumScanOpaqueData)); + so->sortstate = NULL; + so->keys = NULL; + so->nkeys = 0; + so->firstCall = true; + so->tempCtx = AllocSetContextCreate(CurrentMemoryContext, + "Rum scan temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + initRumState(&so->rumstate, scan->indexRelation); + + scan->opaque = so; + + return scan; +} + +/* + * Create a new RumScanEntry, unless an equivalent one already exists, + * in which case just return it + */ +static RumScanEntry +rumFillScanEntry(RumScanOpaque so, OffsetNumber attnum, + StrategyNumber strategy, int32 searchMode, + Datum queryKey, RumNullCategory queryCategory, + bool isPartialMatch, Pointer extra_data) +{ + RumState *rumstate = &so->rumstate; + RumScanEntry scanEntry; + uint32 i; + + /* + * Look for an 
existing equivalent entry. + * + * Entries with non-null extra_data are never considered identical, since + * we can't know exactly what the opclass might be doing with that. + */ + if (extra_data == NULL || !isPartialMatch) + { + for (i = 0; i < so->totalentries; i++) + { + RumScanEntry prevEntry = so->entries[i]; + + if (prevEntry->extra_data == NULL && + prevEntry->isPartialMatch == isPartialMatch && + prevEntry->strategy == strategy && + prevEntry->searchMode == searchMode && + prevEntry->attnum == attnum && + rumCompareEntries(rumstate, attnum, + prevEntry->queryKey, + prevEntry->queryCategory, + queryKey, + queryCategory) == 0) + { + /* Successful match */ + return prevEntry; + } + } + } + + /* Nope, create a new entry */ + scanEntry = (RumScanEntry) palloc(sizeof(RumScanEntryData)); + scanEntry->queryKey = queryKey; + scanEntry->queryCategory = queryCategory; + scanEntry->isPartialMatch = isPartialMatch; + scanEntry->extra_data = extra_data; + scanEntry->strategy = strategy; + scanEntry->searchMode = searchMode; + scanEntry->attnum = attnum; + + scanEntry->buffer = InvalidBuffer; + ItemPointerSetMin(&scanEntry->curItem); + scanEntry->matchBitmap = NULL; + scanEntry->matchIterator = NULL; + scanEntry->matchResult = NULL; + scanEntry->list = NULL; + scanEntry->nlist = 0; + scanEntry->offset = InvalidOffsetNumber; + scanEntry->isFinished = false; + scanEntry->reduceResult = false; + + /* Add it to so's array */ + if (so->totalentries >= so->allocentries) + { + so->allocentries *= 2; + so->entries = (RumScanEntry *) + repalloc(so->entries, so->allocentries * sizeof(RumScanEntry)); + } + so->entries[so->totalentries++] = scanEntry; + + return scanEntry; +} + +/* + * Initialize the next RumScanKey using the output from the extractQueryFn + */ +static void +rumFillScanKey(RumScanOpaque so, OffsetNumber attnum, + StrategyNumber strategy, int32 searchMode, + Datum query, uint32 nQueryValues, + Datum *queryValues, RumNullCategory *queryCategories, + bool *partial_matches, Pointer *extra_data, + bool orderBy) +{ + RumScanKey key = &(so->keys[so->nkeys++]); + RumState *rumstate = &so->rumstate; + uint32 nUserQueryValues = nQueryValues; + uint32 i; + + /* Non-default search modes add one "hidden" entry to each key */ + if (searchMode != GIN_SEARCH_MODE_DEFAULT) + nQueryValues++; + key->nentries = nQueryValues; + key->nuserentries = nUserQueryValues; + key->orderBy = orderBy; + + key->scanEntry = (RumScanEntry *) palloc(sizeof(RumScanEntry) * nQueryValues); + key->entryRes = (bool *) palloc0(sizeof(bool) * nQueryValues); + key->addInfo = (Datum *) palloc0(sizeof(Datum) * nQueryValues); + key->addInfoIsNull = (bool *) palloc(sizeof(bool) * nQueryValues); + for (i = 0; i < nQueryValues; i++) + key->addInfoIsNull[i] = true; + + key->query = query; + key->queryValues = queryValues; + key->queryCategories = queryCategories; + key->extra_data = extra_data; + key->strategy = strategy; + key->searchMode = searchMode; + key->attnum = attnum; + + ItemPointerSetMin(&key->curItem); + key->curItemMatches = false; + key->recheckCurItem = false; + key->isFinished = false; + + for (i = 0; i < nQueryValues; i++) + { + Datum queryKey; + RumNullCategory queryCategory; + bool isPartialMatch; + Pointer this_extra; + + if (i < nUserQueryValues) + { + /* set up normal entry using extractQueryFn's outputs */ + queryKey = queryValues[i]; + queryCategory = queryCategories[i]; + isPartialMatch = + (rumstate->canPartialMatch[attnum - 1] && partial_matches) + ? partial_matches[i] : false; + this_extra = (extra_data) ? 
extra_data[i] : NULL; + } + else + { + /* set up hidden entry */ + queryKey = (Datum) 0; + switch (searchMode) + { + case GIN_SEARCH_MODE_INCLUDE_EMPTY: + queryCategory = RUM_CAT_EMPTY_ITEM; + break; + case GIN_SEARCH_MODE_ALL: + queryCategory = RUM_CAT_EMPTY_QUERY; + break; + case GIN_SEARCH_MODE_EVERYTHING: + queryCategory = RUM_CAT_EMPTY_QUERY; + break; + default: + elog(ERROR, "unexpected searchMode: %d", searchMode); + queryCategory = 0; /* keep compiler quiet */ + break; + } + isPartialMatch = false; + this_extra = NULL; + + /* + * We set the strategy to a fixed value so that rumFillScanEntry + * can combine these entries for different scan keys. This is + * safe because the strategy value in the entry struct is only + * used for partial-match cases. It's OK to overwrite our local + * variable here because this is the last loop iteration. + */ + strategy = InvalidStrategy; + } + + key->scanEntry[i] = rumFillScanEntry(so, attnum, + strategy, searchMode, + queryKey, queryCategory, + isPartialMatch, this_extra); + } +} + +static void +freeScanKeys(RumScanOpaque so) +{ + uint32 i; + + if (so->keys == NULL) + return; + + for (i = 0; i < so->nkeys; i++) + { + RumScanKey key = so->keys + i; + + pfree(key->scanEntry); + pfree(key->entryRes); + } + + pfree(so->keys); + so->keys = NULL; + so->nkeys = 0; + + for (i = 0; i < so->totalentries; i++) + { + RumScanEntry entry = so->entries[i]; + + if (entry->gdi) + { + freeRumBtreeStack(entry->gdi->stack); + pfree(entry->gdi); + } + else + { + if (entry->buffer != InvalidBuffer) + ReleaseBuffer(entry->buffer); + } + if (entry->list) + pfree(entry->list); + if (entry->matchIterator) + tbm_end_iterate(entry->matchIterator); + if (entry->matchBitmap) + tbm_free(entry->matchBitmap); + pfree(entry); + } + + pfree(so->entries); + so->entries = NULL; + so->totalentries = 0; +} + +static void +initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) +{ + Datum *queryValues; + int32 nQueryValues = 0; + bool *partial_matches = NULL; + Pointer *extra_data = NULL; + bool *nullFlags = NULL; + int32 searchMode = GIN_SEARCH_MODE_DEFAULT; + + /* + * We assume that RUM-indexable operators are strict, so a null query + * argument means an unsatisfiable query. + */ + if (skey->sk_flags & SK_ISNULL) + { + so->isVoidRes = true; + return; + } + + /* OK to call the extractQueryFn */ + queryValues = (Datum *) + DatumGetPointer(FunctionCall7Coll(&so->rumstate.extractQueryFn[skey->sk_attno - 1], + so->rumstate.supportCollation[skey->sk_attno - 1], + skey->sk_argument, + PointerGetDatum(&nQueryValues), + UInt16GetDatum(skey->sk_strategy), + PointerGetDatum(&partial_matches), + PointerGetDatum(&extra_data), + PointerGetDatum(&nullFlags), + PointerGetDatum(&searchMode))); + + /* + * If bogus searchMode is returned, treat as RUM_SEARCH_MODE_ALL; note + * in particular we don't allow extractQueryFn to select + * RUM_SEARCH_MODE_EVERYTHING. + */ + if (searchMode < GIN_SEARCH_MODE_DEFAULT || + searchMode > GIN_SEARCH_MODE_ALL) + searchMode = GIN_SEARCH_MODE_ALL; + + /* Non-default modes require the index to have placeholders */ + if (searchMode != GIN_SEARCH_MODE_DEFAULT) + *hasNullQuery = true; + + /* + * In default mode, no keys means an unsatisfiable query. + */ + if (queryValues == NULL || nQueryValues <= 0) + { + if (searchMode == GIN_SEARCH_MODE_DEFAULT) + { + so->isVoidRes = true; + return; + } + nQueryValues = 0; /* ensure sane value */ + } + + /* + * If the extractQueryFn didn't create a nullFlags array, create one, + * assuming that everything's non-null. 
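+ * (Illustration, assuming rum.h keeps GIN's convention that category value
+ * zero means an ordinary non-null key: the palloc0'd array for a three-key
+ * query reads {false, false, false}, which the cast to RumNullCategory
+ * below then treats as three ordinary-key category codes.)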
Otherwise, run through the + * array and make sure each value is exactly 0 or 1; this ensures + * binary compatibility with the RumNullCategory representation. While + * at it, detect whether any null keys are present. + */ + if (nullFlags == NULL) + nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool)); + else + { + int32 j; + + for (j = 0; j < nQueryValues; j++) + { + if (nullFlags[j]) + { + nullFlags[j] = true; /* not any other nonzero value */ + *hasNullQuery = true; + } + } + } + /* now we can use the nullFlags as category codes */ + + rumFillScanKey(so, skey->sk_attno, + skey->sk_strategy, searchMode, + skey->sk_argument, nQueryValues, + queryValues, (RumNullCategory *) nullFlags, + partial_matches, extra_data, + (skey->sk_flags & SK_ORDER_BY) ? true: false); +} + +void +rumNewScanKey(IndexScanDesc scan) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + int i; + bool hasNullQuery = false; + + /* if no scan keys provided, allocate extra EVERYTHING RumScanKey */ + so->keys = (RumScanKey) + palloc(Max(scan->numberOfKeys + scan->numberOfOrderBys, 1) * + sizeof(RumScanKeyData)); + so->nkeys = 0; + + /* initialize expansible array of RumScanEntry pointers */ + so->totalentries = 0; + so->allocentries = 32; + so->entries = (RumScanEntry *) + palloc0(so->allocentries * sizeof(RumScanEntry)); + + so->isVoidRes = false; + + for (i = 0; i < scan->numberOfKeys; i++) + { + initScanKey(so, &scan->keyData[i], &hasNullQuery); + if (so->isVoidRes) + break; + } + + for (i = 0; i < scan->numberOfOrderBys; i++) + { + initScanKey(so, &scan->orderByData[i], &hasNullQuery); + if (so->isVoidRes) + break; + } + + if (scan->numberOfOrderBys > 0) + { + scan->xs_orderbyvals = palloc0(sizeof(Datum) * scan->numberOfOrderBys); + scan->xs_orderbynulls = palloc(sizeof(bool) * scan->numberOfOrderBys); + memset(scan->xs_orderbynulls, true, sizeof(bool) * + scan->numberOfOrderBys); + } + + /* + * If there are no regular scan keys, generate an EVERYTHING scankey to + * drive a full-index scan. + */ + if (so->nkeys == 0 && !so->isVoidRes) + { + hasNullQuery = true; + rumFillScanKey(so, FirstOffsetNumber, + InvalidStrategy, GIN_SEARCH_MODE_EVERYTHING, + (Datum) 0, 0, + NULL, NULL, NULL, NULL, false); + } + + /* + * If the index is version 0, it may be missing null and placeholder + * entries, which would render searches for nulls and full-index scans + * unreliable. Throw an error if so. 
+ */ + if (hasNullQuery && !so->isVoidRes) + { + GinStatsData rumStats; + + rumGetStats(scan->indexRelation, &rumStats); + if (rumStats.ginVersion < 1) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("old RUM indexes do not support whole-index scans nor searches for nulls"), + errhint("To fix this, do REINDEX INDEX \"%s\".", + RelationGetRelationName(scan->indexRelation)))); + } + + pgstat_count_index_scan(scan->indexRelation); +} + +void +rumrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, + ScanKey orderbys, int norderbys) +{ + /* remaining arguments are ignored */ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + + so->firstCall = true; + + freeScanKeys(so); + + if (scankey && scan->numberOfKeys > 0) + { + memmove(scan->keyData, scankey, + scan->numberOfKeys * sizeof(ScanKeyData)); + memmove(scan->orderByData, orderbys, + scan->numberOfOrderBys * sizeof(ScanKeyData)); + } +} + +void +rumendscan(IndexScanDesc scan) +{ + RumScanOpaque so = (RumScanOpaque) scan->opaque; + + freeScanKeys(so); + + if (so->sortstate) + rum_tuplesort_end(so->sortstate); + + MemoryContextDelete(so->tempCtx); + + pfree(so); +} + +Datum +rummarkpos(PG_FUNCTION_ARGS) +{ + elog(ERROR, "RUM does not support mark/restore"); + PG_RETURN_VOID(); +} + +Datum +rumrestrpos(PG_FUNCTION_ARGS) +{ + elog(ERROR, "RUM does not support mark/restore"); + PG_RETURN_VOID(); +} diff --git a/rumsort.c b/rumsort.c new file mode 100644 index 0000000000..92bdadcbf5 --- /dev/null +++ b/rumsort.c @@ -0,0 +1,3916 @@ +/*------------------------------------------------------------------------- + * + * rumsort.h + * Generalized tuple sorting routines. + * + * This module handles sorting of heap tuples, index tuples, or single + * Datums (and could easily support other kinds of sortable objects, + * if necessary). It works efficiently for both small and large amounts + * of data. Small amounts are sorted in-memory using qsort(). Large + * amounts are sorted using temporary files and a standard external sort + * algorithm. + * + * See Knuth, volume 3, for more than you want to know about the external + * sorting algorithm. Historically, we divided the input into sorted runs + * using replacement selection, in the form of a priority tree implemented + * as a heap (essentially his Algorithm 5.2.3H -- although that strategy is + * often avoided altogether), but that can now only happen first the first + * run. We merge the runs using polyphase merge, Knuth's Algorithm + * 5.4.2D. The logical "tapes" used by Algorithm D are implemented by + * logtape.c, which avoids space wastage by recycling disk space as soon + * as each block is read from its "tape". + * + * We never form the initial runs using Knuth's recommended replacement + * selection data structure (Algorithm 5.4.1R), because it uses a fixed + * number of records in memory at all times. Since we are dealing with + * tuples that may vary considerably in size, we want to be able to vary + * the number of records kept in memory to ensure full utilization of the + * allowed sort memory space. So, we keep the tuples in a variable-size + * heap, with the next record to go out at the top of the heap. Like + * Algorithm 5.4.1R, each record is stored with the run number that it + * must go into, and we use (run number, key) as the ordering key for the + * heap. When the run number at the top of the heap changes, we know that + * no more records of the prior run are left in the heap. 
Note that there + * are in practice only ever two distinct run numbers, due to the greatly + * reduced use of replacement selection in PostgreSQL 9.6. + * + * In PostgreSQL 9.6, a heap (based on Knuth's Algorithm H, with some small + * customizations) is only used with the aim of producing just one run, + * thereby avoiding all merging. Only the first run can use replacement + * selection, which is why there are now only two possible valid run + * numbers, and why heapification is customized to not distinguish between + * tuples in the second run (those will be quicksorted). We generally + * prefer a simple hybrid sort-merge strategy, where runs are sorted in much + * the same way as the entire input of an internal sort is sorted (using + * qsort()). The replacement_sort_tuples GUC controls the limited remaining + * use of replacement selection for the first run. + * + * There are several reasons to favor a hybrid sort-merge strategy. + * Maintaining a priority tree/heap has poor CPU cache characteristics. + * Furthermore, the growth in main memory sizes has greatly diminished the + * value of having runs that are larger than available memory, even in the + * case where there is partially sorted input and runs can be made far + * larger by using a heap. In most cases, a single-pass merge step is all + * that is required even when runs are no larger than available memory. + * Avoiding multiple merge passes was traditionally considered to be the + * major advantage of using replacement selection. + * + * The approximate amount of memory allowed for any one sort operation + * is specified in kilobytes by the caller (most pass work_mem). Initially, + * we absorb tuples and simply store them in an unsorted array as long as + * we haven't exceeded workMem. If we reach the end of the input without + * exceeding workMem, we sort the array using qsort() and subsequently return + * tuples just by scanning the tuple array sequentially. If we do exceed + * workMem, we begin to emit tuples into sorted runs in temporary tapes. + * When tuples are dumped in batch after quicksorting, we begin a new run + * with a new output tape (selected per Algorithm D). After the end of the + * input is reached, we dump out remaining tuples in memory into a final run + * (or two, when replacement selection is still used), then merge the runs + * using Algorithm D. + * + * When merging runs, we use a heap containing just the frontmost tuple from + * each source run; we repeatedly output the smallest tuple and insert the + * next tuple from its source tape (if any). When the heap empties, the merge + * is complete. The basic merge algorithm thus needs very little memory --- + * only M tuples for an M-way merge, and M is constrained to a small number. + * However, we can still make good use of our full workMem allocation by + * pre-reading additional tuples from each source tape. Without prereading, + * our access pattern to the temporary file would be very erratic; on average + * we'd read one block from each of M source tapes during the same time that + * we're writing M blocks to the output tape, so there is no sequentiality of + * access at all, defeating the read-ahead methods used by most Unix kernels. + * Worse, the output tape gets written into a very random sequence of blocks + * of the temp file, ensuring that things will be even worse when it comes + * time to read that tape. A straightforward merge pass thus ends up doing a + * lot of waiting for disk seeks. 
We can improve matters by prereading from + * each source tape sequentially, loading about workMem/M bytes from each tape + * in turn. Then we run the merge algorithm, writing but not reading until + * one of the preloaded tuple series runs out. Then we switch back to preread + * mode, fill memory again, and repeat. This approach helps to localize both + * read and write accesses. + * + * When the caller requests random access to the sort result, we form + * the final sorted run on a logical tape which is then "frozen", so + * that we can access it randomly. When the caller does not need random + * access, we return from rum_tuplesort_performsort() as soon as we are down + * to one run per logical tape. The final merge is then performed + * on-the-fly as the caller repeatedly calls rum_tuplesort_getXXX; this + * saves one cycle of writing all the data out to disk and reading it in. + * + * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the + * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according + * to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that + * tape drives are expensive beasts, and in particular that there will always + * be many more runs than tape drives. In our implementation a "tape drive" + * doesn't cost much more than a few Kb of memory buffers, so we can afford + * to have lots of them. In particular, if we can have as many tape drives + * as sorted runs, we can eliminate any repeated I/O at all. In the current + * code we determine the number of tapes M on the basis of workMem: we want + * workMem/M to be large enough that we read a fair amount of data each time + * we preread from a tape, so as to maintain the locality of access described + * above. Nonetheless, with large workMem we can have many tapes. + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" +#include "miscadmin.h" +#include "rumsort.h" + +#include "access/htup_details.h" +#include "access/nbtree.h" +#include "catalog/index.h" +#include "catalog/pg_am.h" +#include "commands/tablespace.h" +#include "executor/executor.h" +#include "utils/datum.h" +#include "utils/logtape.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/pg_rusage.h" +#include "utils/probes.h" +#include "utils/rel.h" +#include "utils/sortsupport.h" + +/* sort-type codes for sort__start probes */ +#define HEAP_SORT 0 +#define INDEX_SORT 1 +#define DATUM_SORT 2 +#define CLUSTER_SORT 3 + +/* GUC variables */ +#ifdef TRACE_SORT +bool trace_sort = false; +#endif + +#ifdef DEBUG_BOUNDED_SORT +bool optimize_bounded_sort = true; +#endif + + +/* + * The objects we actually sort are SortTuple structs. These contain + * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple), + * which is a separate palloc chunk --- we assume it is just one chunk and + * can be freed by a simple pfree(). SortTuples also contain the tuple's + * first key column in Datum/nullflag format, and an index integer. + * + * Storing the first key column lets us save heap_getattr or index_getattr + * calls during tuple comparisons. 
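+ * (For instance, in the single-key qsort_ssup() path below, cmp_ssup()
+ * resolves every comparison from datum1/isnull1 via ApplySortComparator(),
+ * without ever dereferencing the "tuple" pointer.)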
We could extract and save all the key + * columns not just the first, but this would increase code complexity and + * overhead, and wouldn't actually save any comparison cycles in the common + * case where the first key determines the comparison result. Note that + * for a pass-by-reference datatype, datum1 points into the "tuple" storage. + * + * When sorting single Datums, the data value is represented directly by + * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false, + * then datum1 points to a separately palloc'd data value that is also pointed + * to by the "tuple" pointer; otherwise "tuple" is NULL. + * + * While building initial runs, tupindex holds the tuple's run number. During + * merge passes, we re-use it to hold the input tape number that each tuple in + * the heap was read from, or to hold the index of the next tuple pre-read + * from the same tape in the case of pre-read entries. tupindex goes unused + * if the sort occurs entirely in memory. + */ +typedef struct +{ + void *tuple; /* the tuple proper */ + Datum datum1; /* value of first key column */ + bool isnull1; /* is first key column NULL? */ + int tupindex; /* see notes above */ +} SortTuple; + + +/* + * Possible states of a Tuplesort object. These denote the states that + * persist between calls of Tuplesort routines. + */ +typedef enum +{ + TSS_INITIAL, /* Loading tuples; still within memory limit */ + TSS_BOUNDED, /* Loading tuples into bounded-size heap */ + TSS_BUILDRUNS, /* Loading tuples; writing to tape */ + TSS_SORTEDINMEM, /* Sort completed entirely in memory */ + TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */ + TSS_FINALMERGE /* Performing final merge on-the-fly */ +} TupSortStatus; + +/* + * Parameters for calculation of number of tapes to use --- see inittapes() + * and rum_tuplesort_merge_order(). + * + * In this calculation we assume that each tape will cost us about 3 blocks + * worth of buffer space (which is an underestimate for very large data + * volumes, but it's probably close enough --- see logtape.c). + * + * MERGE_BUFFER_SIZE is how much data we'd like to read from each input + * tape during a preread cycle (see discussion at top of file). + */ +#define MINORDER 6 /* minimum merge order */ +#define TAPE_BUFFER_OVERHEAD (BLCKSZ * 3) +#define MERGE_BUFFER_SIZE (BLCKSZ * 32) + +typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); + +/* + * Private state of a Tuplesort operation. + */ +struct Tuplesortstate +{ + TupSortStatus status; /* enumerated value as shown above */ + int nKeys; /* number of columns in sort key */ + bool randomAccess; /* did caller request random access? */ + bool bounded; /* did caller specify a maximum number of + * tuples to return? */ + bool boundUsed; /* true if we made use of a bounded heap */ + int bound; /* if bounded, the maximum number of tuples */ + long availMem; /* remaining memory available, in bytes */ + long allowedMem; /* total memory allowed, in bytes */ + int maxTapes; /* number of tapes (Knuth's T) */ + int tapeRange; /* maxTapes-1 (Knuth's P) */ + MemoryContext sortcontext; /* memory context holding all sort data */ + LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */ + + /* + * These function pointers decouple the routines that must know what kind + * of tuple we are sorting from the routines that don't need to know it. + * They are set up by the rum_tuplesort_begin_xxx routines. 
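+ *
+ * For the RUM-specific variant, for example, rum_tuplesort_begin_rum()
+ * below wires them up as
+ *
+ *		state->comparetup = comparetup_rum;
+ *		state->copytup = copytup_rum;
+ *		state->writetup = writetup_rum;
+ *		state->readtup = readtup_rum;
+ *
+ * so the generic quicksort, heap, and tape code never has to know what a
+ * RumSortItem looks like.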
+ * + * Function to compare two tuples; result is per qsort() convention, ie: + * <0, 0, >0 according as a<b, a=b, a>b. The API must match + * qsort_arg_comparator. + */ + SortTupleComparator comparetup; + + /* + * Function to copy a supplied input tuple into palloc'd space and set up + * its SortTuple representation (ie, set tuple/datum1/isnull1). Also, + * state->availMem must be decreased by the amount of space used for the + * tuple copy (note the SortTuple struct itself is not counted). + */ + void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup); + + /* + * Function to write a stored tuple onto tape. The representation of the + * tuple on tape need not be the same as it is in memory; requirements on + * the tape representation are given below. After writing the tuple, + * pfree() the out-of-line data (not the SortTuple struct!), and increase + * state->availMem by the amount of memory space thereby released. + */ + void (*writetup) (Tuplesortstate *state, int tapenum, + SortTuple *stup); + + /* + * Function to read a stored tuple from tape back into memory. 'len' is + * the already-read length of the stored tuple. Create a palloc'd copy, + * initialize tuple/datum1/isnull1 in the target SortTuple struct, and + * decrease state->availMem by the amount of memory space consumed. + */ + void (*readtup) (Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len); + + /* + * Function to reverse the sort direction from its current state. (We + * could dispense with this if we wanted to enforce that all variants + * represent the sort key information alike.) + */ + void (*reversedirection) (Tuplesortstate *state); + + /* + * This array holds the tuples now in sort memory. If we are in state + * INITIAL, the tuples are in no particular order; if we are in state + * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS + * and FINALMERGE, the tuples are organized in "heap" order per Algorithm + * H. (Note that memtupcount only counts the tuples that are part of the + * heap --- during merge passes, memtuples[] entries beyond tapeRange are + * never in the heap and are used to hold pre-read tuples.) In state + * SORTEDONTAPE, the array is not used. + */ + SortTuple *memtuples; /* array of SortTuple structs */ + int memtupcount; /* number of tuples currently present */ + int memtupsize; /* allocated length of memtuples array */ + bool growmemtuples; /* memtuples' growth still underway? */ + + /* + * While building initial runs, this is the current output run number + * (starting at 0). Afterwards, it is the number of initial runs we made. + */ + int currentRun; + + /* + * Unless otherwise noted, all pointer variables below are pointers to + * arrays of length maxTapes, holding per-tape data. + */ + + /* + * These variables are only used during merge passes. mergeactive[i] is + * true if we are reading an input run from (actual) tape number i and + * have not yet exhausted that run. mergenext[i] is the memtuples index + * of the next pre-read tuple (next to be loaded into the heap) for tape + * i, or 0 if we are out of pre-read tuples. mergelast[i] similarly + * points to the last pre-read tuple from each tape. mergeavailslots[i] + * is the number of unused memtuples[] slots reserved for tape i, and + * mergeavailmem[i] is the amount of unused space allocated for tape i. + * mergefreelist and mergefirstfree keep track of unused locations in the + * memtuples[] array.
The memtuples[].tupindex fields link together + * pre-read tuples for each tape as well as recycled locations in + * mergefreelist. It is OK to use 0 as a null link in these lists, because + * memtuples[0] is part of the merge heap and is never a pre-read tuple. + */ + bool *mergeactive; /* active input run source? */ + int *mergenext; /* first preread tuple for each source */ + int *mergelast; /* last preread tuple for each source */ + int *mergeavailslots; /* slots left for prereading each tape */ + long *mergeavailmem; /* availMem for prereading each tape */ + int mergefreelist; /* head of freelist of recycled slots */ + int mergefirstfree; /* first slot never used in this merge */ + + /* + * Variables for Algorithm D. Note that destTape is a "logical" tape + * number, ie, an index into the tp_xxx[] arrays. Be careful to keep + * "logical" and "actual" tape numbers straight! + */ + int Level; /* Knuth's l */ + int destTape; /* current output tape (Knuth's j, less 1) */ + int *tp_fib; /* Target Fibonacci run counts (A[]) */ + int *tp_runs; /* # of real runs on each tape */ + int *tp_dummy; /* # of dummy runs for each tape (D[]) */ + int *tp_tapenum; /* Actual tape numbers (TAPE[]) */ + int activeTapes; /* # of active input tapes in merge pass */ + + /* + * These variables are used after completion of sorting to keep track of + * the next tuple to return. (In the tape case, the tape's current read + * position is also critical state.) + */ + int result_tape; /* actual tape number of finished output */ + int current; /* array index (only used if SORTEDINMEM) */ + bool eof_reached; /* reached EOF (needed for cursors) */ + + /* markpos_xxx holds marked position for mark and restore */ + long markpos_block; /* tape block# (only used if SORTEDONTAPE) */ + int markpos_offset; /* saved "current", or offset in tape block */ + bool markpos_eof; /* saved "eof_reached" */ + + /* + * These variables are specific to the MinimalTuple case; they are set by + * rum_tuplesort_begin_heap and used only by the MinimalTuple routines. + */ + TupleDesc tupDesc; + SortSupport sortKeys; /* array of length nKeys */ + + /* + * This variable is shared by the single-key MinimalTuple case and the + * Datum case (which both use qsort_ssup()). Otherwise it's NULL. + */ + SortSupport onlyKey; + + /* + * These variables are specific to the CLUSTER case; they are set by + * rum_tuplesort_begin_cluster. Note CLUSTER also uses tupDesc and + * indexScanKey. + */ + IndexInfo *indexInfo; /* info about index being used for reference */ + EState *estate; /* for evaluating index expressions */ + + /* + * These variables are specific to the IndexTuple case; they are set by + * rum_tuplesort_begin_index_xxx and used only by the IndexTuple routines. + */ + Relation heapRel; /* table the index is being built on */ + Relation indexRel; /* index being built */ + + /* These are specific to the index_btree subcase: */ + ScanKey indexScanKey; + bool enforceUnique; /* complain if we find duplicate tuples */ + + /* These are specific to the index_hash subcase: */ + uint32 hash_mask; /* mask for sortable part of hash code */ + + /* + * These variables are specific to the Datum case; they are set by + * rum_tuplesort_begin_datum and used only by the DatumTuple routines. + */ + Oid datumType; + /* we need typelen and byval in order to know how to copy the Datums. */ + int datumTypeLen; + bool datumTypeByVal; + + bool reverse; + + /* + * Resource snapshot for time of sort start. 
+ */ +#ifdef TRACE_SORT + PGRUsage ru_start; +#endif +}; + +#define COMPARETUP(state,a,b) ((*(state)->comparetup) (a, b, state)) +#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup)) +#define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup)) +#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len)) +#define REVERSEDIRECTION(state) ((*(state)->reversedirection) (state)) +#define LACKMEM(state) ((state)->availMem < 0) +#define USEMEM(state,amt) ((state)->availMem -= (amt)) +#define FREEMEM(state,amt) ((state)->availMem += (amt)) + +/* + * NOTES about on-tape representation of tuples: + * + * We require the first "unsigned int" of a stored tuple to be the total size + * on-tape of the tuple, including itself (so it is never zero; an all-zero + * unsigned int is used to delimit runs). The remainder of the stored tuple + * may or may not match the in-memory representation of the tuple --- + * any conversion needed is the job of the writetup and readtup routines. + * + * If state->randomAccess is true, then the stored representation of the + * tuple must be followed by another "unsigned int" that is a copy of the + * length --- so the total tape space used is actually sizeof(unsigned int) + * more than the stored length value. This allows read-backwards. When + * randomAccess is not true, the write/read routines may omit the extra + * length word. + * + * writetup is expected to write both length words as well as the tuple + * data. When readtup is called, the tape is positioned just after the + * front length word; readtup must read the tuple data and advance past + * the back length word (if present). + * + * The write/read routines can make use of the tuple description data + * stored in the Tuplesortstate record, if needed. They are also expected + * to adjust state->availMem by the amount of memory space (not tape space!) + * released or consumed. There is no error return from either writetup + * or readtup; they should ereport() on failure. + * + * + * NOTES about memory consumption calculations: + * + * We count space allocated for tuples against the workMem limit, plus + * the space used by the variable-size memtuples array. Fixed-size space + * is not counted; it's small enough to not be interesting. + * + * Note that we count actual space used (as shown by GetMemoryChunkSpace) + * rather than the originally-requested size. This is important since + * palloc can add substantial overhead. It's not a complete answer since + * we won't count any wasted space in palloc allocation blocks, but it's + * a lot better than what we were doing before 7.3. 
+ */ + +/* When using this macro, beware of double evaluation of len */ +#define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \ + do { \ + if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \ + elog(ERROR, "unexpected end of data"); \ + } while(0) + + +static Tuplesortstate *rum_tuplesort_begin_common(int workMem, bool randomAccess); +static void puttuple_common(Tuplesortstate *state, SortTuple *tuple); +static void inittapes(Tuplesortstate *state); +static void selectnewtape(Tuplesortstate *state); +static void mergeruns(Tuplesortstate *state); +static void mergeonerun(Tuplesortstate *state); +static void beginmerge(Tuplesortstate *state); +static void mergepreread(Tuplesortstate *state); +static void mergeprereadone(Tuplesortstate *state, int srcTape); +static void dumptuples(Tuplesortstate *state, bool alltuples); +static void make_bounded_heap(Tuplesortstate *state); +static void sort_bounded_heap(Tuplesortstate *state); +static void rum_tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple, + int tupleindex, bool checkIndex); +static void rum_tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex); +static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK); +static void markrunend(Tuplesortstate *state, int tapenum); +static int comparetup_heap(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); +static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup); +static void writetup_heap(Tuplesortstate *state, int tapenum, + SortTuple *stup); +static void readtup_heap(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len); +static void reversedirection_heap(Tuplesortstate *state); +static int comparetup_cluster(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); +static void copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup); +static void writetup_cluster(Tuplesortstate *state, int tapenum, + SortTuple *stup); +static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len); +static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); +static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); +static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup); +static void writetup_index(Tuplesortstate *state, int tapenum, + SortTuple *stup); +static void readtup_index(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len); +static void reversedirection_index_btree(Tuplesortstate *state); +static void reversedirection_index_hash(Tuplesortstate *state); +static int comparetup_datum(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); +static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup); +static void writetup_datum(Tuplesortstate *state, int tapenum, + SortTuple *stup); +static void readtup_datum(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len); +static void reversedirection_datum(Tuplesortstate *state); +static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup); +static int comparetup_rum(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state); +static void copytup_rum(Tuplesortstate *state, SortTuple *stup, void *tup); +static void writetup_rum(Tuplesortstate *state, int tapenum, + SortTuple *stup); +static void readtup_rum(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len); +static void 
reversedirection_rum(Tuplesortstate *state); + +/* + * Special versions of qsort just for SortTuple objects. qsort_tuple() sorts + * any variant of SortTuples, using the appropriate comparetup function. + * qsort_ssup() is specialized for the case where the comparetup function + * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts + * and Datum sorts. + */ +//#include "qsort_tuple.c" + +static void +swapfunc(SortTuple *a, SortTuple *b, size_t n) +{ + do + { + SortTuple t = *a; + *a++ = *b; + *b++ = t; + } while (--n > 0); +} + +#define cmp_ssup(a, b, ssup) \ + ApplySortComparator((a)->datum1, (a)->isnull1, \ + (b)->datum1, (b)->isnull1, ssup) + +#define swap(a, b) \ + do { \ + SortTuple t = *(a); \ + *(a) = *(b); \ + *(b) = t; \ + } while (0); + +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n) + +static SortTuple * +med3_tuple(SortTuple *a, SortTuple *b, SortTuple *c, SortTupleComparator cmp_tuple, Tuplesortstate *state) +{ + return cmp_tuple(a, b, state) < 0 ? + (cmp_tuple(b, c, state) < 0 ? b : + (cmp_tuple(a, c, state) < 0 ? c : a)) + : (cmp_tuple(b, c, state) > 0 ? b : + (cmp_tuple(a, c, state) < 0 ? a : c)); +} + +static SortTuple * +med3_ssup(SortTuple *a, SortTuple *b, SortTuple *c, SortSupport ssup) +{ + return cmp_ssup(a, b, ssup) < 0 ? + (cmp_ssup(b, c, ssup) < 0 ? b : + (cmp_ssup(a, c, ssup) < 0 ? c : a)) + : (cmp_ssup(b, c, ssup) > 0 ? b : + (cmp_ssup(a, c, ssup) < 0 ? a : c)); +} + +static void +qsort_ssup(SortTuple *a, size_t n, SortSupport ssup) +{ + SortTuple *pa, + *pb, + *pc, + *pd, + *pl, + *pm, + *pn; + size_t d1, + d2; + int r, + presorted; + +loop: + CHECK_FOR_INTERRUPTS(); + if (n < 7) + { + for (pm = a + 1; pm < a + n; pm++) + for (pl = pm; pl > a && cmp_ssup(pl - 1, pl, ssup) > 0; pl--) + swap(pl, pl - 1); + return; + } + presorted = 1; + for (pm = a + 1; pm < a + n; pm++) + { + CHECK_FOR_INTERRUPTS(); + if (cmp_ssup(pm - 1, pm, ssup) > 0) + { + presorted = 0; + break; + } + } + if (presorted) + return; + pm = a + (n / 2); + if (n > 7) + { + pl = a; + pn = a + (n - 1); + if (n > 40) + { + size_t d = (n / 8); + + pl = med3_ssup(pl, pl + d, pl + 2 * d, ssup); + pm = med3_ssup(pm - d, pm, pm + d, ssup); + pn = med3_ssup(pn - 2 * d, pn - d, pn, ssup); + } + pm = med3_ssup(pl, pm, pn, ssup); + } + swap(a, pm); + pa = pb = a + 1; + pc = pd = a + (n - 1); + for (;;) + { + while (pb <= pc && (r = cmp_ssup(pb, a, ssup)) <= 0) + { + if (r == 0) + { + swap(pa, pb); + pa++; + } + pb++; + CHECK_FOR_INTERRUPTS(); + } + while (pb <= pc && (r = cmp_ssup(pc, a, ssup)) >= 0) + { + if (r == 0) + { + swap(pc, pd); + pd--; + } + pc--; + CHECK_FOR_INTERRUPTS(); + } + if (pb > pc) + break; + swap(pb, pc); + pb++; + pc--; + } + pn = a + n; + d1 = Min(pa - a, pb - pa); + vecswap(a, pb - d1, d1); + d1 = Min(pd - pc, pn - pd - 1); + vecswap(pb, pn - d1, d1); + d1 = pb - pa; + d2 = pd - pc; + if (d1 <= d2) + { + /* Recurse on left partition, then iterate on right partition */ + if (d1 > 1) + qsort_ssup(a, d1, ssup); + if (d2 > 1) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_ssup(pn - d2, d2, ssup); */ + a = pn - d2; + n = d2; + goto loop; + } + } + else + { + /* Recurse on right partition, then iterate on left partition */ + if (d2 > 1) + qsort_ssup(pn - d2, d2, ssup); + if (d1 > 1) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_ssup(a, d1, ssup); */ + n = d1; + goto loop; + } + } +} + +static void +qsort_tuple(SortTuple *a, size_t n, SortTupleComparator cmp_tuple, Tuplesortstate *state) +{ + SortTuple *pa, + *pb, 
+ *pc, + *pd, + *pl, + *pm, + *pn; + size_t d1, + d2; + int r, + presorted; + +loop: + CHECK_FOR_INTERRUPTS(); + if (n < 7) + { + for (pm = a + 1; pm < a + n; pm++) + for (pl = pm; pl > a && cmp_tuple(pl - 1, pl, state) > 0; pl--) + swap(pl, pl - 1); + return; + } + presorted = 1; + for (pm = a + 1; pm < a + n; pm++) + { + CHECK_FOR_INTERRUPTS(); + if (cmp_tuple(pm - 1, pm, state) > 0) + { + presorted = 0; + break; + } + } + if (presorted) + return; + pm = a + (n / 2); + if (n > 7) + { + pl = a; + pn = a + (n - 1); + if (n > 40) + { + size_t d = (n / 8); + + pl = med3_tuple(pl, pl + d, pl + 2 * d, cmp_tuple, state); + pm = med3_tuple(pm - d, pm, pm + d, cmp_tuple, state); + pn = med3_tuple(pn - 2 * d, pn - d, pn, cmp_tuple, state); + } + pm = med3_tuple(pl, pm, pn, cmp_tuple, state); + } + swap(a, pm); + pa = pb = a + 1; + pc = pd = a + (n - 1); + for (;;) + { + while (pb <= pc && (r = cmp_tuple(pb, a, state)) <= 0) + { + if (r == 0) + { + swap(pa, pb); + pa++; + } + pb++; + CHECK_FOR_INTERRUPTS(); + } + while (pb <= pc && (r = cmp_tuple(pc, a, state)) >= 0) + { + if (r == 0) + { + swap(pc, pd); + pd--; + } + pc--; + CHECK_FOR_INTERRUPTS(); + } + if (pb > pc) + break; + swap(pb, pc); + pb++; + pc--; + } + pn = a + n; + d1 = Min(pa - a, pb - pa); + vecswap(a, pb - d1, d1); + d1 = Min(pd - pc, pn - pd - 1); + vecswap(pb, pn - d1, d1); + d1 = pb - pa; + d2 = pd - pc; + if (d1 <= d2) + { + /* Recurse on left partition, then iterate on right partition */ + if (d1 > 1) + qsort_tuple(a, d1, cmp_tuple, state); + if (d2 > 1) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_tuple(pn - d2, d2, cmp_tuple, state); */ + a = pn - d2; + n = d2; + goto loop; + } + } + else + { + /* Recurse on right partition, then iterate on left partition */ + if (d2 > 1) + qsort_tuple(pn - d2, d2, cmp_tuple, state); + if (d1 > 1) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_tuple(a, d1, cmp_tuple, state); */ + n = d1; + goto loop; + } + } +} + +/* + * rum_tuplesort_begin_xxx + * + * Initialize for a tuple sort operation. + * + * After calling rum_tuplesort_begin, the caller should call rum_tuplesort_putXXX + * zero or more times, then call rum_tuplesort_performsort when all the tuples + * have been supplied. After performsort, retrieve the tuples in sorted + * order by calling rum_tuplesort_getXXX until it returns false/NULL. (If random + * access was requested, rescan, markpos, and restorepos can also be called.) + * Call rum_tuplesort_end to terminate the operation and release memory/disk space. + * + * Each variant of rum_tuplesort_begin has a workMem parameter specifying the + * maximum number of kilobytes of RAM to use before spilling data to disk. + * (The normal value of this parameter is work_mem, but some callers use + * other values.) Each variant also has a randomAccess parameter specifying + * whether the caller needs non-sequential access to the sort result. + */ + +static Tuplesortstate * +rum_tuplesort_begin_common(int workMem, bool randomAccess) +{ + Tuplesortstate *state; + MemoryContext sortcontext; + MemoryContext oldcontext; + + /* + * Create a working memory context for this sort operation. All data + * needed by the sort will live inside this context. + */ + sortcontext = AllocSetContextCreate(CurrentMemoryContext, + "TupleSort", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + /* + * Make the Tuplesortstate within the per-sort context. This way, we + * don't need a separate pfree() operation for it at shutdown. 
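+	 * (rum_tuplesort_end() relies on this: it simply deletes sortcontext,
+	 * which releases the Tuplesortstate, the memtuples[] array, and all
+	 * copied tuples in a single MemoryContextDelete() call.)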
+ */ + oldcontext = MemoryContextSwitchTo(sortcontext); + + state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate)); + +#ifdef TRACE_SORT + if (trace_sort) + pg_rusage_init(&state->ru_start); +#endif + + state->status = TSS_INITIAL; + state->randomAccess = randomAccess; + state->bounded = false; + state->boundUsed = false; + state->allowedMem = workMem * 1024L; + state->availMem = state->allowedMem; + state->sortcontext = sortcontext; + state->tapeset = NULL; + + state->memtupcount = 0; + + /* + * Initial size of array must be more than ALLOCSET_SEPARATE_THRESHOLD; + * see comments in grow_memtuples(). + */ + state->memtupsize = Max(1024, + ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1); + + state->growmemtuples = true; + state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple)); + + USEMEM(state, GetMemoryChunkSpace(state->memtuples)); + + /* workMem must be large enough for the minimal memtuples array */ + if (LACKMEM(state)) + elog(ERROR, "insufficient memory allowed for sort"); + + state->currentRun = 0; + + /* + * maxTapes, tapeRange, and Algorithm D variables will be initialized by + * inittapes(), if needed + */ + + state->result_tape = -1; /* flag that result tape has not been formed */ + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +Tuplesortstate * +rum_tuplesort_begin_heap(TupleDesc tupDesc, + int nkeys, AttrNumber *attNums, + Oid *sortOperators, Oid *sortCollations, + bool *nullsFirstFlags, + int workMem, bool randomAccess) +{ + Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); + MemoryContext oldcontext; + int i; + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + + AssertArg(nkeys > 0); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c", + nkeys, workMem, randomAccess ? 't' : 'f'); +#endif + + state->nKeys = nkeys; + + TRACE_POSTGRESQL_SORT_START(HEAP_SORT, + false, /* no unique check */ + nkeys, + workMem, + randomAccess); + + state->comparetup = comparetup_heap; + state->copytup = copytup_heap; + state->writetup = writetup_heap; + state->readtup = readtup_heap; + state->reversedirection = reversedirection_heap; + + state->tupDesc = tupDesc; /* assume we need not copy tupDesc */ + + /* Prepare SortSupport data for each column */ + state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData)); + + for (i = 0; i < nkeys; i++) + { + SortSupport sortKey = state->sortKeys + i; + + AssertArg(attNums[i] != 0); + AssertArg(sortOperators[i] != 0); + + sortKey->ssup_cxt = CurrentMemoryContext; + sortKey->ssup_collation = sortCollations[i]; + sortKey->ssup_nulls_first = nullsFirstFlags[i]; + sortKey->ssup_attno = attNums[i]; + + PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey); + } + + if (nkeys == 1) + state->onlyKey = state->sortKeys; + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +Tuplesortstate * +rum_tuplesort_begin_cluster(TupleDesc tupDesc, + Relation indexRel, + int workMem, bool randomAccess) +{ + Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); + MemoryContext oldcontext; + + Assert(indexRel->rd_rel->relam == BTREE_AM_OID); + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c", + RelationGetNumberOfAttributes(indexRel), + workMem, randomAccess ? 
't' : 'f'); +#endif + + state->nKeys = RelationGetNumberOfAttributes(indexRel); + + TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT, + false, /* no unique check */ + state->nKeys, + workMem, + randomAccess); + + state->comparetup = comparetup_cluster; + state->copytup = copytup_cluster; + state->writetup = writetup_cluster; + state->readtup = readtup_cluster; + state->reversedirection = reversedirection_index_btree; + + state->indexInfo = BuildIndexInfo(indexRel); + state->indexScanKey = _bt_mkscankey_nodata(indexRel); + + state->tupDesc = tupDesc; /* assume we need not copy tupDesc */ + + if (state->indexInfo->ii_Expressions != NULL) + { + TupleTableSlot *slot; + ExprContext *econtext; + + /* + * We will need to use FormIndexDatum to evaluate the index + * expressions. To do that, we need an EState, as well as a + * TupleTableSlot to put the table tuples into. The econtext's + * scantuple has to point to that slot, too. + */ + state->estate = CreateExecutorState(); + slot = MakeSingleTupleTableSlot(tupDesc); + econtext = GetPerTupleExprContext(state->estate); + econtext->ecxt_scantuple = slot; + } + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +Tuplesortstate * +rum_tuplesort_begin_index_btree(Relation heapRel, + Relation indexRel, + bool enforceUnique, + int workMem, bool randomAccess) +{ + Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); + MemoryContext oldcontext; + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin index sort: unique = %c, workMem = %d, randomAccess = %c", + enforceUnique ? 't' : 'f', + workMem, randomAccess ? 't' : 'f'); +#endif + + state->nKeys = RelationGetNumberOfAttributes(indexRel); + + TRACE_POSTGRESQL_SORT_START(INDEX_SORT, + enforceUnique, + state->nKeys, + workMem, + randomAccess); + + state->comparetup = comparetup_index_btree; + state->copytup = copytup_index; + state->writetup = writetup_index; + state->readtup = readtup_index; + state->reversedirection = reversedirection_index_btree; + + state->heapRel = heapRel; + state->indexRel = indexRel; + state->indexScanKey = _bt_mkscankey_nodata(indexRel); + state->enforceUnique = enforceUnique; + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +Tuplesortstate * +rum_tuplesort_begin_index_hash(Relation heapRel, + Relation indexRel, + uint32 hash_mask, + int workMem, bool randomAccess) +{ + Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); + MemoryContext oldcontext; + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c", + hash_mask, + workMem, randomAccess ? 
't' : 'f'); +#endif + + state->nKeys = 1; /* Only one sort column, the hash code */ + + state->comparetup = comparetup_index_hash; + state->copytup = copytup_index; + state->writetup = writetup_index; + state->readtup = readtup_index; + state->reversedirection = reversedirection_index_hash; + + state->heapRel = heapRel; + state->indexRel = indexRel; + + state->hash_mask = hash_mask; + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +Tuplesortstate * +rum_tuplesort_begin_rum(int workMem, int nKeys, bool randomAccess) +{ + Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); + MemoryContext oldcontext; + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin rum sort: nKeys = %d, workMem = %d, randomAccess = %c", + nKeys, workMem, randomAccess ? 't' : 'f'); +#endif + + state->nKeys = nKeys; + + TRACE_POSTGRESQL_SORT_START(INDEX_SORT, + enforceUnique, + state->nKeys, + workMem, + randomAccess); + + state->comparetup = comparetup_rum; + state->copytup = copytup_rum; + state->writetup = writetup_rum; + state->readtup = readtup_rum; + state->reversedirection = reversedirection_rum; + state->reverse = false; + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +Tuplesortstate * +rum_tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, + bool nullsFirstFlag, + int workMem, bool randomAccess) +{ + Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); + MemoryContext oldcontext; + int16 typlen; + bool typbyval; + + oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, + "begin datum sort: workMem = %d, randomAccess = %c", + workMem, randomAccess ? 't' : 'f'); +#endif + + state->nKeys = 1; /* always a one-column sort */ + + TRACE_POSTGRESQL_SORT_START(DATUM_SORT, + false, /* no unique check */ + 1, + workMem, + randomAccess); + + state->comparetup = comparetup_datum; + state->copytup = copytup_datum; + state->writetup = writetup_datum; + state->readtup = readtup_datum; + state->reversedirection = reversedirection_datum; + + state->datumType = datumType; + + /* Prepare SortSupport data */ + state->onlyKey = (SortSupport) palloc0(sizeof(SortSupportData)); + + state->onlyKey->ssup_cxt = CurrentMemoryContext; + state->onlyKey->ssup_collation = sortCollation; + state->onlyKey->ssup_nulls_first = nullsFirstFlag; + + PrepareSortSupportFromOrderingOp(sortOperator, state->onlyKey); + + /* lookup necessary attributes of the datum type */ + get_typlenbyval(datumType, &typlen, &typbyval); + state->datumTypeLen = typlen; + state->datumTypeByVal = typbyval; + + MemoryContextSwitchTo(oldcontext); + + return state; +} + +/* + * rum_tuplesort_set_bound + * + * Advise tuplesort that at most the first N result tuples are required. + * + * Must be called before inserting any tuples. (Actually, we could allow it + * as long as the sort hasn't spilled to disk, but there seems no need for + * delayed calls at the moment.) + * + * This is a hint only. The tuplesort may still return more tuples than + * requested. 
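+ *
+ * A call-order sketch (illustrative only; rum_tuplesort_getrum() is assumed
+ * here to be the RUM getter declared in rumsort.h):
+ *
+ *		state = rum_tuplesort_begin_rum(work_mem, nKeys, false);
+ *		rum_tuplesort_set_bound(state, k);	(before any put routine)
+ *		rum_tuplesort_putrum(state, item);	(repeated per input item)
+ *		rum_tuplesort_performsort(state);
+ *		item = rum_tuplesort_getrum(state, true, &should_free);
+ *		rum_tuplesort_end(state);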
+ */ +void +rum_tuplesort_set_bound(Tuplesortstate *state, int64 bound) +{ + /* Assert we're called before loading any tuples */ + Assert(state->status == TSS_INITIAL); + Assert(state->memtupcount == 0); + Assert(!state->bounded); + +#ifdef DEBUG_BOUNDED_SORT + /* Honor GUC setting that disables the feature (for easy testing) */ + if (!optimize_bounded_sort) + return; +#endif + + /* We want to be able to compute bound * 2, so limit the setting */ + if (bound > (int64) (INT_MAX / 2)) + return; + + state->bounded = true; + state->bound = (int) bound; +} + +/* + * rum_tuplesort_end + * + * Release resources and clean up. + * + * NOTE: after calling this, any pointers returned by rum_tuplesort_getXXX are + * pointing to garbage. Be careful not to attempt to use or free such + * pointers afterwards! + */ +void +rum_tuplesort_end(Tuplesortstate *state) +{ + /* context swap probably not needed, but let's be safe */ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + long spaceUsed; + + if (state->tapeset) + spaceUsed = LogicalTapeSetBlocks(state->tapeset); + else + spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024; +#endif + + /* + * Delete temporary "tape" files, if any. + * + * Note: want to include this in reported total cost of sort, hence need + * for two #ifdef TRACE_SORT sections. + */ + if (state->tapeset) + LogicalTapeSetClose(state->tapeset); + +#ifdef TRACE_SORT + if (trace_sort) + { + if (state->tapeset) + elog(LOG, "external sort ended, %ld disk blocks used: %s", + spaceUsed, pg_rusage_show(&state->ru_start)); + else + elog(LOG, "internal sort ended, %ld KB used: %s", + spaceUsed, pg_rusage_show(&state->ru_start)); + } + + TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed); +#else + + /* + * If you disabled TRACE_SORT, you can still probe sort__done, but you + * ain't getting space-used stats. + */ + TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L); +#endif + + /* Free any execution state created for CLUSTER case */ + if (state->estate != NULL) + { + ExprContext *econtext = GetPerTupleExprContext(state->estate); + + ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple); + FreeExecutorState(state->estate); + } + + MemoryContextSwitchTo(oldcontext); + + /* + * Free the per-sort memory context, thereby releasing all working memory, + * including the Tuplesortstate struct itself. + */ + MemoryContextDelete(state->sortcontext); +} + +/* + * Grow the memtuples[] array, if possible within our memory constraint. + * Return TRUE if we were able to enlarge the array, FALSE if not. + * + * Normally, at each increment we double the size of the array. When we no + * longer have enough memory to do that, we attempt one last, smaller increase + * (and then clear the growmemtuples flag so we don't try any more). That + * allows us to use allowedMem as fully as possible; sticking to the pure + * doubling rule could result in almost half of allowedMem going unused. + * Because availMem moves around with tuple addition/removal, we need some + * rule to prevent making repeated small increases in memtupsize, which would + * just be useless thrashing. The growmemtuples flag accomplishes that and + * also prevents useless recalculations in this function. 
+ */ +static bool +grow_memtuples(Tuplesortstate *state) +{ + int newmemtupsize; + int memtupsize = state->memtupsize; + long memNowUsed = state->allowedMem - state->availMem; + + /* Forget it if we've already maxed out memtuples, per comment above */ + if (!state->growmemtuples) + return false; + + /* Select new value of memtupsize */ + if (memNowUsed <= state->availMem) + { + /* + * It is surely safe to double memtupsize if we've used no more than + * half of allowedMem. + * + * Note: it might seem that we need to worry about memtupsize * 2 + * overflowing an int, but the MaxAllocSize clamp applied below + * ensures the existing memtupsize can't be large enough for that. + */ + newmemtupsize = memtupsize * 2; + } + else + { + /* + * This will be the last increment of memtupsize. Abandon doubling + * strategy and instead increase as much as we safely can. + * + * To stay within allowedMem, we can't increase memtupsize by more + * than availMem / sizeof(SortTuple) elements. In practice, we want + * to increase it by considerably less, because we need to leave some + * space for the tuples to which the new array slots will refer. We + * assume the new tuples will be about the same size as the tuples + * we've already seen, and thus we can extrapolate from the space + * consumption so far to estimate an appropriate new size for the + * memtuples array. The optimal value might be higher or lower than + * this estimate, but it's hard to know that in advance. + * + * This calculation is safe against enlarging the array so much that + * LACKMEM becomes true, because the memory currently used includes + * the present array; thus, there would be enough allowedMem for the + * new array elements even if no other memory were currently used. + * + * We do the arithmetic in float8, because otherwise the product of + * memtupsize and allowedMem could overflow. (A little algebra shows + * that grow_ratio must be less than 2 here, so we are not risking + * integer overflow this way.) Any inaccuracy in the result should be + * insignificant; but even if we computed a completely insane result, + * the checks below will prevent anything really bad from happening. + */ + double grow_ratio; + + grow_ratio = (double) state->allowedMem / (double) memNowUsed; + newmemtupsize = (int) (memtupsize * grow_ratio); + + /* We won't make any further enlargement attempts */ + state->growmemtuples = false; + } + + /* Must enlarge array by at least one element, else report failure */ + if (newmemtupsize <= memtupsize) + goto noalloc; + + /* + * On a 64-bit machine, allowedMem could be more than MaxAllocSize. Clamp + * to ensure our request won't be rejected by palloc. + */ + if ((Size) newmemtupsize >= MaxAllocSize / sizeof(SortTuple)) + { + newmemtupsize = (int) (MaxAllocSize / sizeof(SortTuple)); + state->growmemtuples = false; /* can't grow any more */ + } + + /* + * We need to be sure that we do not cause LACKMEM to become true, else + * the space management algorithm will go nuts. The code above should + * never generate a dangerous request, but to be safe, check explicitly + * that the array growth fits within availMem. (We could still cause + * LACKMEM if the memory chunk overhead associated with the memtuples + * array were to increase. That shouldn't happen because we chose the + * initial array size large enough to ensure that palloc will be treating + * both old and new arrays as separate chunks. But we'll check LACKMEM + * explicitly below just in case.) 
+ */ + if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(SortTuple))) + goto noalloc; + + /* OK, do it */ + FREEMEM(state, GetMemoryChunkSpace(state->memtuples)); + state->memtupsize = newmemtupsize; + state->memtuples = (SortTuple *) + repalloc(state->memtuples, + state->memtupsize * sizeof(SortTuple)); + USEMEM(state, GetMemoryChunkSpace(state->memtuples)); + if (LACKMEM(state)) + elog(ERROR, "unexpected out-of-memory situation in tuplesort"); + return true; + +noalloc: + /* If for any reason we didn't realloc, shut off future attempts */ + state->growmemtuples = false; + return false; +} + +/* + * Accept one tuple while collecting input data for sort. + * + * Note that the input data is always copied; the caller need not save it. + */ +void +rum_tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + /* + * Copy the given tuple into memory we control, and decrease availMem. + * Then call the common code. + */ + COPYTUP(state, &stup, (void *) slot); + + puttuple_common(state, &stup); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Accept one tuple while collecting input data for sort. + * + * Note that the input data is always copied; the caller need not save it. + */ +void +rum_tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + /* + * Copy the given tuple into memory we control, and decrease availMem. + * Then call the common code. + */ + COPYTUP(state, &stup, (void *) tup); + + puttuple_common(state, &stup); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Accept one index tuple while collecting input data for sort. + * + * Note that the input tuple is always copied; the caller need not save it. + */ +void +rum_tuplesort_putindextuple(Tuplesortstate *state, IndexTuple tuple) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + /* + * Copy the given tuple into memory we control, and decrease availMem. + * Then call the common code. + */ + COPYTUP(state, &stup, (void *) tuple); + + puttuple_common(state, &stup); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Accept one Datum while collecting input data for sort. + * + * If the Datum is pass-by-ref type, the value will be copied. + */ +void +rum_tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + /* + * If it's a pass-by-reference value, copy it into memory we control, and + * decrease availMem. Then call the common code. + */ + if (isNull || state->datumTypeByVal) + { + stup.datum1 = val; + stup.isnull1 = isNull; + stup.tuple = NULL; /* no separate storage */ + } + else + { + stup.datum1 = datumCopy(val, false, state->datumTypeLen); + stup.isnull1 = false; + stup.tuple = DatumGetPointer(stup.datum1); + USEMEM(state, GetMemoryChunkSpace(stup.tuple)); + } + + puttuple_common(state, &stup); + + MemoryContextSwitchTo(oldcontext); +} + +void +rum_tuplesort_putrum(Tuplesortstate *state, RumSortItem *item) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + /* + * Copy the given tuple into memory we control, and decrease availMem. + * Then call the common code. 
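+	 *
+	 * As a sketch of the intended usage (not enforced here): the caller is
+	 * expected to push every RumSortItem through this function, call
+	 * rum_tuplesort_performsort() once, then drain the result with repeated
+	 * rum_tuplesort_getrum() calls until it returns NULL, and finally call
+	 * rum_tuplesort_end().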
+ */ + COPYTUP(state, &stup, (void *) item); + + puttuple_common(state, &stup); + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Shared code for tuple and datum cases. + */ +static void +puttuple_common(Tuplesortstate *state, SortTuple *tuple) +{ + switch (state->status) + { + case TSS_INITIAL: + + /* + * Save the tuple into the unsorted array. First, grow the array + * as needed. Note that we try to grow the array when there is + * still one free slot remaining --- if we fail, there'll still be + * room to store the incoming tuple, and then we'll switch to + * tape-based operation. + */ + if (state->memtupcount >= state->memtupsize - 1) + { + (void) grow_memtuples(state); + Assert(state->memtupcount < state->memtupsize); + } + state->memtuples[state->memtupcount++] = *tuple; + + /* + * Check if it's time to switch over to a bounded heapsort. We do + * so if the input tuple count exceeds twice the desired tuple + * count (this is a heuristic for where heapsort becomes cheaper + * than a quicksort), or if we've just filled workMem and have + * enough tuples to meet the bound. + * + * Note that once we enter TSS_BOUNDED state we will always try to + * complete the sort that way. In the worst case, if later input + * tuples are larger than earlier ones, this might cause us to + * exceed workMem significantly. + */ + if (state->bounded && + (state->memtupcount > state->bound * 2 || + (state->memtupcount > state->bound && LACKMEM(state)))) + { +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, "switching to bounded heapsort at %d tuples: %s", + state->memtupcount, + pg_rusage_show(&state->ru_start)); +#endif + make_bounded_heap(state); + return; + } + + /* + * Done if we still fit in available memory and have array slots. + */ + if (state->memtupcount < state->memtupsize && !LACKMEM(state)) + return; + + /* + * Nope; time to switch to tape-based operation. + */ + inittapes(state); + + /* + * Dump tuples until we are back under the limit. + */ + dumptuples(state, false); + break; + + case TSS_BOUNDED: + + /* + * We don't want to grow the array here, so check whether the new + * tuple can be discarded before putting it in. This should be a + * good speed optimization, too, since when there are many more + * input tuples than the bound, most input tuples can be discarded + * with just this one comparison. Note that because we currently + * have the sort direction reversed, we must check for <= not >=. + */ + if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0) + { + /* new tuple <= top of the heap, so we can discard it */ + free_sort_tuple(state, tuple); + CHECK_FOR_INTERRUPTS(); + } + else + { + /* discard top of heap, sift up, insert new tuple */ + free_sort_tuple(state, &state->memtuples[0]); + rum_tuplesort_heap_siftup(state, false); + rum_tuplesort_heap_insert(state, tuple, 0, false); + } + break; + + case TSS_BUILDRUNS: + + /* + * Insert the tuple into the heap, with run number currentRun if + * it can go into the current run, else run number currentRun+1. + * The tuple can go into the current run if it is >= the first + * not-yet-output tuple. (Actually, it could go into the current + * run if it is >= the most recently output tuple ... but that + * would require keeping around the tuple we last output, and it's + * simplest to let writetup free each tuple as soon as it's + * written.) + * + * Note there will always be at least one tuple in the heap at + * this point; see dumptuples. 
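+			 *
+			 * Illustrative values only: if the first not-yet-output tuple in
+			 * the heap sorts as 42, an incoming tuple sorting as 50 is tagged
+			 * with currentRun and joins the run being written, while an
+			 * incoming 17 is tagged currentRun + 1 and is held back for the
+			 * next run.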
+ */ + Assert(state->memtupcount > 0); + if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0) + rum_tuplesort_heap_insert(state, tuple, state->currentRun, true); + else + rum_tuplesort_heap_insert(state, tuple, state->currentRun + 1, true); + + /* + * If we are over the memory limit, dump tuples till we're under. + */ + dumptuples(state, false); + break; + + default: + elog(ERROR, "invalid tuplesort state"); + break; + } +} + +/* + * All tuples have been provided; finish the sort. + */ +void +rum_tuplesort_performsort(Tuplesortstate *state) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, "performsort starting: %s", + pg_rusage_show(&state->ru_start)); +#endif + + switch (state->status) + { + case TSS_INITIAL: + + /* + * We were able to accumulate all the tuples within the allowed + * amount of memory. Just qsort 'em and we're done. + */ + if (state->memtupcount > 1) + { + /* Can we use the single-key sort function? */ + if (state->onlyKey != NULL) + qsort_ssup(state->memtuples, state->memtupcount, + state->onlyKey); + else + qsort_tuple(state->memtuples, + state->memtupcount, + state->comparetup, + state); + } + state->current = 0; + state->eof_reached = false; + state->markpos_offset = 0; + state->markpos_eof = false; + state->status = TSS_SORTEDINMEM; + break; + + case TSS_BOUNDED: + + /* + * We were able to accumulate all the tuples required for output + * in memory, using a heap to eliminate excess tuples. Now we + * have to transform the heap to a properly-sorted array. + */ + sort_bounded_heap(state); + state->current = 0; + state->eof_reached = false; + state->markpos_offset = 0; + state->markpos_eof = false; + state->status = TSS_SORTEDINMEM; + break; + + case TSS_BUILDRUNS: + + /* + * Finish tape-based sort. First, flush all tuples remaining in + * memory out to tape; then merge until we have a single remaining + * run (or, if !randomAccess, one run per tape). Note that + * mergeruns sets the correct state->status. + */ + dumptuples(state, true); + mergeruns(state); + state->eof_reached = false; + state->markpos_block = 0L; + state->markpos_offset = 0; + state->markpos_eof = false; + break; + + default: + elog(ERROR, "invalid tuplesort state"); + break; + } + +#ifdef TRACE_SORT + if (trace_sort) + { + if (state->status == TSS_FINALMERGE) + elog(LOG, "performsort done (except %d-way final merge): %s", + state->activeTapes, + pg_rusage_show(&state->ru_start)); + else + elog(LOG, "performsort done: %s", + pg_rusage_show(&state->ru_start)); + } +#endif + + MemoryContextSwitchTo(oldcontext); +} + +/* + * Internal routine to fetch the next tuple in either forward or back + * direction into *stup. Returns FALSE if no more tuples. + * If *should_free is set, the caller must pfree stup.tuple when done with it. + */ +static bool +rum_tuplesort_gettuple_common(Tuplesortstate *state, bool forward, + SortTuple *stup, bool *should_free) +{ + unsigned int tuplen; + + switch (state->status) + { + case TSS_SORTEDINMEM: + Assert(forward || state->randomAccess); + *should_free = false; + if (forward) + { + if (state->current < state->memtupcount) + { + *stup = state->memtuples[state->current++]; + return true; + } + state->eof_reached = true; + + /* + * Complain if caller tries to retrieve more tuples than + * originally asked for in a bounded sort. This is because + * returning EOF here might be the wrong thing. 
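+				 *
+				 * For example, a caller that set a bound of 100 via
+				 * rum_tuplesort_set_bound() and then asks for a 101st tuple
+				 * gets this error rather than a possibly wrong end-of-data
+				 * indication.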
+ */ + if (state->bounded && state->current >= state->bound) + elog(ERROR, "retrieved too many tuples in a bounded sort"); + + return false; + } + else + { + if (state->current <= 0) + return false; + + /* + * if all tuples are fetched already then we return last + * tuple, else - tuple before last returned. + */ + if (state->eof_reached) + state->eof_reached = false; + else + { + state->current--; /* last returned tuple */ + if (state->current <= 0) + return false; + } + *stup = state->memtuples[state->current - 1]; + return true; + } + break; + + case TSS_SORTEDONTAPE: + Assert(forward || state->randomAccess); + *should_free = true; + if (forward) + { + if (state->eof_reached) + return false; + if ((tuplen = getlen(state, state->result_tape, true)) != 0) + { + READTUP(state, stup, state->result_tape, tuplen); + return true; + } + else + { + state->eof_reached = true; + return false; + } + } + + /* + * Backward. + * + * if all tuples are fetched already then we return last tuple, + * else - tuple before last returned. + */ + if (state->eof_reached) + { + /* + * Seek position is pointing just past the zero tuplen at the + * end of file; back up to fetch last tuple's ending length + * word. If seek fails we must have a completely empty file. + */ + if (!LogicalTapeBackspace(state->tapeset, + state->result_tape, + 2 * sizeof(unsigned int))) + return false; + state->eof_reached = false; + } + else + { + /* + * Back up and fetch previously-returned tuple's ending length + * word. If seek fails, assume we are at start of file. + */ + if (!LogicalTapeBackspace(state->tapeset, + state->result_tape, + sizeof(unsigned int))) + return false; + tuplen = getlen(state, state->result_tape, false); + + /* + * Back up to get ending length word of tuple before it. + */ + if (!LogicalTapeBackspace(state->tapeset, + state->result_tape, + tuplen + 2 * sizeof(unsigned int))) + { + /* + * If that fails, presumably the prev tuple is the first + * in the file. Back up so that it becomes next to read + * in forward direction (not obviously right, but that is + * what in-memory case does). + */ + if (!LogicalTapeBackspace(state->tapeset, + state->result_tape, + tuplen + sizeof(unsigned int))) + elog(ERROR, "bogus tuple length in backward scan"); + return false; + } + } + + tuplen = getlen(state, state->result_tape, false); + + /* + * Now we have the length of the prior tuple, back up and read it. + * Note: READTUP expects we are positioned after the initial + * length word of the tuple, so back up to that point. + */ + if (!LogicalTapeBackspace(state->tapeset, + state->result_tape, + tuplen)) + elog(ERROR, "bogus tuple length in backward scan"); + READTUP(state, stup, state->result_tape, tuplen); + return true; + + case TSS_FINALMERGE: + Assert(forward); + *should_free = true; + + /* + * This code should match the inner loop of mergeonerun(). + */ + if (state->memtupcount > 0) + { + int srcTape = state->memtuples[0].tupindex; + Size tuplen; + int tupIndex; + SortTuple *newtup; + + *stup = state->memtuples[0]; + /* returned tuple is no longer counted in our memory space */ + if (stup->tuple) + { + tuplen = GetMemoryChunkSpace(stup->tuple); + state->availMem += tuplen; + state->mergeavailmem[srcTape] += tuplen; + } + rum_tuplesort_heap_siftup(state, false); + if ((tupIndex = state->mergenext[srcTape]) == 0) + { + /* + * out of preloaded data on this tape, try to read more + * + * Unlike mergeonerun(), we only preload from the single + * tape that's run dry. See mergepreread() comments. 
+ */ + mergeprereadone(state, srcTape); + + /* + * if still no data, we've reached end of run on this tape + */ + if ((tupIndex = state->mergenext[srcTape]) == 0) + return true; + } + /* pull next preread tuple from list, insert in heap */ + newtup = &state->memtuples[tupIndex]; + state->mergenext[srcTape] = newtup->tupindex; + if (state->mergenext[srcTape] == 0) + state->mergelast[srcTape] = 0; + rum_tuplesort_heap_insert(state, newtup, srcTape, false); + /* put the now-unused memtuples entry on the freelist */ + newtup->tupindex = state->mergefreelist; + state->mergefreelist = tupIndex; + state->mergeavailslots[srcTape]++; + return true; + } + return false; + + default: + elog(ERROR, "invalid tuplesort state"); + return false; /* keep compiler quiet */ + } +} + +/* + * Fetch the next tuple in either forward or back direction. + * If successful, put tuple in slot and return TRUE; else, clear the slot + * and return FALSE. + */ +bool +rum_tuplesort_gettupleslot(Tuplesortstate *state, bool forward, + TupleTableSlot *slot) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + bool should_free; + + if (!rum_tuplesort_gettuple_common(state, forward, &stup, &should_free)) + stup.tuple = NULL; + + MemoryContextSwitchTo(oldcontext); + + if (stup.tuple) + { + ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free); + return true; + } + else + { + ExecClearTuple(slot); + return false; + } +} + +/* + * Fetch the next tuple in either forward or back direction. + * Returns NULL if no more tuples. If *should_free is set, the + * caller must pfree the returned tuple when done with it. + */ +HeapTuple +rum_tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + if (!rum_tuplesort_gettuple_common(state, forward, &stup, should_free)) + stup.tuple = NULL; + + MemoryContextSwitchTo(oldcontext); + + return stup.tuple; +} + +/* + * Fetch the next index tuple in either forward or back direction. + * Returns NULL if no more tuples. If *should_free is set, the + * caller must pfree the returned tuple when done with it. + */ +IndexTuple +rum_tuplesort_getindextuple(Tuplesortstate *state, bool forward, + bool *should_free) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + if (!rum_tuplesort_gettuple_common(state, forward, &stup, should_free)) + stup.tuple = NULL; + + MemoryContextSwitchTo(oldcontext); + + return (IndexTuple) stup.tuple; +} + +/* + * Fetch the next Datum in either forward or back direction. + * Returns FALSE if no more datums. + * + * If the Datum is pass-by-ref type, the returned value is freshly palloc'd + * and is now owned by the caller. 
+ */ +bool +rum_tuplesort_getdatum(Tuplesortstate *state, bool forward, + Datum *val, bool *isNull) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + bool should_free; + + if (!rum_tuplesort_gettuple_common(state, forward, &stup, &should_free)) + { + MemoryContextSwitchTo(oldcontext); + return false; + } + + if (stup.isnull1 || state->datumTypeByVal) + { + *val = stup.datum1; + *isNull = stup.isnull1; + } + else + { + if (should_free) + *val = stup.datum1; + else + *val = datumCopy(stup.datum1, false, state->datumTypeLen); + *isNull = false; + } + + MemoryContextSwitchTo(oldcontext); + + return true; +} + +RumSortItem * +rum_tuplesort_getrum(Tuplesortstate *state, bool forward, bool *should_free) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + SortTuple stup; + + if (!rum_tuplesort_gettuple_common(state, forward, &stup, should_free)) + stup.tuple = NULL; + + MemoryContextSwitchTo(oldcontext); + + return (RumSortItem *) stup.tuple; +} + + +/* + * rum_tuplesort_merge_order - report merge order we'll use for given memory + * (note: "merge order" just means the number of input tapes in the merge). + * + * This is exported for use by the planner. allowedMem is in bytes. + */ +int +rum_tuplesort_merge_order(long allowedMem) +{ + int mOrder; + + /* + * We need one tape for each merge input, plus another one for the output, + * and each of these tapes needs buffer space. In addition we want + * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't + * count). + * + * Note: you might be thinking we need to account for the memtuples[] + * array in this calculation, but we effectively treat that as part of the + * MERGE_BUFFER_SIZE workspace. + */ + mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) / + (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD); + + /* Even in minimum memory, use at least a MINORDER merge */ + mOrder = Max(mOrder, MINORDER); + + return mOrder; +} + +/* + * inittapes - initialize for tape sorting. + * + * This is called only if we have found we don't have room to sort in memory. + */ +static void +inittapes(Tuplesortstate *state) +{ + int maxTapes, + ntuples, + j; + long tapeSpace; + + /* Compute number of tapes to use: merge order plus 1 */ + maxTapes = rum_tuplesort_merge_order(state->allowedMem) + 1; + + /* + * We must have at least 2*maxTapes slots in the memtuples[] array, else + * we'd not have room for merge heap plus preread. It seems unlikely that + * this case would ever occur, but be safe. + */ + maxTapes = Min(maxTapes, state->memtupsize / 2); + + state->maxTapes = maxTapes; + state->tapeRange = maxTapes - 1; + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, "switching to external sort with %d tapes: %s", + maxTapes, pg_rusage_show(&state->ru_start)); +#endif + + /* + * Decrease availMem to reflect the space needed for tape buffers; but + * don't decrease it to the point that we have no room for tuples. (That + * case is only likely to occur if sorting pass-by-value Datums; in all + * other scenarios the memtuples[] array is unlikely to occupy more than + * half of allowedMem. In the pass-by-value case it's not important to + * account for tuple space, so we don't care if LACKMEM becomes + * inaccurate.) + */ + tapeSpace = (long) maxTapes * TAPE_BUFFER_OVERHEAD; + if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem) + USEMEM(state, tapeSpace); + + /* + * Make sure that the temp file(s) underlying the tape set are created in + * suitable temp tablespaces. 
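+	 * (PrepareTempTablespaces() resolves the temp_tablespaces setting so
+	 * that the temporary files logtape.c creates below can be spread across
+	 * the configured tablespaces.)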
+ */ + PrepareTempTablespaces(); + + /* + * Create the tape set and allocate the per-tape data arrays. + */ + state->tapeset = LogicalTapeSetCreate(maxTapes); + + state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool)); + state->mergenext = (int *) palloc0(maxTapes * sizeof(int)); + state->mergelast = (int *) palloc0(maxTapes * sizeof(int)); + state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int)); + state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long)); + state->tp_fib = (int *) palloc0(maxTapes * sizeof(int)); + state->tp_runs = (int *) palloc0(maxTapes * sizeof(int)); + state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int)); + state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int)); + + /* + * Convert the unsorted contents of memtuples[] into a heap. Each tuple is + * marked as belonging to run number zero. + * + * NOTE: we pass false for checkIndex since there's no point in comparing + * indexes in this step, even though we do intend the indexes to be part + * of the sort key... + */ + ntuples = state->memtupcount; + state->memtupcount = 0; /* make the heap empty */ + for (j = 0; j < ntuples; j++) + { + /* Must copy source tuple to avoid possible overwrite */ + SortTuple stup = state->memtuples[j]; + + rum_tuplesort_heap_insert(state, &stup, 0, false); + } + Assert(state->memtupcount == ntuples); + + state->currentRun = 0; + + /* + * Initialize variables of Algorithm D (step D1). + */ + for (j = 0; j < maxTapes; j++) + { + state->tp_fib[j] = 1; + state->tp_runs[j] = 0; + state->tp_dummy[j] = 1; + state->tp_tapenum[j] = j; + } + state->tp_fib[state->tapeRange] = 0; + state->tp_dummy[state->tapeRange] = 0; + + state->Level = 1; + state->destTape = 0; + + state->status = TSS_BUILDRUNS; +} + +/* + * selectnewtape -- select new tape for new initial run. + * + * This is called after finishing a run when we know another run + * must be started. This implements steps D3, D4 of Algorithm D. + */ +static void +selectnewtape(Tuplesortstate *state) +{ + int j; + int a; + + /* Step D3: advance j (destTape) */ + if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1]) + { + state->destTape++; + return; + } + if (state->tp_dummy[state->destTape] != 0) + { + state->destTape = 0; + return; + } + + /* Step D4: increase level */ + state->Level++; + a = state->tp_fib[0]; + for (j = 0; j < state->tapeRange; j++) + { + state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j]; + state->tp_fib[j] = a + state->tp_fib[j + 1]; + } + state->destTape = 0; +} + +/* + * mergeruns -- merge all the completed initial runs. + * + * This implements steps D5, D6 of Algorithm D. All input data has + * already been written to initial runs on tape (see dumptuples). + */ +static void +mergeruns(Tuplesortstate *state) +{ + int tapenum, + svTape, + svRuns, + svDummy; + + Assert(state->status == TSS_BUILDRUNS); + Assert(state->memtupcount == 0); + + /* + * If we produced only one initial run (quite likely if the total data + * volume is between 1X and 2X workMem), we can just use that tape as the + * finished output, rather than doing a useless merge. (This obvious + * optimization is not in Knuth's algorithm.) 
+ */ + if (state->currentRun == 1) + { + state->result_tape = state->tp_tapenum[state->destTape]; + /* must freeze and rewind the finished output tape */ + LogicalTapeFreeze(state->tapeset, state->result_tape); + state->status = TSS_SORTEDONTAPE; + return; + } + + /* End of step D2: rewind all output tapes to prepare for merging */ + for (tapenum = 0; tapenum < state->tapeRange; tapenum++) + LogicalTapeRewind(state->tapeset, tapenum, false); + + for (;;) + { + /* + * At this point we know that tape[T] is empty. If there's just one + * (real or dummy) run left on each input tape, then only one merge + * pass remains. If we don't have to produce a materialized sorted + * tape, we can stop at this point and do the final merge on-the-fly. + */ + if (!state->randomAccess) + { + bool allOneRun = true; + + Assert(state->tp_runs[state->tapeRange] == 0); + for (tapenum = 0; tapenum < state->tapeRange; tapenum++) + { + if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1) + { + allOneRun = false; + break; + } + } + if (allOneRun) + { + /* Tell logtape.c we won't be writing anymore */ + LogicalTapeSetForgetFreeSpace(state->tapeset); + /* Initialize for the final merge pass */ + beginmerge(state); + state->status = TSS_FINALMERGE; + return; + } + } + + /* Step D5: merge runs onto tape[T] until tape[P] is empty */ + while (state->tp_runs[state->tapeRange - 1] || + state->tp_dummy[state->tapeRange - 1]) + { + bool allDummy = true; + + for (tapenum = 0; tapenum < state->tapeRange; tapenum++) + { + if (state->tp_dummy[tapenum] == 0) + { + allDummy = false; + break; + } + } + + if (allDummy) + { + state->tp_dummy[state->tapeRange]++; + for (tapenum = 0; tapenum < state->tapeRange; tapenum++) + state->tp_dummy[tapenum]--; + } + else + mergeonerun(state); + } + + /* Step D6: decrease level */ + if (--state->Level == 0) + break; + /* rewind output tape T to use as new input */ + LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange], + false); + /* rewind used-up input tape P, and prepare it for write pass */ + LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1], + true); + state->tp_runs[state->tapeRange - 1] = 0; + + /* + * reassign tape units per step D6; note we no longer care about A[] + */ + svTape = state->tp_tapenum[state->tapeRange]; + svDummy = state->tp_dummy[state->tapeRange]; + svRuns = state->tp_runs[state->tapeRange]; + for (tapenum = state->tapeRange; tapenum > 0; tapenum--) + { + state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1]; + state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1]; + state->tp_runs[tapenum] = state->tp_runs[tapenum - 1]; + } + state->tp_tapenum[0] = svTape; + state->tp_dummy[0] = svDummy; + state->tp_runs[0] = svRuns; + } + + /* + * Done. Knuth says that the result is on TAPE[1], but since we exited + * the loop without performing the last iteration of step D6, we have not + * rearranged the tape unit assignment, and therefore the result is on + * TAPE[T]. We need to do it this way so that we can freeze the final + * output tape while rewinding it. The last iteration of step D6 would be + * a waste of cycles anyway... + */ + state->result_tape = state->tp_tapenum[state->tapeRange]; + LogicalTapeFreeze(state->tapeset, state->result_tape); + state->status = TSS_SORTEDONTAPE; +} + +/* + * Merge one run from each input tape, except ones with dummy runs. + * + * This is the inner loop of Algorithm D step D5. We know that the + * output tape is TAPE[T]. 
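+ *
+ * For illustration, with three active input tapes the heap below holds the
+ * current head tuple of each run; every iteration writes the smallest of
+ * those heads to TAPE[T] and refills the heap from whichever tape supplied
+ * it, so the three input runs are consumed into one (roughly three times
+ * longer) output run.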
+ */ +static void +mergeonerun(Tuplesortstate *state) +{ + int destTape = state->tp_tapenum[state->tapeRange]; + int srcTape; + int tupIndex; + SortTuple *tup; + long priorAvail, + spaceFreed; + + /* + * Start the merge by loading one tuple from each active source tape into + * the heap. We can also decrease the input run/dummy run counts. + */ + beginmerge(state); + + /* + * Execute merge by repeatedly extracting lowest tuple in heap, writing it + * out, and replacing it with next tuple from same tape (if there is + * another one). + */ + while (state->memtupcount > 0) + { + /* write the tuple to destTape */ + priorAvail = state->availMem; + srcTape = state->memtuples[0].tupindex; + WRITETUP(state, destTape, &state->memtuples[0]); + /* writetup adjusted total free space, now fix per-tape space */ + spaceFreed = state->availMem - priorAvail; + state->mergeavailmem[srcTape] += spaceFreed; + /* compact the heap */ + rum_tuplesort_heap_siftup(state, false); + if ((tupIndex = state->mergenext[srcTape]) == 0) + { + /* out of preloaded data on this tape, try to read more */ + mergepreread(state); + /* if still no data, we've reached end of run on this tape */ + if ((tupIndex = state->mergenext[srcTape]) == 0) + continue; + } + /* pull next preread tuple from list, insert in heap */ + tup = &state->memtuples[tupIndex]; + state->mergenext[srcTape] = tup->tupindex; + if (state->mergenext[srcTape] == 0) + state->mergelast[srcTape] = 0; + rum_tuplesort_heap_insert(state, tup, srcTape, false); + /* put the now-unused memtuples entry on the freelist */ + tup->tupindex = state->mergefreelist; + state->mergefreelist = tupIndex; + state->mergeavailslots[srcTape]++; + } + + /* + * When the heap empties, we're done. Write an end-of-run marker on the + * output tape, and increment its count of real runs. + */ + markrunend(state, destTape); + state->tp_runs[state->tapeRange]++; + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, "finished %d-way merge step: %s", state->activeTapes, + pg_rusage_show(&state->ru_start)); +#endif +} + +/* + * beginmerge - initialize for a merge pass + * + * We decrease the counts of real and dummy runs for each tape, and mark + * which tapes contain active input runs in mergeactive[]. Then, load + * as many tuples as we can from each active input tape, and finally + * fill the merge heap with the first tuple from each active tape. + */ +static void +beginmerge(Tuplesortstate *state) +{ + int activeTapes; + int tapenum; + int srcTape; + int slotsPerTape; + long spacePerTape; + + /* Heap should be empty here */ + Assert(state->memtupcount == 0); + + /* Adjust run counts and mark the active tapes */ + memset(state->mergeactive, 0, + state->maxTapes * sizeof(*state->mergeactive)); + activeTapes = 0; + for (tapenum = 0; tapenum < state->tapeRange; tapenum++) + { + if (state->tp_dummy[tapenum] > 0) + state->tp_dummy[tapenum]--; + else + { + Assert(state->tp_runs[tapenum] > 0); + state->tp_runs[tapenum]--; + srcTape = state->tp_tapenum[tapenum]; + state->mergeactive[srcTape] = true; + activeTapes++; + } + } + state->activeTapes = activeTapes; + + /* Clear merge-pass state variables */ + memset(state->mergenext, 0, + state->maxTapes * sizeof(*state->mergenext)); + memset(state->mergelast, 0, + state->maxTapes * sizeof(*state->mergelast)); + state->mergefreelist = 0; /* nothing in the freelist */ + state->mergefirstfree = activeTapes; /* 1st slot avail for preread */ + + /* + * Initialize space allocation to let each active input tape have an equal + * share of preread space. 
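+	 *
+	 * For example (purely illustrative numbers): with 6 active tapes and 600
+	 * memtuples[] slots free above mergefirstfree, each active tape gets 100
+	 * preread slots and one sixth of the remaining availMem; inactive tapes
+	 * get nothing.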
+ */ + Assert(activeTapes > 0); + slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes; + Assert(slotsPerTape > 0); + spacePerTape = state->availMem / activeTapes; + for (srcTape = 0; srcTape < state->maxTapes; srcTape++) + { + if (state->mergeactive[srcTape]) + { + state->mergeavailslots[srcTape] = slotsPerTape; + state->mergeavailmem[srcTape] = spacePerTape; + } + } + + /* + * Preread as many tuples as possible (and at least one) from each active + * tape + */ + mergepreread(state); + + /* Load the merge heap with the first tuple from each input tape */ + for (srcTape = 0; srcTape < state->maxTapes; srcTape++) + { + int tupIndex = state->mergenext[srcTape]; + SortTuple *tup; + + if (tupIndex) + { + tup = &state->memtuples[tupIndex]; + state->mergenext[srcTape] = tup->tupindex; + if (state->mergenext[srcTape] == 0) + state->mergelast[srcTape] = 0; + rum_tuplesort_heap_insert(state, tup, srcTape, false); + /* put the now-unused memtuples entry on the freelist */ + tup->tupindex = state->mergefreelist; + state->mergefreelist = tupIndex; + state->mergeavailslots[srcTape]++; + } + } +} + +/* + * mergepreread - load tuples from merge input tapes + * + * This routine exists to improve sequentiality of reads during a merge pass, + * as explained in the header comments of this file. Load tuples from each + * active source tape until the tape's run is exhausted or it has used up + * its fair share of available memory. In any case, we guarantee that there + * is at least one preread tuple available from each unexhausted input tape. + * + * We invoke this routine at the start of a merge pass for initial load, + * and then whenever any tape's preread data runs out. Note that we load + * as much data as possible from all tapes, not just the one that ran out. + * This is because logtape.c works best with a usage pattern that alternates + * between reading a lot of data and writing a lot of data, so whenever we + * are forced to read, we should fill working memory completely. + * + * In FINALMERGE state, we *don't* use this routine, but instead just preread + * from the single tape that ran dry. There's no read/write alternation in + * that state and so no point in scanning through all the tapes to fix one. + * (Moreover, there may be quite a lot of inactive tapes in that state, since + * we might have had many fewer runs than tapes. In a regular tape-to-tape + * merge we can expect most of the tapes to be active.) + */ +static void +mergepreread(Tuplesortstate *state) +{ + int srcTape; + + for (srcTape = 0; srcTape < state->maxTapes; srcTape++) + mergeprereadone(state, srcTape); +} + +/* + * mergeprereadone - load tuples from one merge input tape + * + * Read tuples from the specified tape until it has used up its free memory + * or array slots; but ensure that we have at least one tuple, if any are + * to be had. 
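+ *
+ * Note the loop condition below: even after the tape has used up its slot
+ * or memory quota, we keep reading while mergenext[srcTape] is still zero,
+ * which is what guarantees the "at least one tuple" promise above.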
+ */ +static void +mergeprereadone(Tuplesortstate *state, int srcTape) +{ + unsigned int tuplen; + SortTuple stup; + int tupIndex; + long priorAvail, + spaceUsed; + + if (!state->mergeactive[srcTape]) + return; /* tape's run is already exhausted */ + priorAvail = state->availMem; + state->availMem = state->mergeavailmem[srcTape]; + while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) || + state->mergenext[srcTape] == 0) + { + /* read next tuple, if any */ + if ((tuplen = getlen(state, srcTape, true)) == 0) + { + state->mergeactive[srcTape] = false; + break; + } + READTUP(state, &stup, srcTape, tuplen); + /* find a free slot in memtuples[] for it */ + tupIndex = state->mergefreelist; + if (tupIndex) + state->mergefreelist = state->memtuples[tupIndex].tupindex; + else + { + tupIndex = state->mergefirstfree++; + Assert(tupIndex < state->memtupsize); + } + state->mergeavailslots[srcTape]--; + /* store tuple, append to list for its tape */ + stup.tupindex = 0; + state->memtuples[tupIndex] = stup; + if (state->mergelast[srcTape]) + state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex; + else + state->mergenext[srcTape] = tupIndex; + state->mergelast[srcTape] = tupIndex; + } + /* update per-tape and global availmem counts */ + spaceUsed = state->mergeavailmem[srcTape] - state->availMem; + state->mergeavailmem[srcTape] = state->availMem; + state->availMem = priorAvail - spaceUsed; +} + +/* + * dumptuples - remove tuples from heap and write to tape + * + * This is used during initial-run building, but not during merging. + * + * When alltuples = false, dump only enough tuples to get under the + * availMem limit (and leave at least one tuple in the heap in any case, + * since puttuple assumes it always has a tuple to compare to). We also + * insist there be at least one free slot in the memtuples[] array. + * + * When alltuples = true, dump everything currently in memory. + * (This case is only used at end of input data.) + * + * If we empty the heap, close out the current run and return (this should + * only happen at end of input data). If we see that the tuple run number + * at the top of the heap has changed, start a new run. + */ +static void +dumptuples(Tuplesortstate *state, bool alltuples) +{ + while (alltuples || + (LACKMEM(state) && state->memtupcount > 1) || + state->memtupcount >= state->memtupsize) + { + /* + * Dump the heap's frontmost entry, and sift up to remove it from the + * heap. + */ + Assert(state->memtupcount > 0); + WRITETUP(state, state->tp_tapenum[state->destTape], + &state->memtuples[0]); + rum_tuplesort_heap_siftup(state, true); + + /* + * If the heap is empty *or* top run number has changed, we've + * finished the current run. + */ + if (state->memtupcount == 0 || + state->currentRun != state->memtuples[0].tupindex) + { + markrunend(state, state->tp_tapenum[state->destTape]); + state->currentRun++; + state->tp_runs[state->destTape]++; + state->tp_dummy[state->destTape]--; /* per Alg D step D2 */ + +#ifdef TRACE_SORT + if (trace_sort) + elog(LOG, "finished writing%s run %d to tape %d: %s", + (state->memtupcount == 0) ? " final" : "", + state->currentRun, state->destTape, + pg_rusage_show(&state->ru_start)); +#endif + + /* + * Done if heap is empty, else prepare for new run. 
+ */ + if (state->memtupcount == 0) + break; + Assert(state->currentRun == state->memtuples[0].tupindex); + selectnewtape(state); + } + } +} + +/* + * rum_tuplesort_rescan - rewind and replay the scan + */ +void +rum_tuplesort_rescan(Tuplesortstate *state) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + + Assert(state->randomAccess); + + switch (state->status) + { + case TSS_SORTEDINMEM: + state->current = 0; + state->eof_reached = false; + state->markpos_offset = 0; + state->markpos_eof = false; + break; + case TSS_SORTEDONTAPE: + LogicalTapeRewind(state->tapeset, + state->result_tape, + false); + state->eof_reached = false; + state->markpos_block = 0L; + state->markpos_offset = 0; + state->markpos_eof = false; + break; + default: + elog(ERROR, "invalid tuplesort state"); + break; + } + + MemoryContextSwitchTo(oldcontext); +} + +/* + * rum_tuplesort_markpos - saves current position in the merged sort file + */ +void +rum_tuplesort_markpos(Tuplesortstate *state) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + + Assert(state->randomAccess); + + switch (state->status) + { + case TSS_SORTEDINMEM: + state->markpos_offset = state->current; + state->markpos_eof = state->eof_reached; + break; + case TSS_SORTEDONTAPE: + LogicalTapeTell(state->tapeset, + state->result_tape, + &state->markpos_block, + &state->markpos_offset); + state->markpos_eof = state->eof_reached; + break; + default: + elog(ERROR, "invalid tuplesort state"); + break; + } + + MemoryContextSwitchTo(oldcontext); +} + +/* + * rum_tuplesort_restorepos - restores current position in merged sort file to + * last saved position + */ +void +rum_tuplesort_restorepos(Tuplesortstate *state) +{ + MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); + + Assert(state->randomAccess); + + switch (state->status) + { + case TSS_SORTEDINMEM: + state->current = state->markpos_offset; + state->eof_reached = state->markpos_eof; + break; + case TSS_SORTEDONTAPE: + if (!LogicalTapeSeek(state->tapeset, + state->result_tape, + state->markpos_block, + state->markpos_offset)) + elog(ERROR, "rum_tuplesort_restorepos failed"); + state->eof_reached = state->markpos_eof; + break; + default: + elog(ERROR, "invalid tuplesort state"); + break; + } + + MemoryContextSwitchTo(oldcontext); +} + +/* + * rum_tuplesort_get_stats - extract summary statistics + * + * This can be called after rum_tuplesort_performsort() finishes to obtain + * printable summary information about how the sort was performed. + * spaceUsed is measured in kilobytes. + */ +void +rum_tuplesort_get_stats(Tuplesortstate *state, + const char **sortMethod, + const char **spaceType, + long *spaceUsed) +{ + /* + * Note: it might seem we should provide both memory and disk usage for a + * disk-based sort. However, the current code doesn't track memory space + * accurately once we have begun to return tuples to the caller (since we + * don't account for pfree's the caller is expected to do), so we cannot + * rely on availMem in a disk sort. This does not seem worth the overhead + * to fix. Is it worth creating an API for the memory context code to + * tell us how much is actually used in sortcontext? 
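+	 *
+	 * For example, with the default 8kB BLCKSZ a disk sort that occupied
+	 * 1000 logical tape blocks is reported as 8000 kB of "Disk", while an
+	 * in-memory sort reports allowedMem minus availMem, rounded up to whole
+	 * kilobytes, as "Memory".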
+ */ + if (state->tapeset) + { + *spaceType = "Disk"; + *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024); + } + else + { + *spaceType = "Memory"; + *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024; + } + + switch (state->status) + { + case TSS_SORTEDINMEM: + if (state->boundUsed) + *sortMethod = "top-N heapsort"; + else + *sortMethod = "quicksort"; + break; + case TSS_SORTEDONTAPE: + *sortMethod = "external sort"; + break; + case TSS_FINALMERGE: + *sortMethod = "external merge"; + break; + default: + *sortMethod = "still in progress"; + break; + } +} + + +/* + * Heap manipulation routines, per Knuth's Algorithm 5.2.3H. + * + * Compare two SortTuples. If checkIndex is true, use the tuple index + * as the front of the sort key; otherwise, no. + */ + +#define HEAPCOMPARE(tup1,tup2) \ + (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \ + ((tup1)->tupindex) - ((tup2)->tupindex) : \ + COMPARETUP(state, tup1, tup2)) + +/* + * Convert the existing unordered array of SortTuples to a bounded heap, + * discarding all but the smallest "state->bound" tuples. + * + * When working with a bounded heap, we want to keep the largest entry + * at the root (array entry zero), instead of the smallest as in the normal + * sort case. This allows us to discard the largest entry cheaply. + * Therefore, we temporarily reverse the sort direction. + * + * We assume that all entries in a bounded heap will always have tupindex + * zero; it therefore doesn't matter that HEAPCOMPARE() doesn't reverse + * the direction of comparison for tupindexes. + */ +static void +make_bounded_heap(Tuplesortstate *state) +{ + int tupcount = state->memtupcount; + int i; + + Assert(state->status == TSS_INITIAL); + Assert(state->bounded); + Assert(tupcount >= state->bound); + + /* Reverse sort direction so largest entry will be at root */ + REVERSEDIRECTION(state); + + state->memtupcount = 0; /* make the heap empty */ + for (i = 0; i < tupcount; i++) + { + if (state->memtupcount >= state->bound && + COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0) + { + /* New tuple would just get thrown out, so skip it */ + free_sort_tuple(state, &state->memtuples[i]); + CHECK_FOR_INTERRUPTS(); + } + else + { + /* Insert next tuple into heap */ + /* Must copy source tuple to avoid possible overwrite */ + SortTuple stup = state->memtuples[i]; + + rum_tuplesort_heap_insert(state, &stup, 0, false); + + /* If heap too full, discard largest entry */ + if (state->memtupcount > state->bound) + { + free_sort_tuple(state, &state->memtuples[0]); + rum_tuplesort_heap_siftup(state, false); + } + } + } + + Assert(state->memtupcount == state->bound); + state->status = TSS_BOUNDED; +} + +/* + * Convert the bounded heap to a properly-sorted array + */ +static void +sort_bounded_heap(Tuplesortstate *state) +{ + int tupcount = state->memtupcount; + + Assert(state->status == TSS_BOUNDED); + Assert(state->bounded); + Assert(tupcount == state->bound); + + /* + * We can unheapify in place because each sift-up will remove the largest + * entry, which we can promptly store in the newly freed slot at the end. + * Once we're down to a single-entry heap, we're done. + */ + while (state->memtupcount > 1) + { + SortTuple stup = state->memtuples[0]; + + /* this sifts-up the next-largest entry and decreases memtupcount */ + rum_tuplesort_heap_siftup(state, false); + state->memtuples[state->memtupcount] = stup; + } + state->memtupcount = tupcount; + + /* + * Reverse sort direction back to the original state. 
This is not + * actually necessary but seems like a good idea for tidiness. + */ + REVERSEDIRECTION(state); + + state->status = TSS_SORTEDINMEM; + state->boundUsed = true; +} + +/* + * Insert a new tuple into an empty or existing heap, maintaining the + * heap invariant. Caller is responsible for ensuring there's room. + * + * Note: we assume *tuple is a temporary variable that can be scribbled on. + * For some callers, tuple actually points to a memtuples[] entry above the + * end of the heap. This is safe as long as it's not immediately adjacent + * to the end of the heap (ie, in the [memtupcount] array entry) --- if it + * is, it might get overwritten before being moved into the heap! + */ +static void +rum_tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple, + int tupleindex, bool checkIndex) +{ + SortTuple *memtuples; + int j; + + /* + * Save the tupleindex --- see notes above about writing on *tuple. It's a + * historical artifact that tupleindex is passed as a separate argument + * and not in *tuple, but it's notationally convenient so let's leave it + * that way. + */ + tuple->tupindex = tupleindex; + + memtuples = state->memtuples; + Assert(state->memtupcount < state->memtupsize); + + CHECK_FOR_INTERRUPTS(); + + /* + * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is + * using 1-based array indexes, not 0-based. + */ + j = state->memtupcount++; + while (j > 0) + { + int i = (j - 1) >> 1; + + if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0) + break; + memtuples[j] = memtuples[i]; + j = i; + } + memtuples[j] = *tuple; +} + +/* + * The tuple at state->memtuples[0] has been removed from the heap. + * Decrement memtupcount, and sift up to maintain the heap invariant. + */ +static void +rum_tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex) +{ + SortTuple *memtuples = state->memtuples; + SortTuple *tuple; + int i, + n; + + if (--state->memtupcount <= 0) + return; + + CHECK_FOR_INTERRUPTS(); + + n = state->memtupcount; + tuple = &memtuples[n]; /* tuple that must be reinserted */ + i = 0; /* i is where the "hole" is */ + for (;;) + { + int j = 2 * i + 1; + + if (j >= n) + break; + if (j + 1 < n && + HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0) + j++; + if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0) + break; + memtuples[i] = memtuples[j]; + i = j; + } + memtuples[i] = *tuple; +} + + +/* + * Tape interface routines + */ + +static unsigned int +getlen(Tuplesortstate *state, int tapenum, bool eofOK) +{ + unsigned int len; + + if (LogicalTapeRead(state->tapeset, tapenum, + &len, sizeof(len)) != sizeof(len)) + elog(ERROR, "unexpected end of tape"); + if (len == 0 && !eofOK) + elog(ERROR, "unexpected end of data"); + return len; +} + +static void +markrunend(Tuplesortstate *state, int tapenum) +{ + unsigned int len = 0; + + LogicalTapeWrite(state->tapeset, tapenum, (void *) &len, sizeof(len)); +} + + +/* + * Inline-able copy of FunctionCall2Coll() to save some cycles in sorting. 
+ */ +static inline Datum +myFunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2) +{ + FunctionCallInfoData fcinfo; + Datum result; + + InitFunctionCallInfoData(fcinfo, flinfo, 2, collation, NULL, NULL); + + fcinfo.arg[0] = arg1; + fcinfo.arg[1] = arg2; + fcinfo.argnull[0] = false; + fcinfo.argnull[1] = false; + + result = FunctionCallInvoke(&fcinfo); + + /* Check for null result, since caller is clearly not expecting one */ + if (fcinfo.isnull) + elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid); + + return result; +} + +/* + * Apply a sort function (by now converted to fmgr lookup form) + * and return a 3-way comparison result. This takes care of handling + * reverse-sort and NULLs-ordering properly. We assume that DESC and + * NULLS_FIRST options are encoded in sk_flags the same way btree does it. + */ +static inline int32 +inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags, Oid collation, + Datum datum1, bool isNull1, + Datum datum2, bool isNull2) +{ + int32 compare; + + if (isNull1) + { + if (isNull2) + compare = 0; /* NULL "=" NULL */ + else if (sk_flags & SK_BT_NULLS_FIRST) + compare = -1; /* NULL "<" NOT_NULL */ + else + compare = 1; /* NULL ">" NOT_NULL */ + } + else if (isNull2) + { + if (sk_flags & SK_BT_NULLS_FIRST) + compare = 1; /* NOT_NULL ">" NULL */ + else + compare = -1; /* NOT_NULL "<" NULL */ + } + else + { + compare = DatumGetInt32(myFunctionCall2Coll(sortFunction, collation, + datum1, datum2)); + + if (sk_flags & SK_BT_DESC) + compare = -compare; + } + + return compare; +} + + +/* + * Routines specialized for HeapTuple (actually MinimalTuple) case + */ + +static int +comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state) +{ + SortSupport sortKey = state->sortKeys; + HeapTupleData ltup; + HeapTupleData rtup; + TupleDesc tupDesc; + int nkey; + int32 compare; + + /* Compare the leading sort key */ + compare = ApplySortComparator(a->datum1, a->isnull1, + b->datum1, b->isnull1, + sortKey); + if (compare != 0) + return compare; + + /* Compare additional sort keys */ + ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET; + ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET); + rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET; + rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET); + tupDesc = state->tupDesc; + sortKey++; + for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++) + { + AttrNumber attno = sortKey->ssup_attno; + Datum datum1, + datum2; + bool isnull1, + isnull2; + + datum1 = heap_getattr(<up, attno, tupDesc, &isnull1); + datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2); + + compare = ApplySortComparator(datum1, isnull1, + datum2, isnull2, + sortKey); + if (compare != 0) + return compare; + } + + return 0; +} + +static void +copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup) +{ + /* + * We expect the passed "tup" to be a TupleTableSlot, and form a + * MinimalTuple using the exported interface for that. 
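+	 *
+	 * The leading sort key is also extracted into datum1/isnull1 here, so
+	 * that comparetup_heap() can usually resolve an ordering from the cached
+	 * first key without touching the rest of the tuple.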
+ */ + TupleTableSlot *slot = (TupleTableSlot *) tup; + MinimalTuple tuple; + HeapTupleData htup; + + /* copy the tuple into sort storage */ + tuple = ExecCopySlotMinimalTuple(slot); + stup->tuple = (void *) tuple; + USEMEM(state, GetMemoryChunkSpace(tuple)); + /* set up first-column key value */ + htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET; + htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET); + stup->datum1 = heap_getattr(&htup, + state->sortKeys[0].ssup_attno, + state->tupDesc, + &stup->isnull1); +} + +static void +writetup_heap(Tuplesortstate *state, int tapenum, SortTuple *stup) +{ + MinimalTuple tuple = (MinimalTuple) stup->tuple; + + /* the part of the MinimalTuple we'll write: */ + char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET; + unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET; + + /* total on-disk footprint: */ + unsigned int tuplen = tupbodylen + sizeof(int); + + LogicalTapeWrite(state->tapeset, tapenum, + (void *) &tuplen, sizeof(tuplen)); + LogicalTapeWrite(state->tapeset, tapenum, + (void *) tupbody, tupbodylen); + if (state->randomAccess) /* need trailing length word? */ + LogicalTapeWrite(state->tapeset, tapenum, + (void *) &tuplen, sizeof(tuplen)); + + FREEMEM(state, GetMemoryChunkSpace(tuple)); + heap_free_minimal_tuple(tuple); +} + +static void +readtup_heap(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len) +{ + unsigned int tupbodylen = len - sizeof(int); + unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET; + MinimalTuple tuple = (MinimalTuple) palloc(tuplen); + char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET; + HeapTupleData htup; + + USEMEM(state, GetMemoryChunkSpace(tuple)); + /* read in the tuple proper */ + tuple->t_len = tuplen; + LogicalTapeReadExact(state->tapeset, tapenum, + tupbody, tupbodylen); + if (state->randomAccess) /* need trailing length word? 
*/ + LogicalTapeReadExact(state->tapeset, tapenum, + &tuplen, sizeof(tuplen)); + stup->tuple = (void *) tuple; + /* set up first-column key value */ + htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET; + htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET); + stup->datum1 = heap_getattr(&htup, + state->sortKeys[0].ssup_attno, + state->tupDesc, + &stup->isnull1); +} + +static void +reversedirection_heap(Tuplesortstate *state) +{ + SortSupport sortKey = state->sortKeys; + int nkey; + + for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++) + { + sortKey->ssup_reverse = !sortKey->ssup_reverse; + sortKey->ssup_nulls_first = !sortKey->ssup_nulls_first; + } +} + + +/* + * Routines specialized for the CLUSTER case (HeapTuple data, with + * comparisons per a btree index definition) + */ + +static int +comparetup_cluster(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state) +{ + ScanKey scanKey = state->indexScanKey; + HeapTuple ltup; + HeapTuple rtup; + TupleDesc tupDesc; + int nkey; + int32 compare; + + /* Compare the leading sort key, if it's simple */ + if (state->indexInfo->ii_KeyAttrNumbers[0] != 0) + { + compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, + scanKey->sk_collation, + a->datum1, a->isnull1, + b->datum1, b->isnull1); + if (compare != 0 || state->nKeys == 1) + return compare; + /* Compare additional columns the hard way */ + scanKey++; + nkey = 1; + } + else + { + /* Must compare all keys the hard way */ + nkey = 0; + } + + /* Compare additional sort keys */ + ltup = (HeapTuple) a->tuple; + rtup = (HeapTuple) b->tuple; + + if (state->indexInfo->ii_Expressions == NULL) + { + /* If not expression index, just compare the proper heap attrs */ + tupDesc = state->tupDesc; + + for (; nkey < state->nKeys; nkey++, scanKey++) + { + AttrNumber attno = state->indexInfo->ii_KeyAttrNumbers[nkey]; + Datum datum1, + datum2; + bool isnull1, + isnull2; + + datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1); + datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2); + + compare = inlineApplySortFunction(&scanKey->sk_func, + scanKey->sk_flags, + scanKey->sk_collation, + datum1, isnull1, + datum2, isnull2); + if (compare != 0) + return compare; + } + } + else + { + /* + * In the expression index case, compute the whole index tuple and + * then compare values. It would perhaps be faster to compute only as + * many columns as we need to compare, but that would require + * duplicating all the logic in FormIndexDatum. 
+ */ + Datum l_index_values[INDEX_MAX_KEYS]; + bool l_index_isnull[INDEX_MAX_KEYS]; + Datum r_index_values[INDEX_MAX_KEYS]; + bool r_index_isnull[INDEX_MAX_KEYS]; + TupleTableSlot *ecxt_scantuple; + + /* Reset context each time to prevent memory leakage */ + ResetPerTupleExprContext(state->estate); + + ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple; + + ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false); + FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate, + l_index_values, l_index_isnull); + + ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false); + FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate, + r_index_values, r_index_isnull); + + for (; nkey < state->nKeys; nkey++, scanKey++) + { + compare = inlineApplySortFunction(&scanKey->sk_func, + scanKey->sk_flags, + scanKey->sk_collation, + l_index_values[nkey], + l_index_isnull[nkey], + r_index_values[nkey], + r_index_isnull[nkey]); + if (compare != 0) + return compare; + } + } + + return 0; +} + +static void +copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup) +{ + HeapTuple tuple = (HeapTuple) tup; + + /* copy the tuple into sort storage */ + tuple = heap_copytuple(tuple); + stup->tuple = (void *) tuple; + USEMEM(state, GetMemoryChunkSpace(tuple)); + /* set up first-column key value, if it's a simple column */ + if (state->indexInfo->ii_KeyAttrNumbers[0] != 0) + stup->datum1 = heap_getattr(tuple, + state->indexInfo->ii_KeyAttrNumbers[0], + state->tupDesc, + &stup->isnull1); +} + +static void +writetup_cluster(Tuplesortstate *state, int tapenum, SortTuple *stup) +{ + HeapTuple tuple = (HeapTuple) stup->tuple; + unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int); + + /* We need to store t_self, but not other fields of HeapTupleData */ + LogicalTapeWrite(state->tapeset, tapenum, + &tuplen, sizeof(tuplen)); + LogicalTapeWrite(state->tapeset, tapenum, + &tuple->t_self, sizeof(ItemPointerData)); + LogicalTapeWrite(state->tapeset, tapenum, + tuple->t_data, tuple->t_len); + if (state->randomAccess) /* need trailing length word? */ + LogicalTapeWrite(state->tapeset, tapenum, + &tuplen, sizeof(tuplen)); + + FREEMEM(state, GetMemoryChunkSpace(tuple)); + heap_freetuple(tuple); +} + +static void +readtup_cluster(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int tuplen) +{ + unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int); + HeapTuple tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE); + + USEMEM(state, GetMemoryChunkSpace(tuple)); + /* Reconstruct the HeapTupleData header */ + tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE); + tuple->t_len = t_len; + LogicalTapeReadExact(state->tapeset, tapenum, + &tuple->t_self, sizeof(ItemPointerData)); + /* We don't currently bother to reconstruct t_tableOid */ + tuple->t_tableOid = InvalidOid; + /* Read in the tuple body */ + LogicalTapeReadExact(state->tapeset, tapenum, + tuple->t_data, tuple->t_len); + if (state->randomAccess) /* need trailing length word? 
*/ + LogicalTapeReadExact(state->tapeset, tapenum, + &tuplen, sizeof(tuplen)); + stup->tuple = (void *) tuple; + /* set up first-column key value, if it's a simple column */ + if (state->indexInfo->ii_KeyAttrNumbers[0] != 0) + stup->datum1 = heap_getattr(tuple, + state->indexInfo->ii_KeyAttrNumbers[0], + state->tupDesc, + &stup->isnull1); +} + + +/* + * Routines specialized for IndexTuple case + * + * The btree and hash cases require separate comparison functions, but the + * IndexTuple representation is the same so the copy/write/read support + * functions can be shared. + */ + +static int +comparetup_index_btree(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state) +{ + /* + * This is similar to _bt_tuplecompare(), but we have already done the + * index_getattr calls for the first column, and we need to keep track of + * whether any null fields are present. Also see the special treatment + * for equal keys at the end. + */ + ScanKey scanKey = state->indexScanKey; + IndexTuple tuple1; + IndexTuple tuple2; + int keysz; + TupleDesc tupDes; + bool equal_hasnull = false; + int nkey; + int32 compare; + + /* Compare the leading sort key */ + compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, + scanKey->sk_collation, + a->datum1, a->isnull1, + b->datum1, b->isnull1); + if (compare != 0) + return compare; + + /* they are equal, so we only need to examine one null flag */ + if (a->isnull1) + equal_hasnull = true; + + /* Compare additional sort keys */ + tuple1 = (IndexTuple) a->tuple; + tuple2 = (IndexTuple) b->tuple; + keysz = state->nKeys; + tupDes = RelationGetDescr(state->indexRel); + scanKey++; + for (nkey = 2; nkey <= keysz; nkey++, scanKey++) + { + Datum datum1, + datum2; + bool isnull1, + isnull2; + + datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1); + datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2); + + compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, + scanKey->sk_collation, + datum1, isnull1, + datum2, isnull2); + if (compare != 0) + return compare; /* done when we find unequal attributes */ + + /* they are equal, so we only need to examine one null flag */ + if (isnull1) + equal_hasnull = true; + } + + /* + * If btree has asked us to enforce uniqueness, complain if two equal + * tuples are detected (unless there was at least one NULL field). + * + * It is sufficient to make the test here, because if two tuples are equal + * they *must* get compared at some stage of the sort --- otherwise the + * sort algorithm wouldn't have checked whether one must appear before the + * other. + */ + if (state->enforceUnique && !equal_hasnull) + { + Datum values[INDEX_MAX_KEYS]; + bool isnull[INDEX_MAX_KEYS]; + char *key_desc; + + /* + * Some rather brain-dead implementations of qsort (such as the one in + * QNX 4) will sometimes call the comparison routine to compare a + * value to itself, but we always use our own implementation, which + * does not. + */ + Assert(tuple1 != tuple2); + + index_deform_tuple(tuple1, tupDes, values, isnull); + + key_desc = BuildIndexValueDescription(state->indexRel, values, isnull); + + ereport(ERROR, + (errcode(ERRCODE_UNIQUE_VIOLATION), + errmsg("could not create unique index \"%s\"", + RelationGetRelationName(state->indexRel)), + key_desc ? errdetail("Key %s is duplicated.", key_desc) : + errdetail("Duplicate keys exist."), + errtableconstraint(state->heapRel, + RelationGetRelationName(state->indexRel)))); + } + + /* + * If key values are equal, we sort on ItemPointer. 
This does not affect + * validity of the finished index, but it may be useful to have index + * scans in physical order. + */ + { + BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid); + BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid); + + if (blk1 != blk2) + return (blk1 < blk2) ? -1 : 1; + } + { + OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid); + OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid); + + if (pos1 != pos2) + return (pos1 < pos2) ? -1 : 1; + } + + return 0; +} + +static int +comparetup_index_hash(const SortTuple *a, const SortTuple *b, + Tuplesortstate *state) +{ + uint32 hash1; + uint32 hash2; + IndexTuple tuple1; + IndexTuple tuple2; + + /* + * Fetch hash keys and mask off bits we don't want to sort by. We know + * that the first column of the index tuple is the hash key. + */ + Assert(!a->isnull1); + hash1 = DatumGetUInt32(a->datum1) & state->hash_mask; + Assert(!b->isnull1); + hash2 = DatumGetUInt32(b->datum1) & state->hash_mask; + + if (hash1 > hash2) + return 1; + else if (hash1 < hash2) + return -1; + + /* + * If hash values are equal, we sort on ItemPointer. This does not affect + * validity of the finished index, but it may be useful to have index + * scans in physical order. + */ + tuple1 = (IndexTuple) a->tuple; + tuple2 = (IndexTuple) b->tuple; + + { + BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid); + BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid); + + if (blk1 != blk2) + return (blk1 < blk2) ? -1 : 1; + } + { + OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid); + OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid); + + if (pos1 != pos2) + return (pos1 < pos2) ? -1 : 1; + } + + return 0; +} + +static void +copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup) +{ + IndexTuple tuple = (IndexTuple) tup; + unsigned int tuplen = IndexTupleSize(tuple); + IndexTuple newtuple; + + /* copy the tuple into sort storage */ + newtuple = (IndexTuple) palloc(tuplen); + memcpy(newtuple, tuple, tuplen); + USEMEM(state, GetMemoryChunkSpace(newtuple)); + stup->tuple = (void *) newtuple; + /* set up first-column key value */ + stup->datum1 = index_getattr(newtuple, + 1, + RelationGetDescr(state->indexRel), + &stup->isnull1); +} + +static void +writetup_index(Tuplesortstate *state, int tapenum, SortTuple *stup) +{ + IndexTuple tuple = (IndexTuple) stup->tuple; + unsigned int tuplen; + + tuplen = IndexTupleSize(tuple) + sizeof(tuplen); + LogicalTapeWrite(state->tapeset, tapenum, + (void *) &tuplen, sizeof(tuplen)); + LogicalTapeWrite(state->tapeset, tapenum, + (void *) tuple, IndexTupleSize(tuple)); + if (state->randomAccess) /* need trailing length word? */ + LogicalTapeWrite(state->tapeset, tapenum, + (void *) &tuplen, sizeof(tuplen)); + + FREEMEM(state, GetMemoryChunkSpace(tuple)); + pfree(tuple); +} + +static void +readtup_index(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len) +{ + unsigned int tuplen = len - sizeof(unsigned int); + IndexTuple tuple = (IndexTuple) palloc(tuplen); + + USEMEM(state, GetMemoryChunkSpace(tuple)); + LogicalTapeReadExact(state->tapeset, tapenum, + tuple, tuplen); + if (state->randomAccess) /* need trailing length word? 
*/ + LogicalTapeReadExact(state->tapeset, tapenum, + &tuplen, sizeof(tuplen)); + stup->tuple = (void *) tuple; + /* set up first-column key value */ + stup->datum1 = index_getattr(tuple, + 1, + RelationGetDescr(state->indexRel), + &stup->isnull1); +} + +static void +reversedirection_index_btree(Tuplesortstate *state) +{ + ScanKey scanKey = state->indexScanKey; + int nkey; + + for (nkey = 0; nkey < state->nKeys; nkey++, scanKey++) + { + scanKey->sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST); + } +} + +static void +reversedirection_index_hash(Tuplesortstate *state) +{ + /* We don't support reversing direction in a hash index sort */ + elog(ERROR, "reversedirection_index_hash is not implemented"); +} + + +/* + * Routines specialized for DatumTuple case + */ + +static int +comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state) +{ + return ApplySortComparator(a->datum1, a->isnull1, + b->datum1, b->isnull1, + state->onlyKey); +} + +static void +copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup) +{ + /* Not currently needed */ + elog(ERROR, "copytup_datum() should not be called"); +} + +static void +writetup_datum(Tuplesortstate *state, int tapenum, SortTuple *stup) +{ + void *waddr; + unsigned int tuplen; + unsigned int writtenlen; + + if (stup->isnull1) + { + waddr = NULL; + tuplen = 0; + } + else if (state->datumTypeByVal) + { + waddr = &stup->datum1; + tuplen = sizeof(Datum); + } + else + { + waddr = DatumGetPointer(stup->datum1); + tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen); + Assert(tuplen != 0); + } + + writtenlen = tuplen + sizeof(unsigned int); + + LogicalTapeWrite(state->tapeset, tapenum, + (void *) &writtenlen, sizeof(writtenlen)); + LogicalTapeWrite(state->tapeset, tapenum, + waddr, tuplen); + if (state->randomAccess) /* need trailing length word? */ + LogicalTapeWrite(state->tapeset, tapenum, + (void *) &writtenlen, sizeof(writtenlen)); + + if (stup->tuple) + { + FREEMEM(state, GetMemoryChunkSpace(stup->tuple)); + pfree(stup->tuple); + } +} + +static void +readtup_datum(Tuplesortstate *state, SortTuple *stup, + int tapenum, unsigned int len) +{ + unsigned int tuplen = len - sizeof(unsigned int); + + if (tuplen == 0) + { + /* it's NULL */ + stup->datum1 = (Datum) 0; + stup->isnull1 = true; + stup->tuple = NULL; + } + else if (state->datumTypeByVal) + { + Assert(tuplen == sizeof(Datum)); + LogicalTapeReadExact(state->tapeset, tapenum, + &stup->datum1, tuplen); + stup->isnull1 = false; + stup->tuple = NULL; + } + else + { + void *raddr = palloc(tuplen); + + LogicalTapeReadExact(state->tapeset, tapenum, + raddr, tuplen); + stup->datum1 = PointerGetDatum(raddr); + stup->isnull1 = false; + stup->tuple = raddr; + USEMEM(state, GetMemoryChunkSpace(raddr)); + } + + if (state->randomAccess) /* need trailing length word? 
*/
+ LogicalTapeReadExact(state->tapeset, tapenum,
+ &tuplen, sizeof(tuplen));
+}
+
+static void
+reversedirection_datum(Tuplesortstate *state)
+{
+ state->onlyKey->ssup_reverse = !state->onlyKey->ssup_reverse;
+ state->onlyKey->ssup_nulls_first = !state->onlyKey->ssup_nulls_first;
+}
+
+/*
+ * Convenience routine to free a tuple previously loaded into sort memory
+ */
+static void
+free_sort_tuple(Tuplesortstate *state, SortTuple *stup)
+{
+ FREEMEM(state, GetMemoryChunkSpace(stup->tuple));
+ pfree(stup->tuple);
+}
+
+static int
+comparetup_rum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
+{
+ RumSortItem *i1, *i2;
+ float8 v1 = DatumGetFloat8(a->datum1);
+ float8 v2 = DatumGetFloat8(b->datum1);
+ int i;
+
+ if (v1 < v2)
+ return -1;
+ else if (v1 > v2)
+ return 1;
+
+ /* the RumSortItem itself is carried in SortTuple.tuple (see copytup_rum) */
+ i1 = (RumSortItem *)a->tuple;
+ i2 = (RumSortItem *)b->tuple;
+ for (i = 1; i < state->nKeys; i++)
+ {
+ if (i1->data[i] < i2->data[i])
+ return -1;
+ else if (i1->data[i] > i2->data[i])
+ return 1;
+ }
+ return 0;
+}
+
+static void
+copytup_rum(Tuplesortstate *state, SortTuple *stup, void *tup)
+{
+ RumSortItem *item = (RumSortItem *)tup;
+
+ stup->datum1 = Float8GetDatum(state->nKeys > 0 ? item->data[0] : 0);
+ stup->isnull1 = false;
+ stup->tuple = tup;
+ USEMEM(state, GetMemoryChunkSpace(tup));
+}
+
+static void
+writetup_rum(Tuplesortstate *state, int tapenum, SortTuple *stup)
+{
+ RumSortItem *item = (RumSortItem *)stup->tuple;
+ unsigned int writtenlen = RumSortItemSize(state->nKeys) + sizeof(unsigned int);
+
+
+ LogicalTapeWrite(state->tapeset, tapenum,
+ (void *) &writtenlen, sizeof(writtenlen));
+ LogicalTapeWrite(state->tapeset, tapenum,
+ (void *) item, RumSortItemSize(state->nKeys));
+ if (state->randomAccess) /* need trailing length word? */
+ LogicalTapeWrite(state->tapeset, tapenum,
+ (void *) &writtenlen, sizeof(writtenlen));
+
+ FREEMEM(state, GetMemoryChunkSpace(item));
+ pfree(item);
+}
+
+static void
+readtup_rum(Tuplesortstate *state, SortTuple *stup,
+ int tapenum, unsigned int len)
+{
+ unsigned int tuplen = len - sizeof(unsigned int);
+ RumSortItem *item = (RumSortItem *)palloc(RumSortItemSize(state->nKeys));
+
+ Assert(tuplen == RumSortItemSize(state->nKeys));
+
+ USEMEM(state, GetMemoryChunkSpace(item));
+ LogicalTapeReadExact(state->tapeset, tapenum,
+ (void *)item, RumSortItemSize(state->nKeys));
+ stup->datum1 = Float8GetDatum(state->nKeys > 0 ? item->data[0] : 0);
+ stup->isnull1 = false;
+ stup->tuple = item;
+
+ if (state->randomAccess) /* need trailing length word? */
+ LogicalTapeReadExact(state->tapeset, tapenum,
+ &tuplen, sizeof(tuplen));
+}
+
+static void
+reversedirection_rum(Tuplesortstate *state)
+{
+ state->reverse = !state->reverse;
+}
diff --git a/rumsort.h b/rumsort.h
new file mode 100644
index 0000000000..f93c81be84
--- /dev/null
+++ b/rumsort.h
@@ -0,0 +1,135 @@
+/*-------------------------------------------------------------------------
+ *
+ * rumsort.h
+ * Generalized tuple sorting routines.
+ *
+ * This module handles sorting of heap tuples, index tuples, or single
+ * Datums (and could easily support other kinds of sortable objects,
+ * if necessary). It works efficiently for both small and large amounts
+ * of data. Small amounts are sorted in-memory using qsort(). Large
+ * amounts are sorted using temporary files and a standard external sort
+ * algorithm. 
+ * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#ifndef TUPLESORT_H +/* Hide tuplesort.h and tuplesort.c */ +#define TUPLESORT_H + +#include "postgres.h" + +#include "access/itup.h" +#include "executor/tuptable.h" +#include "utils/relcache.h" + +/* Tuplesortstate is an opaque type whose details are not known outside + * tuplesort.c. + */ +typedef struct Tuplesortstate Tuplesortstate; + +/* + * We provide multiple interfaces to what is essentially the same code, + * since different callers have different data to be sorted and want to + * specify the sort key information differently. There are two APIs for + * sorting HeapTuples and two more for sorting IndexTuples. Yet another + * API supports sorting bare Datums. + * + * The "heap" API actually stores/sorts MinimalTuples, which means it doesn't + * preserve the system columns (tuple identity and transaction visibility + * info). The sort keys are specified by column numbers within the tuples + * and sort operator OIDs. We save some cycles by passing and returning the + * tuples in TupleTableSlots, rather than forming actual HeapTuples (which'd + * have to be converted to MinimalTuples). This API works well for sorts + * executed as parts of plan trees. + * + * The "cluster" API stores/sorts full HeapTuples including all visibility + * info. The sort keys are specified by reference to a btree index that is + * defined on the relation to be sorted. Note that putheaptuple/getheaptuple + * go with this API, not the "begin_heap" one! + * + * The "index_btree" API stores/sorts IndexTuples (preserving all their + * header fields). The sort keys are specified by a btree index definition. + * + * The "index_hash" API is similar to index_btree, but the tuples are + * actually sorted by their hash codes not the raw data. 
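+ *
+ * In addition to those inherited interfaces, this module exposes a
+ * RUM-specific path: rum_tuplesort_begin_rum() sorts RumSortItems, i.e.
+ * heap item pointers accompanied by an array of float8 values (typically
+ * ordering distances); see RumSortItem below. Items compare on data[0]
+ * first (cached in SortTuple.datum1) and then on the remaining data[]
+ * elements.
+ *
+ * A minimal calling sketch, illustrative only: "nkeys" and "item" are
+ * placeholder names, and the end-of-data convention (a NULL result from
+ * rum_tuplesort_getrum) is assumed to match the other get* routines.
+ *
+ *     Tuplesortstate *ts = rum_tuplesort_begin_rum(work_mem, nkeys, false);
+ *     RumSortItem    *item = palloc(RumSortItemSize(nkeys));
+ *     bool            should_free;
+ *
+ *     ... fill item->iptr and item->data[0 .. nkeys-1] ...
+ *     rum_tuplesort_putrum(ts, item);   (copytup_rum keeps this pointer
+ *                                        rather than copying it)
+ *     rum_tuplesort_performsort(ts);
+ *     while ((item = rum_tuplesort_getrum(ts, true, &should_free)) != NULL)
+ *         ... consume items in ascending data[0] order ...
+ *     rum_tuplesort_end(ts);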
+ */ + +typedef struct +{ + ItemPointerData iptr; + bool recheck; + float8 data[FLEXIBLE_ARRAY_MEMBER]; +} RumSortItem; + +#define RumSortItemSize(nKeys) (offsetof(RumSortItem,data)+(nKeys)*sizeof(float8)) + +extern Tuplesortstate *rum_tuplesort_begin_heap(TupleDesc tupDesc, + int nkeys, AttrNumber *attNums, + Oid *sortOperators, Oid *sortCollations, + bool *nullsFirstFlags, + int workMem, bool randomAccess); +extern Tuplesortstate *rum_tuplesort_begin_cluster(TupleDesc tupDesc, + Relation indexRel, + int workMem, bool randomAccess); +extern Tuplesortstate *rum_tuplesort_begin_index_btree(Relation heapRel, + Relation indexRel, + bool enforceUnique, + int workMem, bool randomAccess); +extern Tuplesortstate *rum_tuplesort_begin_index_hash(Relation heapRel, + Relation indexRel, + uint32 hash_mask, + int workMem, bool randomAccess); +extern Tuplesortstate *rum_tuplesort_begin_datum(Oid datumType, + Oid sortOperator, Oid sortCollation, + bool nullsFirstFlag, + int workMem, bool randomAccess); +extern Tuplesortstate *rum_tuplesort_begin_rum(int workMem, + int nKeys, bool randomAccess); + +extern void rum_tuplesort_set_bound(Tuplesortstate *state, int64 bound); + +extern void rum_tuplesort_puttupleslot(Tuplesortstate *state, + TupleTableSlot *slot); +extern void rum_tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup); +extern void rum_tuplesort_putindextuple(Tuplesortstate *state, IndexTuple tuple); +extern void rum_tuplesort_putdatum(Tuplesortstate *state, Datum val, + bool isNull); +extern void rum_tuplesort_putrum(Tuplesortstate *state, RumSortItem *item); + +extern void rum_tuplesort_performsort(Tuplesortstate *state); + +extern bool rum_tuplesort_gettupleslot(Tuplesortstate *state, bool forward, + TupleTableSlot *slot); +extern HeapTuple rum_tuplesort_getheaptuple(Tuplesortstate *state, bool forward, + bool *should_free); +extern IndexTuple rum_tuplesort_getindextuple(Tuplesortstate *state, bool forward, + bool *should_free); +extern bool rum_tuplesort_getdatum(Tuplesortstate *state, bool forward, + Datum *val, bool *isNull); +extern RumSortItem *rum_tuplesort_getrum(Tuplesortstate *state, bool forward, + bool *should_free); + +extern void rum_tuplesort_end(Tuplesortstate *state); + +extern void rum_tuplesort_get_stats(Tuplesortstate *state, + const char **sortMethod, + const char **spaceType, + long *spaceUsed); + +extern int rum_tuplesort_merge_order(long allowedMem); + +/* + * These routines may only be called if randomAccess was specified 'true'. + * Likewise, backwards scan in gettuple/getdatum is only allowed if + * randomAccess was specified. + */ + +extern void rum_tuplesort_rescan(Tuplesortstate *state); +extern void rum_tuplesort_markpos(Tuplesortstate *state); +extern void rum_tuplesort_restorepos(Tuplesortstate *state); + +#endif /* TUPLESORT_H */ diff --git a/rumutil.c b/rumutil.c new file mode 100644 index 0000000000..105aa0acd9 --- /dev/null +++ b/rumutil.c @@ -0,0 +1,805 @@ +/*------------------------------------------------------------------------- + * + * rumutil.c + * utilities routines for the postgres inverted index access method. 
+ * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/reloptions.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_type.h" +#include "storage/indexfsm.h" +#include "storage/lmgr.h" +#include "utils/guc.h" +#include "utils/index_selfuncs.h" + +#include "rum.h" + +PG_MODULE_MAGIC; + +void _PG_init(void); + +PG_FUNCTION_INFO_V1(rumhandler); + +/* + * Module load callback + */ +void +_PG_init(void) +{ + /* Define custom GUC variables. */ + DefineCustomIntVariable("rum_fuzzy_search_limit", + "Sets the maximum allowed result for exact search by RUM.", + NULL, + &RumFuzzySearchLimit, + 0, 0, INT_MAX, + PGC_USERSET, 0, + NULL, NULL, NULL); +} + +/* + * RUM handler function: return IndexAmRoutine with access method parameters + * and callbacks. + */ +Datum +rumhandler(PG_FUNCTION_ARGS) +{ + IndexAmRoutine *amroutine = makeNode(IndexAmRoutine); + + amroutine->amstrategies = 0; + amroutine->amsupport = 9; + amroutine->amcanorder = false; + amroutine->amcanorderbyop = true; + amroutine->amcanbackward = false; + amroutine->amcanunique = false; + amroutine->amcanmulticol = true; + amroutine->amoptionalkey = true; + amroutine->amsearcharray = false; + amroutine->amsearchnulls = false; + amroutine->amstorage = true; + amroutine->amclusterable = false; + amroutine->ampredlocks = false; + amroutine->amkeytype = InvalidOid; + + amroutine->ambuild = rumbuild; + amroutine->ambuildempty = rumbuildempty; + amroutine->aminsert = ruminsert; + amroutine->ambulkdelete = rumbulkdelete; + amroutine->amvacuumcleanup = rumvacuumcleanup; + amroutine->amcanreturn = NULL; + amroutine->amcostestimate = gincostestimate; + amroutine->amoptions = rumoptions; + amroutine->amvalidate = rumvalidate; + amroutine->ambeginscan = rumbeginscan; + amroutine->amrescan = rumrescan; + amroutine->amgettuple = rumgettuple; + amroutine->amgetbitmap = rumgetbitmap; + amroutine->amendscan = rumendscan; + amroutine->ammarkpos = NULL; + amroutine->amrestrpos = NULL; + + PG_RETURN_POINTER(amroutine); +} + +/* + * initRumState: fill in an empty RumState struct to describe the index + * + * Note: assorted subsidiary data is allocated in the CurrentMemoryContext. + */ +void +initRumState(RumState *state, Relation index) +{ + TupleDesc origTupdesc = RelationGetDescr(index); + int i; + + MemSet(state, 0, sizeof(RumState)); + + state->index = index; + state->oneCol = (origTupdesc->natts == 1) ? true : false; + state->origTupdesc = origTupdesc; + + for (i = 0; i < origTupdesc->natts; i++) + { + RumConfig rumConfig; + + rumConfig.addInfoTypeOid = InvalidOid; + + if (index_getprocid(index, i + 1, RUM_CONFIG_PROC) != InvalidOid) + { + fmgr_info_copy(&(state->configFn[i]), + index_getprocinfo(index, i + 1, RUM_CONFIG_PROC), + CurrentMemoryContext); + + FunctionCall1(&state->configFn[i], PointerGetDatum(&rumConfig)); + } + state->addInfoTypeOid[i] = rumConfig.addInfoTypeOid; + + if (state->oneCol) + { + state->tupdesc[i] = CreateTemplateTupleDesc( + OidIsValid(state->addInfoTypeOid[i]) ? 
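+ /* a second attribute carries the addInfo datum when the opclass defines one */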
2 : 1, false); + TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL, + origTupdesc->attrs[i]->atttypid, + origTupdesc->attrs[i]->atttypmod, + origTupdesc->attrs[i]->attndims); + TupleDescInitEntryCollation(state->tupdesc[i], (AttrNumber) 1, + origTupdesc->attrs[i]->attcollation); + if (OidIsValid(state->addInfoTypeOid[i])) + { + TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL, + state->addInfoTypeOid[i], -1, 0); + state->addAttrs[i] = state->tupdesc[i]->attrs[1]; + } + else + { + state->addAttrs[i] = NULL; + } + } + else + { + state->tupdesc[i] = CreateTemplateTupleDesc( + OidIsValid(state->addInfoTypeOid[i]) ? 3 : 2, false); + TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL, + INT2OID, -1, 0); + TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL, + origTupdesc->attrs[i]->atttypid, + origTupdesc->attrs[i]->atttypmod, + origTupdesc->attrs[i]->attndims); + TupleDescInitEntryCollation(state->tupdesc[i], (AttrNumber) 2, + origTupdesc->attrs[i]->attcollation); + if (OidIsValid(state->addInfoTypeOid[i])) + { + TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 3, NULL, + state->addInfoTypeOid[i], -1, 0); + state->addAttrs[i] = state->tupdesc[i]->attrs[2]; + } + else + { + state->addAttrs[i] = NULL; + } + } + + fmgr_info_copy(&(state->compareFn[i]), + index_getprocinfo(index, i + 1, GIN_COMPARE_PROC), + CurrentMemoryContext); + fmgr_info_copy(&(state->extractValueFn[i]), + index_getprocinfo(index, i + 1, GIN_EXTRACTVALUE_PROC), + CurrentMemoryContext); + fmgr_info_copy(&(state->extractQueryFn[i]), + index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC), + CurrentMemoryContext); + fmgr_info_copy(&(state->consistentFn[i]), + index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC), + CurrentMemoryContext); + + /* + * Check opclass capability to do partial match. + */ + if (index_getprocid(index, i + 1, GIN_COMPARE_PARTIAL_PROC) != InvalidOid) + { + fmgr_info_copy(&(state->comparePartialFn[i]), + index_getprocinfo(index, i + 1, GIN_COMPARE_PARTIAL_PROC), + CurrentMemoryContext); + state->canPartialMatch[i] = true; + } + else + { + state->canPartialMatch[i] = false; + } + + /* + * Check opclass capability to do pre consistent check. + */ + if (index_getprocid(index, i + 1, RUM_PRE_CONSISTENT_PROC) != InvalidOid) + { + fmgr_info_copy(&(state->preConsistentFn[i]), + index_getprocinfo(index, i + 1, RUM_PRE_CONSISTENT_PROC), + CurrentMemoryContext); + state->canPreConsistent[i] = true; + } + else + { + state->canPreConsistent[i] = false; + } + + /* + * Check opclass capability to do order by. + */ + if (index_getprocid(index, i + 1, RUM_ORDERING_PROC) != InvalidOid) + { + fmgr_info_copy(&(state->orderingFn[i]), + index_getprocinfo(index, i + 1, RUM_ORDERING_PROC), + CurrentMemoryContext); + state->canOrdering[i] = true; + } + else + { + state->canOrdering[i] = false; + } + + /* + * If the index column has a specified collation, we should honor that + * while doing comparisons. However, we may have a collatable storage + * type for a noncollatable indexed data type (for instance, hstore + * uses text index entries). If there's no index collation then + * specify default collation in case the support functions need + * collation. This is harmless if the support functions don't care + * about collation, so we just do it unconditionally. (We could + * alternatively call get_typcollation, but that seems like expensive + * overkill --- there aren't going to be any cases where a RUM storage + * type has a nondefault collation.) 
+ */ + if (OidIsValid(index->rd_indcollation[i])) + state->supportCollation[i] = index->rd_indcollation[i]; + else + state->supportCollation[i] = DEFAULT_COLLATION_OID; + } +} + +/* + * Extract attribute (column) number of stored entry from RUM tuple + */ +OffsetNumber +rumtuple_get_attrnum(RumState *rumstate, IndexTuple tuple) +{ + OffsetNumber colN; + + if (rumstate->oneCol) + { + /* column number is not stored explicitly */ + colN = FirstOffsetNumber; + } + else + { + Datum res; + bool isnull; + + /* + * First attribute is always int16, so we can safely use any tuple + * descriptor to obtain first attribute of tuple + */ + res = index_getattr(tuple, FirstOffsetNumber, rumstate->tupdesc[0], + &isnull); + Assert(!isnull); + + colN = DatumGetUInt16(res); + Assert(colN >= FirstOffsetNumber && colN <= rumstate->origTupdesc->natts); + } + + return colN; +} + +/* + * Extract stored datum (and possible null category) from RUM tuple + */ +Datum +rumtuple_get_key(RumState *rumstate, IndexTuple tuple, + RumNullCategory *category) +{ + Datum res; + bool isnull; + + if (rumstate->oneCol) + { + /* + * Single column index doesn't store attribute numbers in tuples + */ + res = index_getattr(tuple, FirstOffsetNumber, rumstate->origTupdesc, + &isnull); + } + else + { + /* + * Since the datum type depends on which index column it's from, we + * must be careful to use the right tuple descriptor here. + */ + OffsetNumber colN = rumtuple_get_attrnum(rumstate, tuple); + + res = index_getattr(tuple, OffsetNumberNext(FirstOffsetNumber), + rumstate->tupdesc[colN - 1], + &isnull); + } + + if (isnull) + *category = RumGetNullCategory(tuple, rumstate); + else + *category = RUM_CAT_NORM_KEY; + + return res; +} + +/* + * Allocate a new page (either by recycling, or by extending the index file) + * The returned buffer is already pinned and exclusive-locked + * Caller is responsible for initializing the page by calling RumInitBuffer + */ +Buffer +RumNewBuffer(Relation index) +{ + Buffer buffer; + bool needLock; + + /* First, try to get a page from FSM */ + for (;;) + { + BlockNumber blkno = GetFreeIndexPage(index); + + if (blkno == InvalidBlockNumber) + break; + + buffer = ReadBuffer(index, blkno); + + /* + * We have to guard against the possibility that someone else already + * recycled this page; the buffer may be locked if so. 
+ */ + if (ConditionalLockBuffer(buffer)) + { + Page page = BufferGetPage(buffer); + + if (PageIsNew(page)) + return buffer; /* OK to use, if never initialized */ + + if (RumPageIsDeleted(page)) + return buffer; /* OK to use */ + + LockBuffer(buffer, RUM_UNLOCK); + } + + /* Can't use it, so release buffer and try again */ + ReleaseBuffer(buffer); + } + + /* Must extend the file */ + needLock = !RELATION_IS_LOCAL(index); + if (needLock) + LockRelationForExtension(index, ExclusiveLock); + + buffer = ReadBuffer(index, P_NEW); + LockBuffer(buffer, RUM_EXCLUSIVE); + + if (needLock) + UnlockRelationForExtension(index, ExclusiveLock); + + return buffer; +} + +void +RumInitPage(Page page, uint32 f, Size pageSize) +{ + RumPageOpaque opaque; + + PageInit(page, pageSize, sizeof(RumPageOpaqueData)); + + opaque = RumPageGetOpaque(page); + memset(opaque, 0, sizeof(RumPageOpaqueData)); + opaque->flags = f; + opaque->rightlink = InvalidBlockNumber; +} + +void +RumInitBuffer(GenericXLogState *state, Buffer buffer, uint32 flags) +{ + Page page; + + page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE); + + RumInitPage(page, flags, BufferGetPageSize(buffer)); +} + +void +RumInitMetabuffer(GenericXLogState *state, Buffer metaBuffer) +{ + Page metaPage; + RumMetaPageData *metadata; + + /* Initialize contents of meta page */ + metaPage = GenericXLogRegisterBuffer(state, metaBuffer, + GENERIC_XLOG_FULL_IMAGE); + + RumInitPage(metaPage, RUM_META, BufferGetPageSize(metaBuffer)); + metadata = RumPageGetMeta(metaPage); + memset(metadata, 0, sizeof(RumMetaPageData)); + + metadata->head = metadata->tail = InvalidBlockNumber; + metadata->tailFreeSize = 0; + metadata->nPendingPages = 0; + metadata->nPendingHeapTuples = 0; + metadata->nTotalPages = 0; + metadata->nEntryPages = 0; + metadata->nDataPages = 0; + metadata->nEntries = 0; + metadata->rumVersion = RUM_CURRENT_VERSION; + + ((PageHeader) metaPage)->pd_lower += sizeof(RumMetaPageData); +} + +/* + * Compare two keys of the same index column + */ +int +rumCompareEntries(RumState *rumstate, OffsetNumber attnum, + Datum a, RumNullCategory categorya, + Datum b, RumNullCategory categoryb) +{ + /* if not of same null category, sort by that first */ + if (categorya != categoryb) + return (categorya < categoryb) ? -1 : 1; + + /* all null items in same category are equal */ + if (categorya != RUM_CAT_NORM_KEY) + return 0; + + /* both not null, so safe to call the compareFn */ + return DatumGetInt32(FunctionCall2Coll(&rumstate->compareFn[attnum - 1], + rumstate->supportCollation[attnum - 1], + a, b)); +} + +/* + * Compare two keys of possibly different index columns + */ +int +rumCompareAttEntries(RumState *rumstate, + OffsetNumber attnuma, Datum a, RumNullCategory categorya, + OffsetNumber attnumb, Datum b, RumNullCategory categoryb) +{ + /* attribute number is the first sort key */ + if (attnuma != attnumb) + return (attnuma < attnumb) ? -1 : 1; + + return rumCompareEntries(rumstate, attnuma, a, categorya, b, categoryb); +} + + +/* + * Support for sorting key datums in rumExtractEntries + * + * Note: we only have to worry about null and not-null keys here; + * rumExtractEntries never generates more than one placeholder null, + * so it doesn't have to sort those. 
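+ *
+ * As a purely illustrative example (hypothetical data): sorting the key
+ * array {'b', NULL, 'a', 'a'} with cmpEntries yields {'a', 'a', 'b', NULL};
+ * non-null keys are ordered by the opclass compare function, nulls sort
+ * last, and haveDups is set because the two 'a' keys compare equal.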
+ */ +typedef struct +{ + Datum datum; + Datum addInfo; + bool isnull; + bool addInfoIsNull; +} keyEntryData; + +typedef struct +{ + FmgrInfo *cmpDatumFunc; + Oid collation; + bool haveDups; +} cmpEntriesArg; + +static int +cmpEntries(const void *a, const void *b, void *arg) +{ + const keyEntryData *aa = (const keyEntryData *) a; + const keyEntryData *bb = (const keyEntryData *) b; + cmpEntriesArg *data = (cmpEntriesArg *) arg; + int res; + + if (aa->isnull) + { + if (bb->isnull) + res = 0; /* NULL "=" NULL */ + else + res = 1; /* NULL ">" not-NULL */ + } + else if (bb->isnull) + res = -1; /* not-NULL "<" NULL */ + else + res = DatumGetInt32(FunctionCall2Coll(data->cmpDatumFunc, + data->collation, + aa->datum, bb->datum)); + + /* + * Detect if we have any duplicates. If there are equal keys, qsort must + * compare them at some point, else it wouldn't know whether one should go + * before or after the other. + */ + if (res == 0) + data->haveDups = true; + + return res; +} + + +/* + * Extract the index key values from an indexable item + * + * The resulting key values are sorted, and any duplicates are removed. + * This avoids generating redundant index entries. + */ +Datum * +rumExtractEntries(RumState *rumstate, OffsetNumber attnum, + Datum value, bool isNull, + int32 *nentries, RumNullCategory **categories, + Datum **addInfo, bool **addInfoIsNull) +{ + Datum *entries; + bool *nullFlags; + int32 i; + + /* + * We don't call the extractValueFn on a null item. Instead generate a + * placeholder. + */ + if (isNull) + { + *nentries = 1; + entries = (Datum *) palloc(sizeof(Datum)); + entries[0] = (Datum) 0; + *addInfo = (Datum *) palloc(sizeof(Datum)); + (*addInfo)[0] = (Datum) 0; + *addInfoIsNull = (bool *) palloc(sizeof(bool)); + (*addInfoIsNull)[0] = true; + *categories = (RumNullCategory *) palloc(sizeof(RumNullCategory)); + (*categories)[0] = RUM_CAT_NULL_ITEM; + return entries; + } + + /* OK, call the opclass's extractValueFn */ + nullFlags = NULL; /* in case extractValue doesn't set it */ + *addInfo = NULL; + *addInfoIsNull = NULL; + entries = (Datum *) + DatumGetPointer(FunctionCall5Coll(&rumstate->extractValueFn[attnum - 1], + rumstate->supportCollation[attnum - 1], + value, + PointerGetDatum(nentries), + PointerGetDatum(&nullFlags), + PointerGetDatum(addInfo), + PointerGetDatum(addInfoIsNull) + )); + + /* + * Generate a placeholder if the item contained no keys. + */ + if (entries == NULL || *nentries <= 0) + { + *nentries = 1; + entries = (Datum *) palloc(sizeof(Datum)); + entries[0] = (Datum) 0; + *addInfo = (Datum *) palloc(sizeof(Datum)); + (*addInfo)[0] = (Datum) 0; + *addInfoIsNull = (bool *) palloc(sizeof(bool)); + (*addInfoIsNull)[0] = true; + *categories = (RumNullCategory *) palloc(sizeof(RumNullCategory)); + (*categories)[0] = RUM_CAT_EMPTY_ITEM; + return entries; + } + + if (!(*addInfo)) + { + (*addInfo) = (Datum *)palloc(sizeof(Datum) * *nentries); + for (i = 0; i < *nentries; i++) + (*addInfo)[i] = (Datum) 0; + } + if (!(*addInfoIsNull)) + { + (*addInfoIsNull) = (bool *)palloc(sizeof(bool) * *nentries); + for (i = 0; i < *nentries; i++) + (*addInfoIsNull)[i] = true; + } + + /* + * If the extractValueFn didn't create a nullFlags array, create one, + * assuming that everything's non-null. Otherwise, run through the array + * and make sure each value is exactly 0 or 1; this ensures binary + * compatibility with the RumNullCategory representation. 
+ */ + if (nullFlags == NULL) + nullFlags = (bool *) palloc0(*nentries * sizeof(bool)); + else + { + for (i = 0; i < *nentries; i++) + nullFlags[i] = (nullFlags[i] ? true : false); + } + /* now we can use the nullFlags as category codes */ + *categories = (RumNullCategory *) nullFlags; + + /* + * If there's more than one key, sort and unique-ify. + * + * XXX Using qsort here is notationally painful, and the overhead is + * pretty bad too. For small numbers of keys it'd likely be better to use + * a simple insertion sort. + */ + if (*nentries > 1) + { + keyEntryData *keydata; + cmpEntriesArg arg; + + keydata = (keyEntryData *) palloc(*nentries * sizeof(keyEntryData)); + for (i = 0; i < *nentries; i++) + { + keydata[i].datum = entries[i]; + keydata[i].isnull = nullFlags[i]; + keydata[i].addInfo = (*addInfo)[i]; + keydata[i].addInfoIsNull = (*addInfoIsNull)[i]; + } + + arg.cmpDatumFunc = &rumstate->compareFn[attnum - 1]; + arg.collation = rumstate->supportCollation[attnum - 1]; + arg.haveDups = false; + qsort_arg(keydata, *nentries, sizeof(keyEntryData), + cmpEntries, (void *) &arg); + + if (arg.haveDups) + { + /* there are duplicates, must get rid of 'em */ + int32 j; + + entries[0] = keydata[0].datum; + nullFlags[0] = keydata[0].isnull; + (*addInfo)[0] = keydata[0].addInfo; + (*addInfoIsNull)[0] = keydata[0].addInfoIsNull; + j = 1; + for (i = 1; i < *nentries; i++) + { + if (cmpEntries(&keydata[i - 1], &keydata[i], &arg) != 0) + { + entries[j] = keydata[i].datum; + nullFlags[j] = keydata[i].isnull; + (*addInfo)[j] = keydata[i].addInfo; + (*addInfoIsNull)[j] = keydata[i].addInfoIsNull; + j++; + } + } + *nentries = j; + } + else + { + /* easy, no duplicates */ + for (i = 0; i < *nentries; i++) + { + entries[i] = keydata[i].datum; + nullFlags[i] = keydata[i].isnull; + (*addInfo)[i] = keydata[i].addInfo; + (*addInfoIsNull)[i] = keydata[i].addInfoIsNull; + } + } + + pfree(keydata); + } + + return entries; +} + +bytea * +rumoptions(Datum reloptions, bool validate) +{ + relopt_value *options; + RumOptions *rdopts; + int numoptions; + static const relopt_parse_elt tab[] = { + {"fastupdate", RELOPT_TYPE_BOOL, offsetof(RumOptions, useFastUpdate)} + }; + + options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN, + &numoptions); + + /* if none set, we're done */ + if (numoptions == 0) + return NULL; + + rdopts = allocateReloptStruct(sizeof(RumOptions), options, numoptions); + + fillRelOptions((void *) rdopts, sizeof(RumOptions), options, numoptions, + validate, tab, lengthof(tab)); + + pfree(options); + + return (bytea *) rdopts; +} + +/* + * Fetch index's statistical data into *stats + * + * Note: in the result, nPendingPages can be trusted to be up-to-date, + * as can rumVersion; but the other fields are as of the last VACUUM. 
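+ *
+ * A minimal calling sketch (illustrative only; "index" stands for an
+ * already-opened and appropriately locked Relation):
+ *
+ *     GinStatsData stats;
+ *
+ *     rumGetStats(index, &stats);
+ *     elog(DEBUG1, "entry pages: %u, pending pages: %u",
+ *          stats.nEntryPages, stats.nPendingPages);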
+ */ +void +rumGetStats(Relation index, GinStatsData *stats) +{ + Buffer metabuffer; + Page metapage; + RumMetaPageData *metadata; + + metabuffer = ReadBuffer(index, RUM_METAPAGE_BLKNO); + LockBuffer(metabuffer, RUM_SHARE); + metapage = BufferGetPage(metabuffer); + metadata = RumPageGetMeta(metapage); + + stats->nPendingPages = metadata->nPendingPages; + stats->nTotalPages = metadata->nTotalPages; + stats->nEntryPages = metadata->nEntryPages; + stats->nDataPages = metadata->nDataPages; + stats->nEntries = metadata->nEntries; + stats->ginVersion = metadata->rumVersion; + + UnlockReleaseBuffer(metabuffer); +} + +/* + * Write the given statistics to the index's metapage + * + * Note: nPendingPages and rumVersion are *not* copied over + */ +void +rumUpdateStats(Relation index, const GinStatsData *stats) +{ + Buffer metabuffer; + Page metapage; + RumMetaPageData *metadata; + GenericXLogState *state; + + state = GenericXLogStart(index); + + metabuffer = ReadBuffer(index, RUM_METAPAGE_BLKNO); + LockBuffer(metabuffer, RUM_EXCLUSIVE); + metapage = GenericXLogRegisterBuffer(state, metabuffer, 0); + metadata = RumPageGetMeta(metapage); + + metadata->nTotalPages = stats->nTotalPages; + metadata->nEntryPages = stats->nEntryPages; + metadata->nDataPages = stats->nDataPages; + metadata->nEntries = stats->nEntries; + + GenericXLogFinish(state); + + UnlockReleaseBuffer(metabuffer); +} + +Datum +FunctionCall10Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, + Datum arg3, Datum arg4, Datum arg5, + Datum arg6, Datum arg7, Datum arg8, + Datum arg9, Datum arg10) +{ + FunctionCallInfoData fcinfo; + Datum result; + + InitFunctionCallInfoData(fcinfo, flinfo, 10, collation, NULL, NULL); + + fcinfo.arg[0] = arg1; + fcinfo.arg[1] = arg2; + fcinfo.arg[2] = arg3; + fcinfo.arg[3] = arg4; + fcinfo.arg[4] = arg5; + fcinfo.arg[5] = arg6; + fcinfo.arg[6] = arg7; + fcinfo.arg[7] = arg8; + fcinfo.arg[8] = arg9; + fcinfo.arg[9] = arg10; + fcinfo.argnull[0] = false; + fcinfo.argnull[1] = false; + fcinfo.argnull[2] = false; + fcinfo.argnull[3] = false; + fcinfo.argnull[4] = false; + fcinfo.argnull[5] = false; + fcinfo.argnull[6] = false; + fcinfo.argnull[7] = false; + fcinfo.argnull[8] = false; + fcinfo.argnull[9] = false; + + result = FunctionCallInvoke(&fcinfo); + + /* Check for null result, since caller is clearly not expecting one */ + if (fcinfo.isnull) + elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid); + + return result; +} diff --git a/rumvacuum.c b/rumvacuum.c new file mode 100644 index 0000000000..bce208563e --- /dev/null +++ b/rumvacuum.c @@ -0,0 +1,798 @@ +/*------------------------------------------------------------------------- + * + * rumvacuum.c + * delete & vacuum routines for the postgres RUM + * + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "commands/vacuum.h" +#include "postmaster/autovacuum.h" +#include "storage/indexfsm.h" +#include "storage/lmgr.h" + +#include "rum.h" + +typedef struct +{ + Relation index; + IndexBulkDeleteResult *result; + IndexBulkDeleteCallback callback; + void *callback_state; + RumState rumstate; + BufferAccessStrategy strategy; +} RumVacuumState; + + +/* + * Cleans array of ItemPointer (removes dead pointers) + * Results are always stored in *cleaned, which will be 
allocated + * if it's needed. In case of *cleaned!=NULL caller is responsible to + * have allocated enough space. *cleaned and items may point to the same + * memory address. + */ + +static uint32 +rumVacuumPostingList(RumVacuumState *gvs, OffsetNumber attnum, Pointer src, + uint32 nitem, Pointer *cleaned, Size size, Size *newSize) +{ + uint32 i, + j = 0; + ItemPointerData iptr = {{0,0},0}, prevIptr; + Datum addInfo = 0; + bool addInfoIsNull; + Pointer dst = NULL, prev, ptr = src; + + /* + * just scan over ItemPointer array + */ + + prevIptr = iptr; + for (i = 0; i < nitem; i++) + { + prev = ptr; + ptr = rumDataPageLeafRead(ptr, attnum, &iptr, &addInfo, &addInfoIsNull, + &gvs->rumstate); + if (gvs->callback(&iptr, gvs->callback_state)) + { + gvs->result->tuples_removed += 1; + if (!dst) + { + dst = (Pointer) palloc(size); + *cleaned = dst; + if (i != 0) + { + memcpy(dst, src, prev - src); + dst += prev - src; + } + } + } + else + { + gvs->result->num_index_tuples += 1; + if (i != j) + dst = rumPlaceToDataPageLeaf(dst, attnum, &iptr, + addInfo, + addInfoIsNull, + &prevIptr, &gvs->rumstate); + j++; + prevIptr = iptr; + } + } + + if (i != j) + *newSize = dst - *cleaned; + return j; +} + +/* + * Form a tuple for entry tree based on already encoded array of item pointers + * with additional information. + */ +static IndexTuple +RumFormTuple(RumState *rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + Pointer data, + Size dataSize, + uint32 nipd, + bool errorTooBig) +{ + Datum datums[3]; + bool isnull[3]; + IndexTuple itup; + uint32 newsize; + + /* Build the basic tuple: optional column number, plus key datum */ + if (rumstate->oneCol) + { + datums[0] = key; + isnull[0] = (category != RUM_CAT_NORM_KEY); + isnull[1] = true; + } + else + { + datums[0] = UInt16GetDatum(attnum); + isnull[0] = false; + datums[1] = key; + isnull[1] = (category != RUM_CAT_NORM_KEY); + isnull[2] = true; + } + + itup = index_form_tuple(rumstate->tupdesc[attnum - 1], datums, isnull); + + /* + * Determine and store offset to the posting list, making sure there is + * room for the category byte if needed. + * + * Note: because index_form_tuple MAXALIGNs the tuple size, there may well + * be some wasted pad space. Is it worth recomputing the data length to + * prevent that? That would also allow us to Assert that the real data + * doesn't overlap the RumNullCategory byte, which this code currently + * takes on faith. + */ + newsize = IndexTupleSize(itup); + + RumSetPostingOffset(itup, newsize); + + RumSetNPosting(itup, nipd); + + /* + * Add space needed for posting list, if any. Then check that the tuple + * won't be too big to store. 
+ */ + + if (nipd > 0) + { + newsize += dataSize; + } + + if (category != RUM_CAT_NORM_KEY) + { + Assert(IndexTupleHasNulls(itup)); + newsize = newsize + sizeof(RumNullCategory); + } + newsize = MAXALIGN(newsize); + + if (newsize > Min(INDEX_SIZE_MASK, RumMaxItemSize)) + { + if (errorTooBig) + ereport(ERROR, + (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), + errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", + (unsigned long) newsize, + (unsigned long) Min(INDEX_SIZE_MASK, + RumMaxItemSize), + RelationGetRelationName(rumstate->index)))); + pfree(itup); + return NULL; + } + + /* + * Resize tuple if needed + */ + if (newsize != IndexTupleSize(itup)) + { + itup = repalloc(itup, newsize); + + /* set new size in tuple header */ + itup->t_info &= ~INDEX_SIZE_MASK; + itup->t_info |= newsize; + } + + /* + * Copy in the posting list, if provided + */ + if (nipd > 0) + { + char *ptr = RumGetPosting(itup); + memcpy(ptr, data, dataSize); + } + + /* + * Insert category byte, if needed + */ + if (category != RUM_CAT_NORM_KEY) + { + Assert(IndexTupleHasNulls(itup)); + RumSetNullCategory(itup, rumstate, category); + } + return itup; +} + +static bool +rumVacuumPostingTreeLeaves(RumVacuumState *gvs, OffsetNumber attnum, + BlockNumber blkno, bool isRoot, Buffer *rootBuffer) +{ + Buffer buffer; + Page page; + bool hasVoidPage = FALSE; + + buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno, + RBM_NORMAL, gvs->strategy); + page = BufferGetPage(buffer); + + /* + * We should be sure that we don't concurrent with inserts, insert process + * never release root page until end (but it can unlock it and lock + * again). New scan can't start but previously started ones work + * concurrently. + */ + + if (isRoot) + LockBufferForCleanup(buffer); + else + LockBuffer(buffer, RUM_EXCLUSIVE); + + Assert(RumPageIsData(page)); + + if (RumPageIsLeaf(page)) + { + OffsetNumber newMaxOff, + oldMaxOff = RumPageGetOpaque(page)->maxoff; + Pointer cleaned = NULL; + Size newSize; + GenericXLogState *state; + + state = GenericXLogStart(gvs->index); + page = GenericXLogRegisterBuffer(state, buffer, 0); + + newMaxOff = rumVacuumPostingList(gvs, attnum, + RumDataPageGetData(page), oldMaxOff, &cleaned, + RumDataPageSize - RumPageGetOpaque(page)->freespace, &newSize); + + /* saves changes about deleted tuple ... */ + if (oldMaxOff != newMaxOff) + { + if (newMaxOff > 0) + memcpy(RumDataPageGetData(page), cleaned, newSize); + + pfree(cleaned); + RumPageGetOpaque(page)->maxoff = newMaxOff; + updateItemIndexes(page, attnum, &gvs->rumstate); + + /* if root is a leaf page, we don't desire further processing */ + if (!isRoot && RumPageGetOpaque(page)->maxoff < FirstOffsetNumber) + hasVoidPage = TRUE; + + GenericXLogFinish(state); + } + else + GenericXLogAbort(state); + } + else + { + OffsetNumber i; + bool isChildHasVoid = FALSE; + + for (i = FirstOffsetNumber; i <= RumPageGetOpaque(page)->maxoff; i++) + { + PostingItem *pitem = (PostingItem *) RumDataPageGetItem(page, i); + + if (rumVacuumPostingTreeLeaves(gvs, attnum, + PostingItemGetBlockNumber(pitem), FALSE, NULL)) + isChildHasVoid = TRUE; + } + + if (isChildHasVoid) + hasVoidPage = TRUE; + } + + /* + * if we have root and theres void pages in tree, then we don't release + * lock to go further processing and guarantee that tree is unused + */ + if (!(isRoot && hasVoidPage)) + { + UnlockReleaseBuffer(buffer); + } + else + { + Assert(rootBuffer); + *rootBuffer = buffer; + } + + return hasVoidPage; +} + +/* + * Delete a posting tree page. 
+ */ +static void +rumDeletePage(RumVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno, + BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot) +{ + Buffer dBuffer; + Buffer lBuffer; + Buffer pBuffer; + Page lPage, + dPage, + parentPage; + BlockNumber rightlink; + GenericXLogState *state; + + state = GenericXLogStart(gvs->index); + + /* + * Lock the pages in the same order as an insertion would, to avoid + * deadlocks: left, then right, then parent. + */ + lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno, + RBM_NORMAL, gvs->strategy); + dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno, + RBM_NORMAL, gvs->strategy); + pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno, + RBM_NORMAL, gvs->strategy); + + LockBuffer(lBuffer, RUM_EXCLUSIVE); + LockBuffer(dBuffer, RUM_EXCLUSIVE); + if (!isParentRoot) /* parent is already locked by + * LockBufferForCleanup() */ + LockBuffer(pBuffer, RUM_EXCLUSIVE); + + /* Unlink the page by changing left sibling's rightlink */ + dPage = GenericXLogRegisterBuffer(state, dBuffer, 0); + rightlink = RumPageGetOpaque(dPage)->rightlink; + + lPage = GenericXLogRegisterBuffer(state, lBuffer, 0); + RumPageGetOpaque(lPage)->rightlink = rightlink; + + /* Delete downlink from parent */ + parentPage = GenericXLogRegisterBuffer(state, pBuffer, 0); +#ifdef USE_ASSERT_CHECKING + do + { + PostingItem *tod = (PostingItem *) RumDataPageGetItem(parentPage, myoff); + + Assert(PostingItemGetBlockNumber(tod) == deleteBlkno); + } while (0); +#endif + RumPageDeletePostingItem(parentPage, myoff); + + /* + * we shouldn't change rightlink field to save workability of running + * search scan + */ + RumPageGetOpaque(dPage)->flags = RUM_DELETED; + + GenericXLogFinish(state); + + if (!isParentRoot) + LockBuffer(pBuffer, RUM_UNLOCK); + ReleaseBuffer(pBuffer); + UnlockReleaseBuffer(lBuffer); + UnlockReleaseBuffer(dBuffer); + + gvs->result->pages_deleted++; +} + +typedef struct DataPageDeleteStack +{ + struct DataPageDeleteStack *child; + struct DataPageDeleteStack *parent; + + BlockNumber blkno; /* current block number */ + BlockNumber leftBlkno; /* rightest non-deleted page on left */ + bool isRoot; +} DataPageDeleteStack; + +/* + * scans posting tree and deletes empty pages + */ +static bool +rumScanToDelete(RumVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff) +{ + DataPageDeleteStack *me; + Buffer buffer; + Page page; + bool meDelete = FALSE; + + if (isRoot) + { + me = parent; + } + else + { + if (!parent->child) + { + me = (DataPageDeleteStack *) palloc0(sizeof(DataPageDeleteStack)); + me->parent = parent; + parent->child = me; + me->leftBlkno = InvalidBlockNumber; + } + else + me = parent->child; + } + + buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, blkno, + RBM_NORMAL, gvs->strategy); + page = BufferGetPage(buffer); + + Assert(RumPageIsData(page)); + + if (!RumPageIsLeaf(page)) + { + OffsetNumber i; + + me->blkno = blkno; + for (i = FirstOffsetNumber; i <= RumPageGetOpaque(page)->maxoff; i++) + { + PostingItem *pitem = (PostingItem *) RumDataPageGetItem(page, i); + + if (rumScanToDelete(gvs, PostingItemGetBlockNumber(pitem), FALSE, me, i)) + i--; + } + } + + if (RumPageGetOpaque(page)->maxoff < FirstOffsetNumber) + { + /* we never delete the left- or rightmost branch */ + if (me->leftBlkno != InvalidBlockNumber && !RumPageRightMost(page)) + { + Assert(!isRoot); + rumDeletePage(gvs, blkno, me->leftBlkno, me->parent->blkno, myoff, me->parent->isRoot); + meDelete = 
TRUE; + } + } + + ReleaseBuffer(buffer); + + if (!meDelete) + me->leftBlkno = blkno; + + return meDelete; +} + +static void +rumVacuumPostingTree(RumVacuumState *gvs, OffsetNumber attnum, BlockNumber rootBlkno) +{ + Buffer rootBuffer = InvalidBuffer; + DataPageDeleteStack root, + *ptr, + *tmp; + + if (rumVacuumPostingTreeLeaves(gvs, attnum, rootBlkno, TRUE, &rootBuffer) == FALSE) + { + Assert(rootBuffer == InvalidBuffer); + return; + } + + memset(&root, 0, sizeof(DataPageDeleteStack)); + root.leftBlkno = InvalidBlockNumber; + root.isRoot = TRUE; + + vacuum_delay_point(); + + rumScanToDelete(gvs, rootBlkno, TRUE, &root, InvalidOffsetNumber); + + ptr = root.child; + while (ptr) + { + tmp = ptr->child; + pfree(ptr); + ptr = tmp; + } + + UnlockReleaseBuffer(rootBuffer); +} + +/* + * returns modified page or NULL if page isn't modified. + * Function works with original page until first change is occurred, + * then page is copied into temporary one. + */ +static Page +rumVacuumEntryPage(RumVacuumState *gvs, Buffer buffer, BlockNumber *roots, OffsetNumber *attnums, uint32 *nroot) +{ + Page origpage = BufferGetPage(buffer), + tmppage; + OffsetNumber i, + maxoff = PageGetMaxOffsetNumber(origpage); + + tmppage = origpage; + + *nroot = 0; + + for (i = FirstOffsetNumber; i <= maxoff; i++) + { + IndexTuple itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i)); + + if (RumIsPostingTree(itup)) + { + /* + * store posting tree's roots for further processing, we can't + * vacuum it just now due to risk of deadlocks with scans/inserts + */ + roots[*nroot] = RumGetDownlink(itup); + attnums[*nroot] = rumtuple_get_attrnum(&gvs->rumstate, itup); + (*nroot)++; + } + else if (RumGetNPosting(itup) > 0) + { + /* + * if we already create temporary page, we will make changes in + * place + */ + Size cleanedSize; + Pointer cleaned = NULL; + uint32 newN = + rumVacuumPostingList(gvs, rumtuple_get_attrnum(&gvs->rumstate, itup), + RumGetPosting(itup), RumGetNPosting(itup), &cleaned, + IndexTupleSize(itup) - RumGetPostingOffset(itup), + &cleanedSize); + + if (RumGetNPosting(itup) != newN) + { + OffsetNumber attnum; + Datum key; + RumNullCategory category; + + /* + * Some ItemPointers was deleted, so we should remake our + * tuple + */ + + if (tmppage == origpage) + { + /* + * On first difference we create temporary page in memory + * and copies content in to it. + */ + tmppage = PageGetTempPageCopy(origpage); + + /* set itup pointer to new page */ + itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i)); + } + + attnum = rumtuple_get_attrnum(&gvs->rumstate, itup); + key = rumtuple_get_key(&gvs->rumstate, itup, &category); + /* FIXME */ + itup = RumFormTuple(&gvs->rumstate, attnum, key, category, + cleaned, cleanedSize, newN, true); + pfree(cleaned); + PageIndexTupleDelete(tmppage, i); + + if (PageAddItem(tmppage, (Item) itup, IndexTupleSize(itup), i, false, false) != i) + elog(ERROR, "failed to add item to index page in \"%s\"", + RelationGetRelationName(gvs->index)); + + pfree(itup); + } + } + } + + return (tmppage == origpage) ? 
NULL : tmppage; +} + +IndexBulkDeleteResult * +rumbulkdelete(IndexVacuumInfo *info, + IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, + void *callback_state) +{ + Relation index = info->index; + BlockNumber blkno = RUM_ROOT_BLKNO; + RumVacuumState gvs; + Buffer buffer; + BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))]; + OffsetNumber attnumOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))]; + uint32 nRoot; + + gvs.index = index; + gvs.callback = callback; + gvs.callback_state = callback_state; + gvs.strategy = info->strategy; + initRumState(&gvs.rumstate, index); + + /* first time through? */ + if (stats == NULL) + { + /* Yes, so initialize stats to zeroes */ + stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); + /* and cleanup any pending inserts */ + rumInsertCleanup(&gvs.rumstate, true, stats); + } + + /* we'll re-count the tuples each time */ + stats->num_index_tuples = 0; + gvs.result = stats; + + buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, + RBM_NORMAL, info->strategy); + + /* find leaf page */ + for (;;) + { + Page page = BufferGetPage(buffer); + IndexTuple itup; + + LockBuffer(buffer, RUM_SHARE); + + Assert(!RumPageIsData(page)); + + if (RumPageIsLeaf(page)) + { + LockBuffer(buffer, RUM_UNLOCK); + LockBuffer(buffer, RUM_EXCLUSIVE); + + if (blkno == RUM_ROOT_BLKNO && !RumPageIsLeaf(page)) + { + LockBuffer(buffer, RUM_UNLOCK); + continue; /* check it one more */ + } + break; + } + + Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber); + + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber)); + blkno = RumGetDownlink(itup); + Assert(blkno != InvalidBlockNumber); + + UnlockReleaseBuffer(buffer); + buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, + RBM_NORMAL, info->strategy); + } + + /* right now we found leftmost page in entry's BTree */ + + for (;;) + { + Page page = BufferGetPage(buffer); + Page resPage; + uint32 i; + + Assert(!RumPageIsData(page)); + + resPage = rumVacuumEntryPage(&gvs, buffer, rootOfPostingTree, attnumOfPostingTree, &nRoot); + + blkno = RumPageGetOpaque(page)->rightlink; + + if (resPage) + { + GenericXLogState *state; + + state = GenericXLogStart(index); + page = GenericXLogRegisterBuffer(state, buffer, 0); + PageRestoreTempPage(resPage, page); + GenericXLogFinish(state); + UnlockReleaseBuffer(buffer); + } + else + { + UnlockReleaseBuffer(buffer); + } + + vacuum_delay_point(); + + for (i = 0; i < nRoot; i++) + { + rumVacuumPostingTree(&gvs, attnumOfPostingTree[i], rootOfPostingTree[i]); + vacuum_delay_point(); + } + + if (blkno == InvalidBlockNumber) /* rightmost page */ + break; + + buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, + RBM_NORMAL, info->strategy); + LockBuffer(buffer, RUM_EXCLUSIVE); + } + + return gvs.result; +} + +IndexBulkDeleteResult * +rumvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats) +{ + Relation index = info->index; + bool needLock; + BlockNumber npages, + blkno; + BlockNumber totFreePages; + RumState rumstate; + GinStatsData idxStat; + + /* + * In an autovacuum analyze, we want to clean up pending insertions. + * Otherwise, an ANALYZE-only call is a no-op. 
+ */ + if (info->analyze_only) + { + if (IsAutoVacuumWorkerProcess()) + { + initRumState(&rumstate, index); + rumInsertCleanup(&rumstate, true, stats); + } + return stats; + } + + /* + * Set up all-zero stats and cleanup pending inserts if rumbulkdelete + * wasn't called + */ + if (stats == NULL) + { + stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); + initRumState(&rumstate, index); + rumInsertCleanup(&rumstate, true, stats); + } + + memset(&idxStat, 0, sizeof(idxStat)); + + /* + * XXX we always report the heap tuple count as the number of index + * entries. This is bogus if the index is partial, but it's real hard to + * tell how many distinct heap entries are referenced by a RUM index. + */ + stats->num_index_tuples = info->num_heap_tuples; + stats->estimated_count = info->estimated_count; + + /* + * Need lock unless it's local to this backend. + */ + needLock = !RELATION_IS_LOCAL(index); + + if (needLock) + LockRelationForExtension(index, ExclusiveLock); + npages = RelationGetNumberOfBlocks(index); + if (needLock) + UnlockRelationForExtension(index, ExclusiveLock); + + totFreePages = 0; + + for (blkno = RUM_ROOT_BLKNO; blkno < npages; blkno++) + { + Buffer buffer; + Page page; + + vacuum_delay_point(); + + buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno, + RBM_NORMAL, info->strategy); + LockBuffer(buffer, RUM_SHARE); + page = (Page) BufferGetPage(buffer); + + if (PageIsNew(page) || RumPageIsDeleted(page)) + { + Assert(blkno != RUM_ROOT_BLKNO); + RecordFreeIndexPage(index, blkno); + totFreePages++; + } + else if (RumPageIsData(page)) + { + idxStat.nDataPages++; + } + else if (!RumPageIsList(page)) + { + idxStat.nEntryPages++; + + if (RumPageIsLeaf(page)) + idxStat.nEntries += PageGetMaxOffsetNumber(page); + } + + UnlockReleaseBuffer(buffer); + } + + /* Update the metapage with accurate page and entry counts */ + idxStat.nTotalPages = npages; + rumUpdateStats(info->index, &idxStat); + + /* Finally, vacuum the FSM */ + IndexFreeSpaceMapVacuum(info->index); + + stats->pages_free = totFreePages; + + if (needLock) + LockRelationForExtension(index, ExclusiveLock); + stats->num_pages = RelationGetNumberOfBlocks(index); + if (needLock) + UnlockRelationForExtension(index, ExclusiveLock); + + return stats; +} diff --git a/rumvalidate.c b/rumvalidate.c new file mode 100644 index 0000000000..bf608a5723 --- /dev/null +++ b/rumvalidate.c @@ -0,0 +1,290 @@ +/*------------------------------------------------------------------------- + * + * rumvalidate.c + * Opclass validator for RUM. + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/amvalidate.h" +#include "access/htup_details.h" +#include "catalog/pg_amop.h" +#include "catalog/pg_amproc.h" +#include "catalog/pg_opclass.h" +#include "catalog/pg_opfamily.h" +#include "catalog/pg_type.h" +#include "utils/builtins.h" +#include "utils/catcache.h" +#include "utils/syscache.h" + +#include "rum.h" + +/* + * Validator for a RUM opclass. 
+ */ +bool +rumvalidate(Oid opclassoid) +{ + bool result = true; + HeapTuple classtup; + Form_pg_opclass classform; + Oid opfamilyoid; + Oid opcintype; + Oid opckeytype; + char *opclassname; + HeapTuple familytup; + Form_pg_opfamily familyform; + char *opfamilyname; + CatCList *proclist, + *oprlist; + List *grouplist; + OpFamilyOpFuncGroup *opclassgroup; + int i; + ListCell *lc; + + /* Fetch opclass information */ + classtup = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclassoid)); + if (!HeapTupleIsValid(classtup)) + elog(ERROR, "cache lookup failed for operator class %u", opclassoid); + classform = (Form_pg_opclass) GETSTRUCT(classtup); + + opfamilyoid = classform->opcfamily; + opcintype = classform->opcintype; + opckeytype = classform->opckeytype; + if (!OidIsValid(opckeytype)) + opckeytype = opcintype; + opclassname = NameStr(classform->opcname); + + /* Fetch opfamily information */ + familytup = SearchSysCache1(OPFAMILYOID, ObjectIdGetDatum(opfamilyoid)); + if (!HeapTupleIsValid(familytup)) + elog(ERROR, "cache lookup failed for operator family %u", opfamilyoid); + familyform = (Form_pg_opfamily) GETSTRUCT(familytup); + + opfamilyname = NameStr(familyform->opfname); + + /* Fetch all operators and support functions of the opfamily */ + oprlist = SearchSysCacheList1(AMOPSTRATEGY, ObjectIdGetDatum(opfamilyoid)); + proclist = SearchSysCacheList1(AMPROCNUM, ObjectIdGetDatum(opfamilyoid)); + + /* Check individual support functions */ + for (i = 0; i < proclist->n_members; i++) + { + HeapTuple proctup = &proclist->members[i]->tuple; + Form_pg_amproc procform = (Form_pg_amproc) GETSTRUCT(proctup); + bool ok; + + /* + * All RUM support functions should be registered with matching + * left/right types + */ + if (procform->amproclefttype != procform->amprocrighttype) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opfamily %s contains support procedure %s with cross-type registration", + opfamilyname, + format_procedure(procform->amproc)))); + result = false; + } + + /* + * We can't check signatures except within the specific opclass, since + * we need to know the associated opckeytype in many cases. 
+ */ + if (procform->amproclefttype != opcintype) + continue; + + /* Check procedure numbers and function signatures */ + switch (procform->amprocnum) + { + case GIN_COMPARE_PROC: + ok = check_amproc_signature(procform->amproc, INT4OID, false, + 2, 2, opckeytype, opckeytype); + break; + case GIN_EXTRACTVALUE_PROC: + /* Some opclasses omit nullFlags */ + ok = check_amproc_signature(procform->amproc, INTERNALOID, false, + 5, 5, opcintype, INTERNALOID, + INTERNALOID, INTERNALOID, + INTERNALOID); + break; + case GIN_EXTRACTQUERY_PROC: + /* Some opclasses omit nullFlags and searchMode */ + ok = check_amproc_signature(procform->amproc, INTERNALOID, false, + 7, 7, opcintype, INTERNALOID, + INT2OID, INTERNALOID, INTERNALOID, + INTERNALOID, INTERNALOID); + break; + case GIN_CONSISTENT_PROC: + /* Some opclasses omit queryKeys and nullFlags */ + ok = check_amproc_signature(procform->amproc, BOOLOID, false, + 6, 8, INTERNALOID, INT2OID, + opcintype, INT4OID, + INTERNALOID, INTERNALOID, + INTERNALOID, INTERNALOID); + break; + case GIN_COMPARE_PARTIAL_PROC: + ok = check_amproc_signature(procform->amproc, INT4OID, false, + 4, 4, opckeytype, opckeytype, + INT2OID, INTERNALOID); + break; + case GIN_TRICONSISTENT_PROC: + ok = check_amproc_signature(procform->amproc, CHAROID, false, + 7, 7, INTERNALOID, INT2OID, + opcintype, INT4OID, + INTERNALOID, INTERNALOID, + INTERNALOID); + break; + case RUM_CONFIG_PROC: + ok = check_amproc_signature(procform->amproc, VOIDOID, false, + 1, 1, INTERNALOID); + break; + case RUM_PRE_CONSISTENT_PROC: + ok = check_amproc_signature(procform->amproc, BOOLOID, false, + 8, 8, INTERNALOID, INT2OID, + opcintype, INT4OID, + INTERNALOID, INTERNALOID, + INTERNALOID, INTERNALOID); + break; + case RUM_ORDERING_PROC: + ok = check_amproc_signature(procform->amproc, FLOAT8OID, false, + 9, 9, INTERNALOID, INT2OID, + opcintype, INT4OID, + INTERNALOID, INTERNALOID, + INTERNALOID, INTERNALOID, + INTERNALOID); + break; + default: + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opfamily %s contains function %s with invalid support number %d", + opfamilyname, + format_procedure(procform->amproc), + procform->amprocnum))); + result = false; + continue; /* don't want additional message */ + } + + if (!ok) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opfamily %s contains function %s with wrong signature for support number %d", + opfamilyname, + format_procedure(procform->amproc), + procform->amprocnum))); + result = false; + } + } + + /* Check individual operators */ + for (i = 0; i < oprlist->n_members; i++) + { + HeapTuple oprtup = &oprlist->members[i]->tuple; + Form_pg_amop oprform = (Form_pg_amop) GETSTRUCT(oprtup); + + /* TODO: Check that only allowed strategy numbers exist */ + if (oprform->amopstrategy < 1 || oprform->amopstrategy > 63) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opfamily %s contains operator %s with invalid strategy number %d", + opfamilyname, + format_operator(oprform->amopopr), + oprform->amopstrategy))); + result = false; + } + + /* rum doesn't support ORDER BY operators */ + if (oprform->amoppurpose != AMOP_SEARCH || + OidIsValid(oprform->amopsortfamily)) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opfamily %s contains invalid ORDER BY specification for operator %s", + opfamilyname, + format_operator(oprform->amopopr)))); + result = false; + } + + /* Check operator signature --- same for all rum strategies */ + if 
(!check_amop_signature(oprform->amopopr, BOOLOID, + oprform->amoplefttype, + oprform->amoprighttype)) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opfamily %s contains operator %s with wrong signature", + opfamilyname, + format_operator(oprform->amopopr)))); + result = false; + } + } + + /* Now check for inconsistent groups of operators/functions */ + grouplist = identify_opfamily_groups(oprlist, proclist); + opclassgroup = NULL; + foreach(lc, grouplist) + { + OpFamilyOpFuncGroup *thisgroup = (OpFamilyOpFuncGroup *) lfirst(lc); + + /* Remember the group exactly matching the test opclass */ + if (thisgroup->lefttype == opcintype && + thisgroup->righttype == opcintype) + opclassgroup = thisgroup; + + /* + * There is not a lot we can do to check the operator sets, since each + * RUM opclass is more or less a law unto itself, and some contain + * only operators that are binary-compatible with the opclass datatype + * (meaning that empty operator sets can be OK). That case also means + * that we shouldn't insist on nonempty function sets except for the + * opclass's own group. + */ + } + + /* Check that the originally-named opclass is complete */ + for (i = 1; i <= RUMNProcs; i++) + { + if (opclassgroup && + (opclassgroup->functionset & (((uint64) 1) << i)) != 0) + continue; /* got it */ + if (i == GIN_COMPARE_PARTIAL_PROC) + continue; /* optional method */ + if (i == GIN_CONSISTENT_PROC || i == GIN_TRICONSISTENT_PROC) + continue; /* don't need both, see check below loop */ + if (i == RUM_PRE_CONSISTENT_PROC) + continue; + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opclass %s is missing support function %d", + opclassname, i))); + result = false; + } + if (!opclassgroup || + ((opclassgroup->functionset & (1 << GIN_CONSISTENT_PROC)) == 0 && + (opclassgroup->functionset & (1 << GIN_TRICONSISTENT_PROC)) == 0)) + { + ereport(INFO, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("rum opclass %s is missing support function %d or %d", + opclassname, + GIN_CONSISTENT_PROC, GIN_TRICONSISTENT_PROC))); + result = false; + } + + + ReleaseCatCacheList(proclist); + ReleaseCatCacheList(oprlist); + ReleaseSysCache(familytup); + ReleaseSysCache(classtup); + + return result; +} diff --git a/sql/rum.sql b/sql/rum.sql new file mode 100644 index 0000000000..93eb42f282 --- /dev/null +++ b/sql/rum.sql @@ -0,0 +1,81 @@ +CREATE EXTENSION rum; + +CREATE TABLE test_rum( t text, a tsvector ); + +CREATE TRIGGER tsvectorupdate +BEFORE UPDATE OR INSERT ON test_rum +FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger('a', 'pg_catalog.english', 't'); + +CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); + +\copy test_rum(t) from 'data/rum.data'; + +SET enable_seqscan=off; + +explain (costs off) +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote'); +explain (costs off) +SELECT * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote') +ORDER BY a >< to_tsquery('pg_catalog.english', 'ever|wrote'); +explain (costs off) +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', + 'def <-> fgr'); + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'have&wish'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'knew&brain'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'among'); +SELECT count(*) FROM test_rum 
WHERE a @@ to_tsquery('pg_catalog.english', 'structure&ancient'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', '(complimentary|sight)&(sending|heart)'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', + 'def <-> fgr'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', + 'def <2> fgr'); +SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way')), * + FROM test_rum + WHERE a @@ to_tsquery('pg_catalog.english', 'way') + ORDER BY a >< to_tsquery('pg_catalog.english', 'way'); +SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), * + FROM test_rum + WHERE a @@ to_tsquery('pg_catalog.english', 'way & (go | half)') + ORDER BY a >< to_tsquery('pg_catalog.english', 'way & (go | half)'); + +INSERT INTO test_rum (t) VALUES ('foo bar foo the over foo qq bar'); +INSERT INTO test_rum (t) VALUES ('345 qwerty copyright'); +INSERT INTO test_rum (t) VALUES ('345 qwerty'); +INSERT INTO test_rum (t) VALUES ('A fat cat has just eaten a rat.'); + +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'bar'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'qwerty&345'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', '345'); +SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'rat'); + +SELECT a FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'bar') ORDER BY a; + +DELETE FROM test_rum; + +SELECT count(*) from test_rum; + +CREATE TABLE tst (i int4, t tsvector); +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(1,100000) i; +CREATE INDEX tstidx ON tst USING rum (t rum_tsvector_ops); + +DELETE FROM tst WHERE i = 1; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(10001,11000) i; + +DELETE FROM tst WHERE i = 2; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(11001,12000) i; + +DELETE FROM tst WHERE i = 3; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(12001,13000) i; + +DELETE FROM tst WHERE i = 4; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(13001,14000) i; + +DELETE FROM tst WHERE i = 5; +VACUUM tst; +INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(14001,15000) i; diff --git a/t/001_wal.pl b/t/001_wal.pl new file mode 100644 index 0000000000..053aecadb5 --- /dev/null +++ b/t/001_wal.pl @@ -0,0 +1,81 @@ +# Test generic xlog record work for rum index replication. +use strict; +use warnings; +use PostgresNode; +use TestLib; +use Test::More tests => 31; + +my $node_master; +my $node_standby; + +# Run few queries on both master and standby and check their results match. 
+sub test_index_replay +{ + my ($test_name) = @_; + + # Wait for standby to catch up + my $applname = $node_standby->name; + my $caughtup_query = + "SELECT pg_current_xlog_location() <= write_location FROM pg_stat_replication WHERE application_name = '$applname';"; + $node_master->poll_query_until('postgres', $caughtup_query) + or die "Timed out while waiting for standby 1 to catch up"; + + my $queries = qq(SET enable_seqscan=off; +SET enable_bitmapscan=on; +SET enable_indexscan=on; +SELECT * FROM tst WHERE t \@@ to_tsquery('simple', 'qscfq'); +SELECT * FROM tst WHERE t \@@ to_tsquery('simple', 'ztcow'); +SELECT * FROM tst WHERE t \@@ to_tsquery('simple', 'jqljy'); +SELECT * FROM tst WHERE t \@@ to_tsquery('simple', 'lvnex'); +); + + # Run test queries and compare their result + my $master_result = $node_master->psql("postgres", $queries); + my $standby_result = $node_standby->psql("postgres", $queries); + + is($master_result, $standby_result, "$test_name: query result matches"); +} + +# Initialize master node +$node_master = get_new_node('master'); +$node_master->init(allows_streaming => 1); +$node_master->start; +my $backup_name = 'my_backup'; + +# Take backup +$node_master->backup($backup_name); + +# Create streaming standby linking to master +$node_standby = get_new_node('standby'); +$node_standby->init_from_backup($node_master, $backup_name, + has_streaming => 1); +$node_standby->start; + +# Create some rum index on master +$node_master->psql("postgres", "CREATE EXTENSION rum;"); +$node_master->psql("postgres", "CREATE TABLE tst (i int4, t tsvector);"); +$node_master->psql("postgres", "INSERT INTO tst SELECT i%10, + to_tsvector('simple', array_to_string(array( + select substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', trunc(random() * 52)::integer + 1, 1) + FROM generate_series(i, i + 4)), '')) + FROM generate_series(1,100000) i;"); +$node_master->psql("postgres", "CREATE INDEX rumidx ON tst USING rum (t rum_tsvector_ops);"); + +# Test that queries give same result +test_index_replay('initial'); + +# Run 10 cycles of table modification. Run test queries after each modification. +for my $i (1..10) +{ + $node_master->psql("postgres", "DELETE FROM tst WHERE i = $i;"); + test_index_replay("delete $i"); + $node_master->psql("postgres", "VACUUM tst;"); + test_index_replay("vacuum $i"); + my ($start, $end) = (100001 + ($i - 1) * 10000, 100000 + $i * 10000); + $node_master->psql("postgres", "INSERT INTO tst SELECT i%10, + to_tsvector('simple', array_to_string(array( + select substr('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', trunc(random() * 52)::integer + 1, 1) + FROM generate_series(i, i + 4)), '')) + FROM generate_series($start,$end) i;"); + test_index_replay("insert $i"); +} From a527bfb38f8e460291380703e840ebc8d5e5c054 Mon Sep 17 00:00:00 2001 From: Zakirov Artur Date: Fri, 6 May 2016 15:17:41 +0300 Subject: [PATCH 02/13] Fix the README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f3d7743c6e..3d0b9519f8 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ -# RUM - RUM access methods +# RUM - RUM access method ## Introduction -The **rum** module provides access methods to work with RUM index. It is based +The **rum** module provides access method to work with RUM index. It is based on the GIN access methods code. 
## License From 8b0dbc08fba807244a16fb894ae4efc0bc29a3ff Mon Sep 17 00:00:00 2001 From: Zakirov Artur Date: Fri, 6 May 2016 19:50:31 +0300 Subject: [PATCH 03/13] Fix the README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3d0b9519f8..7523676811 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); And we can execute the following queries: ```sql -=# SELECT t, a >< to_tsquery('english', 'beautiful | place') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'beautiful | place') order by a >< to_tsquery('english', 'beautiful | place'); +=> SELECT t, a >< to_tsquery('english', 'beautiful | place') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'beautiful | place') order by a >< to_tsquery('english', 'beautiful | place'); t | rank ---------------------------------+----------- The situation is most beautiful | 0.0303964 @@ -75,7 +75,7 @@ And we can execute the following queries: It looks like a beautiful place | 0.0607927 (3 rows) -=# SELECT t, a >< to_tsquery('english', 'place | situation') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'place | situation') order by a >< to_tsquery('english', 'place | situation'); +=> SELECT t, a >< to_tsquery('english', 'place | situation') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'place | situation') order by a >< to_tsquery('english', 'place | situation'); t | rank ---------------------------------+----------- The situation is most beautiful | 0.0303964 From 7cb0e9f879b882b6687f38fcf010f2a03650c73e Mon Sep 17 00:00:00 2001 From: Artur Zakirov Date: Fri, 13 May 2016 17:34:55 +0300 Subject: [PATCH 04/13] Fix rum.h header --- rum.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rum.h b/rum.h index b922f36148..9eccf65d8f 100644 --- a/rum.h +++ b/rum.h @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * bloom.h + * rum.h * Exported definitions for RUM index. * * Portions Copyright (c) 2015-2016, Postgres Professional From 0d26686eb7b45147456f759e8bd054749bbd526b Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Sun, 15 May 2016 20:54:08 +0300 Subject: [PATCH 05/13] Initial. 
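
This patch adds rumtsquery.c and the rum_tsquery_ops operator class, which
index tsquery values themselves so that stored queries can be matched against
an incoming tsvector (inverted full-text search). Each lexeme of an indexed
tsquery becomes an entry whose bytea additional information encodes its chain
of ancestor operator nodes, which ruminv_tsvector_consistent decodes to
re-evaluate the boolean structure at search time.

A minimal usage sketch, mirroring the regression test added later in this
series (sql/ruminv.sql); the table and index names are illustrative only:

    CREATE EXTENSION rum;

    -- Store the queries and index them with the new default opclass for tsquery.
    CREATE TABLE test_invrum(q tsquery);
    CREATE INDEX test_invrum_idx ON test_invrum USING rum (q);

    INSERT INTO test_invrum VALUES ('a|b'::tsquery);
    INSERT INTO test_invrum VALUES ('a&b'::tsquery);
    INSERT INTO test_invrum VALUES ('!a|b'::tsquery);

    -- Find every stored query that matches the given document.
    SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector;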
--- Makefile | 2 +- rum--1.0.sql | 31 ++++ rumtsquery.c | 468 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 500 insertions(+), 1 deletion(-) create mode 100644 rumtsquery.c diff --git a/Makefile b/Makefile index 97180053b2..c14a31b4e0 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # contrib/rum/Makefile MODULE_big = rum -OBJS = rumsort.o rum_ts_utils.o \ +OBJS = rumsort.o rum_ts_utils.o rumtsquery.o \ rumbtree.o rumbulk.o rumdatapage.o \ rumentrypage.o rumfast.o rumget.o ruminsert.o \ rumscan.o rumutil.o rumvacuum.o rumvalidate.o $(WIN32RES) diff --git a/rum--1.0.sql b/rum--1.0.sql index 2556d17f7b..81d7a2d1c9 100644 --- a/rum--1.0.sql +++ b/rum--1.0.sql @@ -64,3 +64,34 @@ AS FUNCTION 8 rum_tsquery_pre_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), FUNCTION 9 rum_tsquery_distance(internal,smallint,tsvector,int,internal,internal,internal,internal,internal), STORAGE text; + +CREATE FUNCTION ruminv_extract_tsquery(tsquery,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION ruminv_extract_tsvector(tsvector,internal,smallint,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION ruminv_tsvector_consistent(internal, smallint, tsvector, integer, internal, internal, internal, internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION ruminv_tsquery_config(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR CLASS rum_tsquery_ops +DEFAULT FOR TYPE tsquery USING rum +AS + OPERATOR 1 @@ (tsquery, tsvector), + FUNCTION 1 gin_cmp_tslexeme(text, text), + FUNCTION 2 ruminv_extract_tsquery(tsquery,internal,internal,internal,internal), + FUNCTION 3 ruminv_extract_tsvector(tsvector,internal,smallint,internal,internal,internal,internal), + FUNCTION 4 ruminv_tsvector_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + FUNCTION 7 ruminv_tsquery_config(internal), + STORAGE text; diff --git a/rumtsquery.c b/rumtsquery.c new file mode 100644 index 0000000000..65290af569 --- /dev/null +++ b/rumtsquery.c @@ -0,0 +1,468 @@ +/*------------------------------------------------------------------------- + * + * rumtsquery.c + * Inverted fulltext search: indexing tsqueries. 
+ * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/pg_type.h" +#include "tsearch/ts_type.h" +#include "tsearch/ts_utils.h" +#include "utils/array.h" +#include "utils/builtins.h" + +#include "rum.h" + +typedef struct QueryItemWrap +{ + QueryItemType type; + int8 oper; + bool not; + int operandsCount, + operandsAllocated; + struct QueryItemWrap *operands; + struct QueryItemWrap *parent; + int distance, + length; + int sum; + int num; +} QueryItemWrap; + +static QueryItemWrap * +add_child(QueryItemWrap *parent) +{ + QueryItemWrap *result; + if (!parent) + { + result = (QueryItemWrap *) palloc0(sizeof(QueryItemWrap)); + } + else + { + parent->operandsCount++; + while (parent->operandsCount > parent->operandsAllocated) + { + if (parent->operandsAllocated > 0) + { + parent->operandsAllocated *= 2; + parent->operands = (QueryItemWrap *) repalloc(parent->operands, parent->operandsAllocated * sizeof(*parent->operands)); + } + else + { + parent->operandsAllocated = 4; + parent->operands = (QueryItemWrap *) palloc(parent->operandsAllocated * sizeof(*parent->operands)); + } + } + result = &parent->operands[parent->operandsCount - 1]; + memset(result, 0, sizeof(*result)); + result->parent = parent; + } + return result; +} + +static QueryItemWrap * +make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) +{ + if (item->type == QI_VAL) + { + QueryOperand *operand = (QueryOperand *) item; + QueryItemWrap *wrap = add_child(parent); + wrap->type = QI_VAL; + wrap->distance = operand->distance; + wrap->length = operand->length; + wrap->not = not; + return wrap; + } + + switch (item->qoperator.oper) + { + case OP_NOT: + return make_query_item_wrap(item + 1, parent, !not); + + case OP_AND: + case OP_OR: + { + uint8 oper = item->qoperator.oper; + if (not) + oper = (oper == OP_AND) ? OP_OR : OP_AND; + + if (!parent || oper != parent->oper) + { + QueryItemWrap *wrap = add_child(parent); + + wrap->type = QI_OPR; + wrap->oper = oper; + + make_query_item_wrap(item + item->qoperator.left, wrap, not); + make_query_item_wrap(item + 1, wrap, not); + return wrap; + } + else + { + make_query_item_wrap(item + item->qoperator.left, parent, not); + make_query_item_wrap(item + 1, parent, not); + return NULL; + } + } + case OP_PHRASE: + default: + elog(ERROR, "Invalid tsquery operator"); + } + + /* not reachable, but keep compiler quiet */ + return NULL; +} + +static int +calc_wraps(QueryItemWrap *wrap, int *num) +{ + int i, notCount = 0, result; + + for (i = 0; i < wrap->operandsCount; i++) + { + if (wrap->operands[i].not) + notCount++; + } + + if (wrap->type == QI_OPR) + { + wrap->num = (*num)++; + if (wrap->oper == OP_AND) + wrap->sum = notCount + 1 - wrap->operandsCount; + if (wrap->oper == OP_OR) + wrap->sum = notCount; + } + else if (wrap->type == QI_VAL) + { + return 1; + } + + result = 0; + for (i = 0; i < wrap->operandsCount; i++) + result += calc_wraps(&wrap->operands[i], num); + return result; +} + +#define MAX_ENCODED_LEN 5 + +/* + * Varbyte-encode 'val' into *ptr. *ptr is incremented to next integer. + */ +static void +encode_varbyte(uint32 val, unsigned char **ptr) +{ + unsigned char *p = *ptr; + + while (val > 0x7F) + { + *(p++) = 0x80 | (val & 0x7F); + val >>= 7; + } + *(p++) = (unsigned char) val; + + *ptr = p; +} + +/* + * Decode varbyte-encoded integer at *ptr. 
*ptr is incremented to next integer. + */ +static uint32 +decode_varbyte(unsigned char **ptr) +{ + uint32 val; + unsigned char *p = *ptr; + uint32 c; + + c = *(p++); + val = c & 0x7F; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 7; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 14; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 21; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 28; + } + } + } + } + + *ptr = p; + + return val; +} + +typedef struct +{ + Datum *addInfo; + bool *addInfoIsNull; + Datum *entries; + int index; + char *operand; +} ExtractContext; + +static void +extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) +{ + if (wrap->type == QI_VAL) + { + bytea *addinfo = (bytea *) palloc(VARHDRSZ + level * MAX_ENCODED_LEN); + unsigned char *ptr = (unsigned char *)VARDATA(addinfo); + int index = context->index; + + context->entries[index] = PointerGetDatum(cstring_to_text_with_len(context->operand + wrap->distance, wrap->length)); + + while (wrap->parent) + { + QueryItemWrap *parent = wrap->parent; + uint32 sum; + encode_varbyte((uint32) parent->num, &ptr); + sum = (uint32)abs(parent->sum); + sum <<= 2; + if (parent->sum < 0) + sum |= 2; + if (wrap->not) + sum |= 1; + encode_varbyte(sum, &ptr); + wrap = parent; + } + SET_VARSIZE(addinfo, ptr - (unsigned char *)addinfo); + + context->addInfo[index] = PointerGetDatum(addinfo); + context->addInfoIsNull[index] = false; + context->index++; + } + else if (wrap->type == QI_OPR) + { + int i; + for (i = 0; i < wrap->operandsCount; i++) + extract_wraps(&wrap->operands[i], context, level + 1); + } +} + +/*PG_FUNCTION_INFO_V1(rum_process_tsquery); +Datum +rum_process_tsquery(PG_FUNCTION_ARGS) +{ + TSQuery query = PG_GETARG_TSQUERY(0); + QueryItem *item = GETQUERY(query); + QueryItemWrap *wrap = make_query_item_wrap(item, NULL, false); + int num = 1; + + calc_wraps(wrap, &num); + print_wraps(wrap, , 0); + + PG_RETURN_VOID(); +}*/ + +PG_FUNCTION_INFO_V1(ruminv_extract_tsquery); +Datum +ruminv_extract_tsquery(PG_FUNCTION_ARGS) +{ + TSQuery query = PG_GETARG_TSQUERY(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); + bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); + Datum *entries = NULL; + QueryItem *item = GETQUERY(query); + QueryItemWrap *wrap; + ExtractContext context; + int num = 1; + + wrap = make_query_item_wrap(item, NULL, false); + *nentries = calc_wraps(wrap, &num); + + entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); + *addInfo = (Datum *) palloc(sizeof(Datum) * (*nentries)); + *addInfoIsNull = (bool *) palloc(sizeof(bool) * (*nentries)); + + context.addInfo = *addInfo; + context.addInfoIsNull = *addInfoIsNull; + context.entries = entries; + context.operand = GETOPERAND(query); + context.index = 0; + + extract_wraps(wrap, &context, 0); + +/* elog(NOTICE, "%d", *nentries); + for (i = 0; i < *nentries; i++) + { + elog(NOTICE, "%s", text_to_cstring(DatumGetPointer((entries)[i]))); + }*/ + + PG_FREE_IF_COPY(query, 0); + PG_RETURN_POINTER(entries); +} + +PG_FUNCTION_INFO_V1(ruminv_extract_tsvector); +Datum +ruminv_extract_tsvector(PG_FUNCTION_ARGS) +{ + TSVector vector = PG_GETARG_TSVECTOR(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + + /* StrategyNumber strategy = PG_GETARG_UINT16(2); */ + bool **ptr_partialmatch = (bool **) PG_GETARG_POINTER(3); + Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); + + /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */ + int32 *searchMode = (int32 *) 
PG_GETARG_POINTER(6); + Datum *entries = NULL; + + *nentries = vector->size; + if (vector->size > 0) + { + int i; + WordEntry *we = ARRPTR(vector); + + *extra_data = NULL; + *ptr_partialmatch = NULL; + *searchMode = GIN_SEARCH_MODE_DEFAULT; + + entries = (Datum *) palloc(sizeof(Datum) * vector->size); + for (i = 0; i < vector->size; i++) + { + text *txt; + + txt = cstring_to_text_with_len(STRPTR(vector) + we->pos, we->len); + entries[i] = PointerGetDatum(txt); + } + } + PG_FREE_IF_COPY(vector, 0); + PG_RETURN_POINTER(entries); +} + +typedef struct +{ + int sum; + int parent; + bool not; +} TmpNode; + +PG_FUNCTION_INFO_V1(ruminv_tsvector_consistent); +Datum +ruminv_tsvector_consistent(PG_FUNCTION_ARGS) +{ + bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ + /* TSVector vector = PG_GETARG_TSVECTOR(2); */ + int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + bool *recheck = (bool *) PG_GETARG_POINTER(5); + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + /* bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); */ + bool res = false; + int i, + lastIndex = 0; + TmpNode nodes[256]; + + *recheck = false; + + for (i = 0; i < nkeys; i++) + { + unsigned char *ptr = (unsigned char *)VARDATA_ANY(DatumGetPointer(addInfo[i])), + *ptrEnd; + int size = VARSIZE_ANY_EXHDR(DatumGetPointer(addInfo[i])); + TmpNode *child = NULL; + + if (!check[i]) + continue; + + if (size == 0) + { + res = true; + break; + } + + ptrEnd = ptr + size; + while (ptr < ptrEnd) + { + uint32 num = decode_varbyte(&ptr), + sumVal = decode_varbyte(&ptr); + int sum, index; + bool not; + + not = (sumVal & 1) ? true : false; + sum = sumVal >> 2; + sum = (sumVal & 2) ? (-sum) : (sum); + + index = num - 1; + + if (child) + { + child->parent = index; + child->not = not; + } + + while (num > lastIndex) + { + nodes[lastIndex].parent = -2; + lastIndex++; + } + + if (nodes[index].parent == -2) + { + nodes[index].sum = sum; + nodes[index].parent = -1; + nodes[index].not = false; + } + if (!child) + { + if (not) + nodes[index].sum--; + else + nodes[index].sum++; + } + + child = &nodes[index]; + } + } + + for (i = lastIndex - 1; i >= 0; i--) + { + if (nodes[i].parent != -2) + { + if (nodes[i].sum > 0) + { + if (nodes[i].parent == -1) + { + res = true; + break; + } + else + { + int parent = nodes[i].parent; + nodes[parent].sum += nodes[i].not ? 1 : -1; + } + } + } + } + + elog(NOTICE, "%d", res); + + PG_RETURN_BOOL(res); +} + +PG_FUNCTION_INFO_V1(ruminv_tsquery_config); +Datum +ruminv_tsquery_config(PG_FUNCTION_ARGS) +{ + RumConfig *config = (RumConfig *)PG_GETARG_POINTER(0); + config->addInfoTypeOid = BYTEAOID; + PG_RETURN_VOID(); +} + From 974738a90f3918b418c310e73c141423a2da46d3 Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Sun, 15 May 2016 21:27:39 +0300 Subject: [PATCH 06/13] Fix. 
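
This fixes three problems in the initial rumtsquery.c: ruminv_extract_tsvector
used the offsets of the first WordEntry for every lexeme (we->pos/we->len)
rather than indexing per entry (we[i].pos/we[i].len), unexpectedly NULL
additional information is now reported with an error instead of being used
blindly, and the sign used when a satisfied child node is propagated into its
parent's counter is corrected.

A small check, assuming the test_invrum table sketched in the "Initial"
commit above; 'b c'::tsvector carries two lexemes, so stored queries over the
second one now participate in matching as well:

    SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector;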
--- rumtsquery.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/rumtsquery.c b/rumtsquery.c index 65290af569..2969abe16d 100644 --- a/rumtsquery.c +++ b/rumtsquery.c @@ -16,6 +16,7 @@ #include "tsearch/ts_utils.h" #include "utils/array.h" #include "utils/builtins.h" +#include "utils/bytea.h" #include "rum.h" @@ -225,11 +226,13 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) int index = context->index; context->entries[index] = PointerGetDatum(cstring_to_text_with_len(context->operand + wrap->distance, wrap->length)); + elog(NOTICE, "%s", text_to_cstring(DatumGetPointer(context->entries[index]))); while (wrap->parent) { QueryItemWrap *parent = wrap->parent; uint32 sum; + elog(NOTICE, "%d %d %d", parent->num, parent->sum, wrap->not); encode_varbyte((uint32) parent->num, &ptr); sum = (uint32)abs(parent->sum); sum <<= 2; @@ -338,7 +341,7 @@ ruminv_extract_tsvector(PG_FUNCTION_ARGS) { text *txt; - txt = cstring_to_text_with_len(STRPTR(vector) + we->pos, we->len); + txt = cstring_to_text_with_len(STRPTR(vector) + we[i].pos, we[i].len); entries[i] = PointerGetDatum(txt); } } @@ -364,7 +367,7 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); - /* bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); */ + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); bool res = false; int i, lastIndex = 0; @@ -374,14 +377,22 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) for (i = 0; i < nkeys; i++) { - unsigned char *ptr = (unsigned char *)VARDATA_ANY(DatumGetPointer(addInfo[i])), - *ptrEnd; - int size = VARSIZE_ANY_EXHDR(DatumGetPointer(addInfo[i])); + unsigned char *ptr, + *ptrEnd; + int size; TmpNode *child = NULL; if (!check[i]) continue; + if (addInfoIsNull[i]) + elog(ERROR, "Unexpected addInfoIsNull"); + + ptr = (unsigned char *)VARDATA_ANY(DatumGetPointer(addInfo[i])); + size = VARSIZE_ANY_EXHDR(DatumGetPointer(addInfo[i])); + +/* elog(NOTICE, "%d %s", i, DatumGetPointer(DirectFunctionCall1(byteaout, addInfo[i])));*/ + if (size == 0) { res = true; @@ -402,6 +413,8 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) index = num - 1; +/* elog(NOTICE, "a %d %d %d %d", i, index, sum, not);*/ + if (child) { child->parent = index; @@ -432,6 +445,11 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) } } +/* for (i = 0; i < lastIndex; i++) + { + elog(NOTICE, "s %d %d %d %d", i, nodes[i].sum, nodes[i].parent, nodes[i].not); + }*/ + for (i = lastIndex - 1; i >= 0; i--) { if (nodes[i].parent != -2) @@ -446,13 +464,13 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) else { int parent = nodes[i].parent; - nodes[parent].sum += nodes[i].not ? 1 : -1; + nodes[parent].sum += nodes[i].not ? -1 : 1; } } } } - elog(NOTICE, "%d", res); +/* elog(NOTICE, "%d", res);*/ PG_RETURN_BOOL(res); } From 46b48a54d7472def6288abda9105c2b5538a1228 Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Sun, 15 May 2016 23:44:19 +0300 Subject: [PATCH 07/13] Missing .gitignore. --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..5794c855a0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.deps +*.o +*.so \ No newline at end of file From f3c37f56efa7f610016b9a02c9a1ea3b6417e1cf Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Sun, 15 May 2016 23:44:37 +0300 Subject: [PATCH 08/13] Not handling. 
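
Handle purely negative tsqueries. check_allnegative() detects queries whose
result can be true when none of their lexemes occur in a document; such
queries additionally index a NULL entry, ruminv_extract_tsvector always probes
a NULL key, and the consistent function returns true for that key when no
regular key matched. A sketch of the behaviour this enables, matching the
expected output added later in this series:

    -- A purely negative stored query matches documents that do not contain
    -- the negated lexeme at all: '!a' is returned for 'c' but not for 'a'.
    INSERT INTO test_invrum VALUES ('!a'::tsquery);
    SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector;
    SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector;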
--- rumtsquery.c | 119 ++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 93 insertions(+), 26 deletions(-) diff --git a/rumtsquery.c b/rumtsquery.c index 2969abe16d..1411faad9d 100644 --- a/rumtsquery.c +++ b/rumtsquery.c @@ -149,6 +149,41 @@ calc_wraps(QueryItemWrap *wrap, int *num) return result; } +static bool +check_allnegative(QueryItemWrap *wrap) +{ + if (wrap->type == QI_VAL) + { + return wrap->not; + } + else if (wrap->oper == OP_AND) + { + int i; + for (i = 0; i < wrap->operandsCount; i++) + { + if (!check_allnegative(&wrap->operands[i])) + return false; + } + return true; + } + else if (wrap->oper == OP_OR) + { + int i; + for (i = 0; i < wrap->operandsCount; i++) + { + if (check_allnegative(&wrap->operands[i])) + return true; + } + return false; + } + else + { + elog(ERROR, "check_allnegative: invalid node"); + return false; + } + +} + #define MAX_ENCODED_LEN 5 /* @@ -221,12 +256,12 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) { if (wrap->type == QI_VAL) { - bytea *addinfo = (bytea *) palloc(VARHDRSZ + level * MAX_ENCODED_LEN); + bytea *addinfo = (bytea *) palloc(VARHDRSZ + Max(level, 1) * MAX_ENCODED_LEN); unsigned char *ptr = (unsigned char *)VARDATA(addinfo); int index = context->index; context->entries[index] = PointerGetDatum(cstring_to_text_with_len(context->operand + wrap->distance, wrap->length)); - elog(NOTICE, "%s", text_to_cstring(DatumGetPointer(context->entries[index]))); + elog(NOTICE, "%s", text_to_cstring(DatumGetTextP(context->entries[index]))); while (wrap->parent) { @@ -243,6 +278,11 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) encode_varbyte(sum, &ptr); wrap = parent; } + if (level == 0 && wrap->not) + { + encode_varbyte(1, &ptr); + encode_varbyte(4 | 1, &ptr); + } SET_VARSIZE(addinfo, ptr - (unsigned char *)addinfo); context->addInfo[index] = PointerGetDatum(addinfo); @@ -278,6 +318,7 @@ ruminv_extract_tsquery(PG_FUNCTION_ARGS) { TSQuery query = PG_GETARG_TSQUERY(0); int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + bool **nullFlags = (bool **) PG_GETARG_POINTER(2); Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); Datum *entries = NULL; @@ -285,13 +326,26 @@ ruminv_extract_tsquery(PG_FUNCTION_ARGS) QueryItemWrap *wrap; ExtractContext context; int num = 1; + bool extractNull; wrap = make_query_item_wrap(item, NULL, false); *nentries = calc_wraps(wrap, &num); + extractNull = check_allnegative(wrap); + if (extractNull) + (*nentries)++; entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); *addInfo = (Datum *) palloc(sizeof(Datum) * (*nentries)); *addInfoIsNull = (bool *) palloc(sizeof(bool) * (*nentries)); + if (extractNull) + { + int i; + *nullFlags = (bool *) palloc(sizeof(bool) * (*nentries)); + for (i = 0; i < *nentries - 1; i++) + (*nullFlags)[i] = false; + (*nullFlags)[*nentries - 1] = true; + (*addInfoIsNull)[*nentries - 1] = true; + } context.addInfo = *addInfo; context.addInfoIsNull = *addInfoIsNull; @@ -322,29 +376,32 @@ ruminv_extract_tsvector(PG_FUNCTION_ARGS) bool **ptr_partialmatch = (bool **) PG_GETARG_POINTER(3); Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); - /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */ + bool **nullFlags = (bool **) PG_GETARG_POINTER(5); int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); Datum *entries = NULL; - *nentries = vector->size; + *nentries = vector->size + 1; + *extra_data = NULL; + *ptr_partialmatch = NULL; + *searchMode = GIN_SEARCH_MODE_DEFAULT; + + 
entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); + *nullFlags = (bool *) palloc(sizeof(bool) * (*nentries)); if (vector->size > 0) { int i; WordEntry *we = ARRPTR(vector); - *extra_data = NULL; - *ptr_partialmatch = NULL; - *searchMode = GIN_SEARCH_MODE_DEFAULT; - - entries = (Datum *) palloc(sizeof(Datum) * vector->size); for (i = 0; i < vector->size; i++) { text *txt; txt = cstring_to_text_with_len(STRPTR(vector) + we[i].pos, we[i].len); entries[i] = PointerGetDatum(txt); + (*nullFlags)[i] = false; } } + (*nullFlags)[*nentries - 1] = true; PG_FREE_IF_COPY(vector, 0); PG_RETURN_POINTER(entries); } @@ -368,14 +425,15 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) bool *recheck = (bool *) PG_GETARG_POINTER(5); Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); - bool res = false; + bool res = false, + allFalse = true; int i, lastIndex = 0; TmpNode nodes[256]; *recheck = false; - for (i = 0; i < nkeys; i++) + for (i = 0; i < nkeys - 1; i++) { unsigned char *ptr, *ptrEnd; @@ -385,6 +443,8 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) if (!check[i]) continue; + allFalse = false; + if (addInfoIsNull[i]) elog(ERROR, "Unexpected addInfoIsNull"); @@ -445,26 +505,33 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) } } -/* for (i = 0; i < lastIndex; i++) + if (allFalse && check[nkeys - 1]) { - elog(NOTICE, "s %d %d %d %d", i, nodes[i].sum, nodes[i].parent, nodes[i].not); - }*/ - - for (i = lastIndex - 1; i >= 0; i--) + res = true; + } + else { - if (nodes[i].parent != -2) + /* for (i = 0; i < lastIndex; i++) { - if (nodes[i].sum > 0) + elog(NOTICE, "s %d %d %d %d", i, nodes[i].sum, nodes[i].parent, nodes[i].not); + }*/ + + for (i = lastIndex - 1; i >= 0; i--) + { + if (nodes[i].parent != -2) { - if (nodes[i].parent == -1) - { - res = true; - break; - } - else + if (nodes[i].sum > 0) { - int parent = nodes[i].parent; - nodes[parent].sum += nodes[i].not ? -1 : 1; + if (nodes[i].parent == -1) + { + res = true; + break; + } + else + { + int parent = nodes[i].parent; + nodes[parent].sum += nodes[i].not ? -1 : 1; + } } } } From 5c920b1dba70024eeb2dcd1ac217f5e5d2a35994 Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Mon, 16 May 2016 13:43:33 +0300 Subject: [PATCH 09/13] Fix multiple occurences of the single lexeme. 
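
extract_wraps() now looks for an existing entry with the same lexeme and, when
it finds one, grows that entry's additional information instead of emitting a
duplicate key, so a tsquery that mentions one lexeme several times produces a
single index entry carrying every encoded tree position. A hypothetical
example of such a query (not part of the regression suite):

    -- 'a' appears twice but is indexed as one entry with two encoded positions.
    INSERT INTO test_invrum VALUES ('a & (a | b)'::tsquery);
    SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector;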
--- rumtsquery.c | 92 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 64 insertions(+), 28 deletions(-) diff --git a/rumtsquery.c b/rumtsquery.c index 1411faad9d..75ebd9abd1 100644 --- a/rumtsquery.c +++ b/rumtsquery.c @@ -256,18 +256,47 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) { if (wrap->type == QI_VAL) { - bytea *addinfo = (bytea *) palloc(VARHDRSZ + Max(level, 1) * MAX_ENCODED_LEN); - unsigned char *ptr = (unsigned char *)VARDATA(addinfo); + bytea *addinfo; + unsigned char *ptr; int index = context->index; - context->entries[index] = PointerGetDatum(cstring_to_text_with_len(context->operand + wrap->distance, wrap->length)); - elog(NOTICE, "%s", text_to_cstring(DatumGetTextP(context->entries[index]))); + + for (index = 0; index < context->index; index++) + { + text *entry; + entry = DatumGetByteaP(context->entries[index]); + if (VARSIZE_ANY_EXHDR(entry) == wrap->length && + !memcmp(context->operand + wrap->distance, VARDATA_ANY(entry), wrap->length)) + break; + } + + if (index >= context->index) + { + index = context->index; + addinfo = (bytea *) palloc(VARHDRSZ + 2 * Max(level, 1) * MAX_ENCODED_LEN); + ptr = (unsigned char *) VARDATA(addinfo); + context->entries[index] = PointerGetDatum(cstring_to_text_with_len(context->operand + wrap->distance, wrap->length)); + context->addInfo[index] = PointerGetDatum(addinfo); + context->addInfoIsNull[index] = false; + context->index++; + /*ptrEnd = (unsigned char *) VARDATA(addinfo) + VARHDRSZ + 2 * Max(level, 1) * MAX_ENCODED_LEN;*/ + } + else + { + addinfo = DatumGetByteaP(context->addInfo[index]); + addinfo = (bytea *) repalloc(addinfo, + VARSIZE(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN); + context->addInfo[index] = PointerGetDatum(addinfo); + ptr = (unsigned char *) VARDATA(addinfo) + VARSIZE_ANY_EXHDR(addinfo); + /*ptrEnd = (unsigned char *) VARDATA(addinfo) + VARSIZE_ANY_EXHDR(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN;*/ + } + /*elog(NOTICE, "%s", text_to_cstring(DatumGetTextP(context->entries[index])));*/ while (wrap->parent) { QueryItemWrap *parent = wrap->parent; uint32 sum; - elog(NOTICE, "%d %d %d", parent->num, parent->sum, wrap->not); + /*elog(NOTICE, "%d %d %d", parent->num, parent->sum, wrap->not);*/ encode_varbyte((uint32) parent->num, &ptr); sum = (uint32)abs(parent->sum); sum <<= 2; @@ -283,11 +312,9 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) encode_varbyte(1, &ptr); encode_varbyte(4 | 1, &ptr); } + /*Assert(ptr <= ptrEnd);*/ SET_VARSIZE(addinfo, ptr - (unsigned char *)addinfo); - - context->addInfo[index] = PointerGetDatum(addinfo); - context->addInfoIsNull[index] = false; - context->index++; + /*elog(NOTICE, "%s", DatumGetPointer(DirectFunctionCall1(byteaout, PointerGetDatum(addinfo))));*/ } else if (wrap->type == QI_OPR) { @@ -325,27 +352,19 @@ ruminv_extract_tsquery(PG_FUNCTION_ARGS) QueryItem *item = GETQUERY(query); QueryItemWrap *wrap; ExtractContext context; - int num = 1; + int num = 1, + count; bool extractNull; wrap = make_query_item_wrap(item, NULL, false); - *nentries = calc_wraps(wrap, &num); + count = calc_wraps(wrap, &num); extractNull = check_allnegative(wrap); if (extractNull) - (*nentries)++; + count++; - entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); - *addInfo = (Datum *) palloc(sizeof(Datum) * (*nentries)); - *addInfoIsNull = (bool *) palloc(sizeof(bool) * (*nentries)); - if (extractNull) - { - int i; - *nullFlags = (bool *) palloc(sizeof(bool) * (*nentries)); - for (i = 0; i < *nentries - 1; i++) - 
(*nullFlags)[i] = false; - (*nullFlags)[*nentries - 1] = true; - (*addInfoIsNull)[*nentries - 1] = true; - } + entries = (Datum *) palloc(sizeof(Datum) * count); + *addInfo = (Datum *) palloc(sizeof(Datum) * count); + *addInfoIsNull = (bool *) palloc(sizeof(bool) * count); context.addInfo = *addInfo; context.addInfoIsNull = *addInfoIsNull; @@ -355,6 +374,20 @@ ruminv_extract_tsquery(PG_FUNCTION_ARGS) extract_wraps(wrap, &context, 0); + count = context.index; + if (extractNull) + { + int i; + + count++; + *nullFlags = (bool *) palloc(sizeof(bool) * count); + for (i = 0; i < count - 1; i++) + (*nullFlags)[i] = false; + (*nullFlags)[count - 1] = true; + (*addInfoIsNull)[count - 1] = true; + } + *nentries = count; + /* elog(NOTICE, "%d", *nentries); for (i = 0; i < *nentries; i++) { @@ -451,7 +484,7 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) ptr = (unsigned char *)VARDATA_ANY(DatumGetPointer(addInfo[i])); size = VARSIZE_ANY_EXHDR(DatumGetPointer(addInfo[i])); -/* elog(NOTICE, "%d %s", i, DatumGetPointer(DirectFunctionCall1(byteaout, addInfo[i])));*/ + /*elog(NOTICE, "%d %s", i, DatumGetPointer(DirectFunctionCall1(byteaout, addInfo[i])));*/ if (size == 0) { @@ -473,7 +506,7 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) index = num - 1; -/* elog(NOTICE, "a %d %d %d %d", i, index, sum, not);*/ + /*elog(NOTICE, "a %d %d %d %d", i, index, sum, not);*/ if (child) { @@ -501,7 +534,10 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) nodes[index].sum++; } - child = &nodes[index]; + if (index == 0) + child = NULL; + else + child = &nodes[index]; } } @@ -511,7 +547,7 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) } else { - /* for (i = 0; i < lastIndex; i++) + /*for (i = 0; i < lastIndex; i++) { elog(NOTICE, "s %d %d %d %d", i, nodes[i].sum, nodes[i].parent, nodes[i].not); }*/ From 4d6d293356c5199513c1d50c8f071f06d053724c Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Mon, 16 May 2016 14:12:02 +0300 Subject: [PATCH 10/13] Basic regression tests. 
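
sql/ruminv.sql builds a small test_invrum table of representative tsqueries
and checks the @@ operator against a range of tsvectors, first with
sequential scans and then through the rum index after SET enable_seqscan =
OFF; expected/ruminv.out records the results, and the Makefile now runs the
new suite alongside the existing one (REGRESS = rum ruminv). A representative
case, copied from the expected output:

    SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector;
    --      q
    -- ------------
    --  'a' | 'b'
    --  'a' & 'b'
    --  !'a' | 'b'
    -- (3 rows)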
--- Makefile | 2 +- expected/ruminv.out | 269 ++++++++++++++++++++++++++++++++++++++++++++ rumtsquery.c | 18 ++- sql/ruminv.sql | 45 ++++++++ 4 files changed, 327 insertions(+), 7 deletions(-) create mode 100644 expected/ruminv.out create mode 100644 sql/ruminv.sql diff --git a/Makefile b/Makefile index c14a31b4e0..fa53d6a848 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ EXTENSION = rum DATA = rum--1.0.sql PGFILEDESC = "RUM index access method" -REGRESS = rum +REGRESS = rum ruminv ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/expected/ruminv.out b/expected/ruminv.out new file mode 100644 index 0000000000..45d8581d10 --- /dev/null +++ b/expected/ruminv.out @@ -0,0 +1,269 @@ +CREATE TABLE test_invrum(q tsquery); +INSERT INTO test_invrum VALUES ('a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&b'::tsquery); +INSERT INTO test_invrum VALUES ('!(a|b)'::tsquery); +INSERT INTO test_invrum VALUES ('!(a&b)'::tsquery); +INSERT INTO test_invrum VALUES ('!a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&!b'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&c'::tsquery); +INSERT INTO test_invrum VALUES ('(!(a|b))&c'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&(c|d)'::tsquery); +INSERT INTO test_invrum VALUES ('!a'::tsquery); +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; + q +--- +(0 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; + q +------------ + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(6 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; + q +---------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +CREATE 
INDEX test_invrum_idx ON test_invrum USING rum(q); +SET enable_seqscan = OFF; +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; + q +--- +(0 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; + q +------------ + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(6 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; + q +---------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + diff --git a/rumtsquery.c b/rumtsquery.c index 75ebd9abd1..24892a28cc 100644 --- a/rumtsquery.c +++ b/rumtsquery.c @@ -413,18 +413,20 @@ ruminv_extract_tsvector(PG_FUNCTION_ARGS) int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); Datum *entries = NULL; - *nentries = vector->size + 1; - *extra_data = NULL; - *ptr_partialmatch = NULL; *searchMode = GIN_SEARCH_MODE_DEFAULT; - entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); - *nullFlags = (bool *) palloc(sizeof(bool) * (*nentries)); if (vector->size > 0) { int i; WordEntry *we = ARRPTR(vector); + *nentries = vector->size + 1; + *extra_data = NULL; + *ptr_partialmatch = NULL; + + entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); + *nullFlags = (bool *) palloc(sizeof(bool) * (*nentries)); + for (i = 0; i < vector->size; i++) { text *txt; @@ -433,8 +435,12 @@ ruminv_extract_tsvector(PG_FUNCTION_ARGS) entries[i] = PointerGetDatum(txt); (*nullFlags)[i] = false; } + (*nullFlags)[*nentries - 1] = true; + } + else + { + *nentries = 0; } - (*nullFlags)[*nentries - 1] = true; PG_FREE_IF_COPY(vector, 0); PG_RETURN_POINTER(entries); } diff --git a/sql/ruminv.sql b/sql/ruminv.sql new file mode 100644 index 
0000000000..1fbd8cebff --- /dev/null +++ b/sql/ruminv.sql @@ -0,0 +1,45 @@ +CREATE TABLE test_invrum(q tsquery); + +INSERT INTO test_invrum VALUES ('a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&b'::tsquery); +INSERT INTO test_invrum VALUES ('!(a|b)'::tsquery); +INSERT INTO test_invrum VALUES ('!(a&b)'::tsquery); +INSERT INTO test_invrum VALUES ('!a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&!b'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&c'::tsquery); +INSERT INTO test_invrum VALUES ('(!(a|b))&c'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&(c|d)'::tsquery); +INSERT INTO test_invrum VALUES ('!a'::tsquery); + +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + +CREATE INDEX test_invrum_idx ON test_invrum USING rum(q); +SET enable_seqscan = OFF; + +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; From b18d5ca47f72afb2f46499ef7ba420e38dce7ab3 Mon Sep 17 00:00:00 2001 From: Alexander Korotkov Date: Mon, 16 May 2016 14:15:08 +0300 Subject: [PATCH 11/13] Check for unsupported tsqueries. 
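
make_query_item_wrap() now raises a dedicated error for prefix operands
(previously the prefix flag was silently ignored) and for phrase operators
(previously reported as an invalid operator). The regression test covers both
cases:

    -- Both inserts are expected to fail until these query types are supported.
    INSERT INTO test_invrum VALUES ('a:*'::tsquery);
    -- ERROR:  Indexing of prefix tsqueries isn't supported yet
    INSERT INTO test_invrum VALUES ('a <-> b'::tsquery);
    -- ERROR:  Indexing of phrase tsqueries isn't supported yet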
--- expected/ruminv.out | 4 ++++ rumtsquery.c | 5 +++++ sql/ruminv.sql | 3 +++ 3 files changed, 12 insertions(+) diff --git a/expected/ruminv.out b/expected/ruminv.out index 45d8581d10..08955cceeb 100644 --- a/expected/ruminv.out +++ b/expected/ruminv.out @@ -267,3 +267,7 @@ SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; ( 'a' | 'b' ) & ( 'c' | 'd' ) (5 rows) +INSERT INTO test_invrum VALUES ('a:*'::tsquery); +ERROR: Indexing of prefix tsqueries isn't supported yet +INSERT INTO test_invrum VALUES ('a <-> b'::tsquery); +ERROR: Indexing of phrase tsqueries isn't supported yet diff --git a/rumtsquery.c b/rumtsquery.c index 24892a28cc..9ecb01598c 100644 --- a/rumtsquery.c +++ b/rumtsquery.c @@ -73,6 +73,10 @@ make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) { QueryOperand *operand = (QueryOperand *) item; QueryItemWrap *wrap = add_child(parent); + + if (operand->prefix) + elog(ERROR, "Indexing of prefix tsqueries isn't supported yet"); + wrap->type = QI_VAL; wrap->distance = operand->distance; wrap->length = operand->length; @@ -111,6 +115,7 @@ make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) } } case OP_PHRASE: + elog(ERROR, "Indexing of phrase tsqueries isn't supported yet"); default: elog(ERROR, "Invalid tsquery operator"); } diff --git a/sql/ruminv.sql b/sql/ruminv.sql index 1fbd8cebff..ec836fd165 100644 --- a/sql/ruminv.sql +++ b/sql/ruminv.sql @@ -43,3 +43,6 @@ SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + +INSERT INTO test_invrum VALUES ('a:*'::tsquery); +INSERT INTO test_invrum VALUES ('a <-> b'::tsquery); From 376610b61ce9d8e836e09001e2cd0bb9c921eded Mon Sep 17 00:00:00 2001 From: Artur Zakirov Date: Thu, 19 May 2016 20:39:48 +0300 Subject: [PATCH 12/13] A lot of fixes from Teodor Sigaev and Alexander Korotkov --- .gitignore | 4 + Makefile | 7 +- README.md | 4 +- data/tsts.data | 508 ++++++++++++++++++++++++++++++++++ expected/orderby.out | 190 +++++++++++++ expected/rum.out | 8 +- expected/ruminv.out | 273 +++++++++++++++++++ expected/timestamp.out | 132 +++++++++ rum--1.0.sql | 157 ++++++++++- rum.h | 93 +++++-- rum_timestamp.c | 256 ++++++++++++++++++ rum_ts_utils.c | 53 +++- rumbtree.c | 6 +- rumdatapage.c | 128 ++++++++- rumget.c | 96 ++++++- ruminsert.c | 67 ++++- rumscan.c | 118 ++++---- rumsort.c | 6 + rumsort.h | 1 + rumtsquery.c | 600 +++++++++++++++++++++++++++++++++++++++++ rumutil.c | 112 +++++++- rumvalidate.c | 25 +- sql/orderby.sql | 56 ++++ sql/rum.sql | 6 +- sql/ruminv.sql | 48 ++++ sql/timestamp.sql | 41 +++ 26 files changed, 2869 insertions(+), 126 deletions(-) create mode 100644 .gitignore create mode 100644 data/tsts.data create mode 100644 expected/orderby.out create mode 100644 expected/ruminv.out create mode 100644 expected/timestamp.out create mode 100644 rum_timestamp.c create mode 100644 rumtsquery.c create mode 100644 sql/orderby.sql create mode 100644 sql/ruminv.sql create mode 100644 sql/timestamp.sql diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..5e29f4675d --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.deps +*.o +*.so +results \ No newline at end of file diff --git a/Makefile b/Makefile index 97180053b2..10f3a8ad21 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,17 @@ # contrib/rum/Makefile MODULE_big = rum -OBJS = rumsort.o rum_ts_utils.o \ +OBJS = rumsort.o rum_ts_utils.o rumtsquery.o \ 
rumbtree.o rumbulk.o rumdatapage.o \ rumentrypage.o rumfast.o rumget.o ruminsert.o \ - rumscan.o rumutil.o rumvacuum.o rumvalidate.o $(WIN32RES) + rumscan.o rumutil.o rumvacuum.o rumvalidate.o \ + rum_timestamp.o $(WIN32RES) EXTENSION = rum DATA = rum--1.0.sql PGFILEDESC = "RUM index access method" -REGRESS = rum +REGRESS = rum ruminv timestamp orderby ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/README.md b/README.md index 7523676811..3d0b9519f8 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); And we can execute the following queries: ```sql -=> SELECT t, a >< to_tsquery('english', 'beautiful | place') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'beautiful | place') order by a >< to_tsquery('english', 'beautiful | place'); +=# SELECT t, a >< to_tsquery('english', 'beautiful | place') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'beautiful | place') order by a >< to_tsquery('english', 'beautiful | place'); t | rank ---------------------------------+----------- The situation is most beautiful | 0.0303964 @@ -75,7 +75,7 @@ And we can execute the following queries: It looks like a beautiful place | 0.0607927 (3 rows) -=> SELECT t, a >< to_tsquery('english', 'place | situation') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'place | situation') order by a >< to_tsquery('english', 'place | situation'); +=# SELECT t, a >< to_tsquery('english', 'place | situation') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'place | situation') order by a >< to_tsquery('english', 'place | situation'); t | rank ---------------------------------+----------- The situation is most beautiful | 0.0303964 diff --git a/data/tsts.data b/data/tsts.data new file mode 100644 index 0000000000..d77681507c --- /dev/null +++ b/data/tsts.data @@ -0,0 +1,508 @@ +1 2016-05-01 20:21:22.326724 +2 2016-05-01 21:21:22.326724 +3 2016-05-01 22:21:22.326724 +4 2016-05-01 23:21:22.326724 +5 2016-05-02 00:21:22.326724 +6 2016-05-02 01:21:22.326724 +7 2016-05-02 02:21:22.326724 +8 2016-05-02 03:21:22.326724 +9 'b9' 'c9' 'cc' 'cx' 'di' 'ec' 'eo' 'f5' 'fn' 'gd' 'ge' 'gh' 'hc' 'hy' 'i8' 'if' 'iq' 'ir' 'ix' 'ja' 'jl' 'jx' 'km' 'l5' 'l8' 'le' 'mm' 'oc' 'pr' 'qg' 'qo' 'rm' 'ro' 'rv' 'sm' 'td' 'th' 'tm' 'tx' 'u7' 'uc' 'ue' 'uk' 'ux' 'vw' 'wa' 'wb' 'wd' 'wh' 'wr' 'ww' 'xa' 'xj' 'xz' 'y4' 'yd' 'yl' 'ym' 'zf' 'zv' 2016-05-02 04:21:22.326724 +10 'a9' 'ag' 'bh' 'dk' 'dq' 'e3' 'e7' 'ea' 'ee' 'ej' 'et' 'ez' 'f1' 'gh' 'gn' 'gr' 'hb' 'hf' 'hh' 'hz' 'ip' 'iw' 'j7' 'jh' 'ki' 'kp' 'kx' 'ml' 'nu' 'ov' 'ow' 'pg' 'ph' 'pn' 'po' 'pu' 'qd' 'qe' 'qr' 'qs' 'qv' 'r2' 'r3' 'rb' 'rd' 'ry' 's3' 'sd' 'sr' 'su' 'sz' 'tp' 'ty' 'uh' 'un' 'v0' 'v4' 'w0' 'wa' 'wk' 'wq' 'wx' 'xe' 'y6' 'yd' 'yf' 'yj' 'yl' 'zt' 2016-05-02 05:21:22.326724 +11 'de' 'mc' 'q1' 'q8' 'qp' 'qt' 'rq' 'rv' 'sa' 'sn' 'u8' 'vs' 'w9' 'wo' 'wp' 'ww' 'yl' 2016-05-02 06:21:22.326724 +12 '4w' 'ad' 'ak' 'as' 'b3' 'bd' 'cy' 'dt' 'du' 'dy' 'e2' 'e4' 'en' 'es' 'et' 'ew' 'ex' 'f2' 'fl' 'fr' 'ft' 'gv' 'h1' 'ha' 'hd' 'hh' 'hs' 'hv' 'ic' 'io' 'ja' 'jm' 'k2' 'ku' 'kw' 'ld' 'ls' 'lx' 'ma' 'nu' 'ny' 'ov' 'pm' 'ps' 'q6' 'qa' 'qd' 'qo' 'qs' 'qv' 'qy' 'ra' 'rs' 'rt' 'rv' 'rw' 's8' 'sa' 'sw' 'tj' 'ug' 'ui' 'ur' 'ut' 'ux' 'v6' 'vt' 'w4' 'w7' 'wa' 'wb' 'wc' 'wg' 'wh' 'wi' 'wt' 'wu' 'wy' 'wz' 'x2' 'xb' 'xm' 'yg' 'yh' 'yi' 'yu' 'yy' 'z5' 'zg' 'zo' 2016-05-02 07:21:22.326724 +13 '2r' '7g' 'a1' 'ar' 'av' 'b8' 'dj' 'dr' 'e7' 'ee' 'fd' 'fn' 'fp' 'gf' 'gp' 'gq' 'gw' 'jm' 'jv' 'ke' 'ki' 'kl' 'l9' 'lc' 'lg' 'lh' 'nm' 
'oc' 'oq' 'px' 'q6' 'qe' 'qh' 'qu' 'rs' 's8' 't0' 'tt' 'tw' 'u2' 'uu' 'uy' 'vr' 'vs' 'wg' 'wt' 'ww' 'x3' 'x5' 'xu' 'yw' 'zh' 'zp' 'zt' 2016-05-02 08:21:22.326724 +14 'a9' 'av' 'aw' 'dj' 'e0' 'ej' 'er' 'eu' 'ez' 'fr' 'gq' 'hh' 'hp' 'ko' 'kq' 'lw' 'n8' 'pe' 'pl' 'pn' 'pp' 'pv' 'q9' 'qc' 'qj' 'qs' 'qw' 'r6' 'rl' 'tq' 'u5' 'uu' 'uz' 'wn' 'wp' 'wr' 'zo' 2016-05-02 09:21:22.326724 +15 '0p' 'ab' 'as' 'az' 'b5' 'bl' 'cn' 'cr' 'ct' 'cu' 'dq' 'dr' 'em' 'eq' 'fe' 'fj' 'fk' 'fy' 'gn' 'gx' 'h9' 'hg' 'hk' 'hr' 'io' 'iy' 'jh' 'la' 'lb' 'll' 'ln' 'lq' 'me' 'mp' 'mr' 'mv' 'ns' 'od' 'op' 'os' 'ov' 'po' 'pv' 'qa' 'qn' 'qp' 'qs' 'qt' 'r0' 'rc' 'rd' 'rl' 's5' 'si' 't7' 'tc' 'th' 'u0' 'um' 'uz' 'vg' 'vo' 'wa' 'wp' 'wu' 'xk' 'yg' 'yh' 'yi' 'yo' 'yy' 'zy' 2016-05-02 10:21:22.326724 +16 'a3' 'a5' 'ai' 'av' 'ay' 'c3' 'cp' 'd3' 'dn' 'du' 'dz' 'e9' 'ed' 'ei' 'ek' 'em' 'eo' 'ev' 'f2' 'gh' 'gl' 'gy' 'h6' 'h8' 'hb' 'he' 'hg' 'hm' 'hp' 'hy' 'hz' 'id' 'ie' 'it' 'ix' 'j2' 'ja' 'jc' 'jf' 'k5' 'ki' 'kk' 'kv' 'l4' 'l6' 'ls' 'm9' 'mg' 'mt' 'nb' 'ng' 'oh' 'ot' 'oz' 'pw' 'py' 'pz' 'q0' 'q3' 'q8' 'qb' 'qh' 'qj' 'qk' 'ql' 'qp' 'qq' 'qr' 'qt' 'qw' 'rb' 'rz' 's3' 'sl' 'tb' 'u0' 'u5' 'uo' 'us' 'vc' 'wg' 'wh' 'wj' 'wr' 'wt' 'wu' 'wy' 'x1' 'y1' 'yh' 'z7' 'zf' 'zs' 'zu' 2016-05-02 11:21:22.326724 +17 'bt' 'c5' 'd1' 'd7' 'g6' 'gk' 'ib' 'iv' 'ml' 'om' 'qd' 'qg' 'ql' 'r3' 'rc' 'u4' 'uh' 'uo' 'wh' 'y0' 'yn' 'yz' 'zo' 2016-05-02 12:21:22.326724 +18 '7a' 'a3' 'dl' 'fo' 'hb' 'ki' 'kk' 'lo' 'pl' 'q5' 'qy' 'r1' 'rj' 'sy' 'tv' 'w3' 'wm' 'wn' 'yj' 'yr' 'zi' 'zq' 2016-05-02 13:21:22.326724 +19 'av' 'aw' 'bl' 'bt' 'cj' 'cx' 'df' 'ea' 'ed' 'ee' 'ef' 'ew' 'f5' 'fq' 'ft' 'fu' 'g6' 'gl' 'gs' 'ha' 'hj' 'hr' 'i8' 'ia' 'ic' 'ir' 'iz' 'j0' 'j5' 'jp' 'ju' 'k6' 'ki' 'lk' 'lz' 'mh' 'nl' 'o4' 'ob' 'oo' 'op' 'pf' 'po' 'pq' 'pr' 'q2' 'q3' 'q6' 'qd' 'qj' 'qk' 'qn' 'qt' 'qw' 'ro' 'rr' 'ru' 'rv' 'sc' 'sw' 'sy' 'sz' 't2' 'tb' 'tg' 'tl' 'tp' 'tv' 'u4' 'u9' 'um' 'uu' 'vj' 'vt' 'vu' 'wg' 'wh' 'wj' 'wo' 'wq' 'wt' 'xd' 'y1' 'yc' 'ye' 'yl' 'yw' 'zb' 'zd' 'zm' 'zr' 2016-05-02 14:21:22.326724 +20 'ja' 'k2' 'ko' 'o6' 'ob' 'ps' 't9' 'tz' 'uk' 'um' 'vv' 2016-05-02 15:21:22.326724 +21 'dq' 'ed' 'el' 'fg' 'fv' 'gh' 'hx' 'i2' 'iv' 'jc' 'la' 'ld' 'nb' 'oa' 'of' 'oi' 'py' 'q7' 'qe' 'qr' 'qs' 'qw' 'qx' 'qz' 'rl' 'rr' 'sp' 't3' 'te' 'ti' 'tu' 'ue' 'w0' 'wa' 'wq' 'wu' 'xn' 2016-05-02 16:21:22.326724 +22 'ah' 'ao' 'aq' 'aw' 'az' 'b5' 'bd' 'bg' 'bj' 'bq' 'bu' 'cc' 'cv' 'dt' 'e0' 'e2' 'e4' 'ec' 'eu' 'fd' 'fi' 'gg' 'gu' 'ha' 'hb' 'he' 'hj' 'hy' 'ih' 'in' 'iv' 'ji' 'js' 'ks' 'l1' 'la' 'lg' 'mh' 'mi' 'mk' 'o7' 'oh' 'oj' 'om' 'on' 'op' 'oq' 'or' 'os' 'p1' 'pe' 'pq' 'pv' 'q8' 'qe' 'qg' 'qj' 'ql' 'qm' 'qn' 'qo' 'qr' 'qs' 'qt' 'qv' 'qy' 'r1' 'r9' 'rb' 'rl' 'ro' 'ru' 'rw' 'ry' 's0' 's6' 'sn' 't0' 't1' 'tr' 'ty' 'u0' 'u8' 'up' 'uv' 'vn' 'wb' 'wg' 'ww' 'x1' 'yb' 'yj' 'ym' 'yx' 'zz' 2016-05-02 17:21:22.326724 +23 'a9' 'aa' 'aq' 'ay' 'bb' 'cg' 'db' 'de' 'do' 'e4' 'e5' 'e9' 'ek' 'ep' 'er' 'fi' 'fp' 'gb' 'hj' 'hk' 'hw' 'ir' 'iy' 'l1' 'lj' 'mb' 'nq' 'oi' 'pj' 'px' 'q9' 'qe' 'qu' 'qw' 'qz' 'rh' 'rl' 'sv' 'tc' 'ts' 'tv' 'uo' 'ur' 'vh' 'vp' 'w7' 'wf' 'wh' 'wo' 'wr' 'xu' 'z4' 'zc' 'zv' 2016-05-02 18:21:22.326724 +24 '6u' 'a5' 'ai' 'au' 'b1' 'be' 'bg' 'by' 'ce' 'co' 'cw' 'db' 'dw' 'e8' 'ec' 'em' 'en' 'er' 'f7' 'fh' 'fu' 'gb' 'gq' 'h4' 'hf' 'hy' 'hz' 'ig' 'iq' 'kj' 'kk' 'kp' 'ky' 'ld' 'lr' 'na' 'nd' 'ny' 'o8' 'o9' 'og' 'oh' 'ok' 'ot' 'ou' 'ow' 'ox' 'p8' 'pl' 'pp' 'ps' 'px' 'q0' 'q1' 'qa' 'qm' 'qr' 'qs' 'qx' 'r9' 'rd' 're' 'rf' 'rl' 'rn' 'rr' 'ru' 'rz' 'sc' 'sn' 'so' 't1' 'tl' 'tr' 'ts' 'tt' 'tx' 'ub' 'um' 
'un' 'uo' 'ut' 'vr' 'wa' 'wb' 'wc' 'wj' 'wl' 'wp' 'wq' 'ws' 'wx' 'wz' 'yp' 'yt' 'yy' 'zc' 2016-05-02 19:21:22.326724 +25 '2v' '7j' '7r' 'a7' 'aj' 'av' 'br' 'cg' 'd8' 'ei' 'en' 'ez' 'f5' 'fb' 'go' 'h0' 'hf' 'hk' 'hm' 'i9' 'ia' 'ig' 'ik' 'im' 'iq' 'it' 'iv' 'ja' 'je' 'ji' 'ks' 'lo' 'mo' 'mq' 'nj' 'o5' 'of' 'on' 'ot' 'ox' 'p1' 'p6' 'pb' 'ph' 'pj' 'pn' 'pp' 'pq' 'pr' 'qa' 'qh' 'qj' 'qm' 'qn' 'qo' 'qu' 'r2' 'ro' 's1' 'sa' 'se' 'sp' 't6' 'ti' 'tu' 'u2' 'ug' 'uh' 'ui' 'ur' 'uy' 'v9' 'wl' 'wn' 'wt' 'wv' 'wy' 'wz' 'xm' 'xs' 'ya' 'yi' 'yu' 'z8' 'zl' 2016-05-02 20:21:22.326724 +26 '27' '2e' '2p' 'aa' 'eh' 'en' 'eq' 'eu' 'ew' 'ff' 'g8' 'hv' 'mx' 'oi' 'pd' 'q3' 'qs' 'rl' 'sa' 'sw' 'te' 'tn' 'ty' 'uh' 'uo' 'wb' 'wh' 'wy' 'xx' 'z8' 'zt' 2016-05-02 21:21:22.326724 +27 'c0' 'd4' 'dw' 'ef' 'em' 'j2' 'kj' 'ql' 'rn' 'ta' 'uk' 'vv' 'wf' 'y9' 'zq' 2016-05-02 22:21:22.326724 +28 'by' 'c1' 'da' 'ec' 'ej' 'ew' 'g4' 'gq' 'gt' 'ir' 'jv' 'kl' 'kz' 'l5' 'nm' 'oi' 'on' 'pc' 'q2' 'qx' 'qz' 'rd' 'se' 'sq' 'ti' 'tk' 'to' 'ur' 'w8' 'wm' 'wr' 'y1' 'yn' 'yp' 'zy' 2016-05-02 23:21:22.326724 +29 '6d' '8t' 'bl' 'gu' 'iu' 'kd' 'kj' 'kq' 'nw' 'nx' 'o6' 'oa' 'qk' 'ql' 'rd' 'ri' 'uc' 'vi' 'wy' 'xq' 'z2' 2016-05-03 00:21:22.326724 +30 'ao' 'ei' 'ep' 'fc' 'fm' 'gr' 'ha' 'he' 'i5' 'ia' 'if' 'kh' 'lv' 'ml' 'ms' 'n9' 'nh' 'nl' 'oe' 'of' 'ow' 'pb' 'pu' 'pw' 'q0' 'qr' 'qt' 'qu' 'qv' 'qw' 'ra' 'sv' 'ti' 'u0' 'u4' 'ug' 'uq' 'vx' 'w1' 'wy' 'x1' 'xh' 'xz' 2016-05-03 01:21:22.326724 +31 'ah' 'ai' 'b8' 'ci' 'd2' 'ea' 'ed' 'en' 'et' 'fj' 'fv' 'ht' 'iu' 'iv' 'iw' 'jb' 'jd' 'ky' 'mj' 'nt' 'q1' 'qb' 'qe' 'ql' 'qz' 'ru' 'sg' 'tl' 'tm' 'tn' 'tw' 'ty' 'u1' 'ud' 'us' 'wj' 'y1' 'zd' 'zj' 2016-05-03 02:21:22.326724 +32 '5z' 'ad' 'al' 'ax' 'b4' 'bx' 'd7' 'da' 'dp' 'dz' 'e3' 'ef' 'en' 'es' 'et' 'ev' 'ex' 'f6' 'fj' 'fr' 'ft' 'fv' 'h9' 'hn' 'hx' 'i0' 'ii' 'it' 'jf' 'jk' 'jl' 'ka' 'kr' 'l9' 'lf' 'm3' 'mm' 'nc' 'nd' 'nr' 'o8' 'oe' 'ok' 'om' 'pd' 'ph' 'pz' 'qd' 'qf' 'qo' 'qp' 'qw' 'qx' 'qy' 'r6' 'rc' 'rg' 'rw' 'rx' 's8' 'se' 't3' 'tb' 'tc' 'ti' 'tl' 'to' 'tw' 'ub' 'ue' 'uf' 'uy' 'vf' 'vh' 'vr' 'w0' 'w9' 'wd' 'wf' 'wh' 'wp' 'wu' 'x0' 'x8' 'y2' 'ye' 'yq' 'ys' 'yt' 'z6' 'zf' 'zp' 2016-05-03 03:21:22.326724 +33 'az' 'bl' 'dl' 'ec' 'gg' 'jq' 'o0' 'oj' 'q8' 'qq' 'ta' 'un' 'wb' 'wo' 'xu' 2016-05-03 04:21:22.326724 +34 '9a' 'aa' 'bo' 'br' 'bv' 'bz' 'c1' 'c7' 'cz' 'db' 'dh' 'dj' 'e4' 'e8' 'eh' 'ew' 'gd' 'gg' 'gp' 'gu' 'gx' 'hc' 'ho' 'ht' 'hv' 'ia' 'ii' 'ij' 'ir' 'iw' 'j5' 'jk' 'jv' 'la' 'lc' 'lp' 'lt' 'lw' 'na' 'nt' 'nv' 'nw' 'o8' 'o9' 'ou' 'oy' 'p0' 'pc' 'pq' 'py' 'q4' 'q8' 'qi' 'qk' 'qn' 'qr' 'qt' 'qw' 'r5' 'r8' 're' 'rj' 'rt' 'rv' 'ry' 'rz' 's8' 'sg' 'sv' 'ta' 'td' 'tl' 'tm' 'ul' 'up' 'ut' 'uz' 'vf' 'w1' 'wb' 'wc' 'wd' 'wf' 'wi' 'wl' 'wu' 'x8' 'xg' 'xm' 'y1' 'yl' 'yr' 'ys' 'z8' 2016-05-03 05:21:22.326724 +35 '80' 'ak' 'al' 'an' 'aq' 'as' 'at' 'bg' 'bn' 'bq' 'bx' 'd4' 'db' 'dg' 'dq' 'dv' 'ef' 'ej' 'en' 'eu' 'ex' 'f9' 'fu' 'g1' 'g2' 'g3' 'gj' 'gn' 'gs' 'gt' 'gx' 'h4' 'h8' 'hb' 'hn' 'hx' 'hy' 'i5' 'im' 'ji' 'jo' 'jy' 'k1' 'kb' 'kh' 'kp' 'kv' 'l4' 'lf' 'lt' 'ml' 'mv' 'na' 'ny' 'o2' 'oj' 'oq' 'or' 'os' 'pp' 'q3' 'q8' 'qc' 'qf' 'qp' 'qt' 'qv' 'rc' 'rg' 'rm' 's4' 'sa' 'sk' 'sp' 'su' 'sv' 'sy' 't5' 'te' 'tm' 'tn' 'to' 'tt' 'tw' 'tx' 'tz' 'ue' 'ug' 'ul' 'un' 'uq' 'us' 'v3' 'wd' 'wr' 'wu' 'wy' 'x3' 'x8' 'xk' 'yf' 'yj' 'yk' 'yp' 'z0' 2016-05-03 06:21:22.326724 +36 'cq' 'er' 'hy' 'ie' 'jg' 'ke' 'lw' 'mf' 'p0' 'pe' 'pv' 'qk' 'qt' 'qy' 'sh' 'th' 'ti' 'ue' 'w5' 'wl' 'y4' 'y5' 'yi' 'yy' 'za' 2016-05-03 07:21:22.326724 +37 '3d' 'at' 'aw' 'bt' 'by' 'cg' 'dk' 'do' 'ec' 'eu' 'gv' 'is' 
'k9' 'kb' 'kc' 'me' 'mm' 'o5' 'oe' 'pr' 'qa' 'qn' 'rc' 'rl' 'rn' 'rw' 'sc' 'sk' 'st' 't1' 't7' 'td' 'to' 'ue' 'up' 'ur' 'uy' 've' 'wi' 'xf' 'xu' 'yf' 'z9' 'zo' 'zr' 2016-05-03 08:21:22.326724 +38 '0z' 'ae' 'br' 'cr' 'dd' 'di' 'du' 'e7' 'ea' 'eb' 'ee' 'ej' 'em' 'eo' 'fo' 'fs' 'gc' 'gf' 'gj' 'hu' 'iv' 'kc' 'lb' 'nw' 'o9' 'ov' 'oz' 'pq' 'qq' 'ro' 'rr' 'rt' 'ru' 'rw' 's4' 'te' 'tm' 'tq' 'um' 'v7' 'va' 'vi' 'vr' 'wb' 'wg' 'wo' 'wp' 'wr' 'ww' 'wy' 'xv' 'y1' 'y9' 'ya' 'ym' 'yt' 'yu' 'z5' 'zl' 'zs' 2016-05-03 09:21:22.326724 +39 'a7' 'ab' 'ba' 'bv' 'e7' 'ea' 'eh' 'ep' 'er' 'es' 'f4' 'gq' 'hg' 'hi' 'hs' 'ip' 'iw' 'je' 'k0' 'kh' 'kn' 'ko' 'lo' 'ly' 'm6' 'n2' 'o1' 'oh' 'on' 'op' 'os' 'pi' 'q6' 'q9' 'qh' 'qj' 'qm' 'qn' 'qp' 'qs' 's7' 'sa' 'sb' 'sc' 'tn' 'tq' 'tt' 'u4' 'un' 'ux' 've' 'w1' 'wd' 'wf' 'wj' 'wl' 'wr' 'wu' 'wy' 'xf' 'xx' 'y3' 'y9' 'yk' 'zh' 'zn' 'zo' 'zr' 2016-05-03 10:21:22.326724 +40 '19' 'a4' 'a7' 'ae' 'ar' 'as' 'bj' 'bp' 'bv' 'cp' 'd8' 'dp' 'e0' 'e8' 'ea' 'ee' 'ek' 'ep' 'es' 'et' 'ez' 'fp' 'gu' 'h2' 'h4' 'he' 'hp' 'ii' 'im' 'in' 'lc' 'lk' 'lu' 'lv' 'ma' 'od' 'oe' 'p7' 'pm' 'q7' 'qa' 'qe' 'qf' 'qi' 'qr' 'qu' 'rc' 'rd' 're' 'rj' 'rl' 'ro' 'tj' 'tw' 'v4' 'vb' 'w7' 'wc' 'wg' 'wi' 'wm' 'wu' 'y4' 'yj' 'yv' 'z1' 'zt' 'zu' 2016-05-03 11:21:22.326724 +41 'at' 'b7' 'be' 'c3' 'cc' 'cd' 'cj' 'e8' 'eb' 'ei' 'fd' 'g8' 'gh' 'go' 'gv' 'if' 'ih' 'j7' 'jy' 'kj' 'lh' 'mc' 'oe' 'og' 'oy' 'oz' 'pm' 'qd' 'qe' 'qo' 'rg' 'rq' 'rx' 'se' 'su' 't3' 't6' 't9' 'u5' 'us' 'vw' 'w4' 'wb' 'wg' 'ws' 'xw' 'yu' 'yx' 'zj' 2016-05-03 12:21:22.326724 +42 '19' '1t' 'aa' 'ah' 'ak' 'an' 'aq' 'bx' 'cc' 'dd' 'ed' 'ee' 'ei' 'ej' 'en' 'er' 'es' 'eu' 'fj' 'fs' 'fy' 'gd' 'gs' 'hw' 'ie' 'ig' 'ix' 'jc' 'jj' 'jk' 'jq' 'kb' 'kv' 'li' 'mr' 'no' 'nq' 'o9' 'oo' 'os' 'oy' 'p2' 'pd' 'ph' 'pn' 'pp' 'ql' 'qr' 'qy' 'r1' 'rb' 'rd' 'rn' 'ru' 's9' 'sf' 'tk' 'to' 'u1' 'u5' 'ub' 'ud' 'uf' 'vh' 'vx' 'w1' 'w8' 'wg' 'wn' 'wq' 'wu' 'wy' 'xc' 'xm' 'xw' 'y8' 'yk' 'yr' 'z3' 'zm' 2016-05-03 13:21:22.326724 +43 '7h' 'an' 'b6' 'cl' 'e1' 'e4' 'en' 'ey' 'ff' 'gn' 'h7' 'ht' 'i5' 'i9' 'ia' 'iw' 'jx' 'kl' 'ku' 'op' 'pe' 'pt' 'qb' 'ql' 'qp' 'qy' 'ri' 'se' 'si' 'ta' 'ti' 'tl' 'tp' 'uo' 'vr' 'w4' 'wn' 'xi' 'xs' 'ym' 2016-05-03 14:21:22.326724 +44 'bv' 'cv' 'dy' 'ec' 'ef' 'eg' 'eh' 'eu' 'ew' 'fc' 'fn' 'fs' 'g9' 'hj' 'hk' 'i3' 'iu' 'ja' 'k6' 'l1' 'la' 'nl' 'oh' 'oi' 'oy' 'pc' 'qe' 'ql' 'qq' 'qs' 'qu' 're' 'sf' 'sq' 'tb' 'up' 'w1' 'w2' 'wg' 'wi' 'xd' 'xt' 'xv' 'yf' 'zn' 'zz' 2016-05-03 15:21:22.326724 +45 '2f' 'af' 'al' 'at' 'bc' 'bo' 'cd' 'd6' 'dk' 'dp' 'dy' 'ea' 'eu' 'ev' 'ey' 'fe' 'ft' 'gd' 'gi' 'ha' 'hm' 'hr' 'i6' 'im' 'is' 'iz' 'jb' 'jt' 'ju' 'li' 'm4' 'my' 'na' 'nm' 'nu' 'oh' 'p4' 'pa' 'pn' 'po' 'qk' 'qz' 'r4' 're' 'rf' 'rs' 'sm' 'tb' 'ti' 'to' 'tw' 'u7' 'ut' 'vz' 'w3' 'w5' 'w9' 'wc' 'wj' 'wq' 'wr' 'x7' 'xg' 'xz' 'ye' 'yx' 'ze' 'zo' 2016-05-03 16:21:22.326724 +46 '2l' '55' 'aq' 'aw' 'cm' 'dq' 'eb' 'fl' 'fo' 'fp' 'fy' 'gk' 'hy' 'ic' 'iu' 'jc' 'jl' 'lc' 'mb' 'nx' 'oe' 'oo' 'oz' 'p5' 'pa' 'pf' 'qr' 'rj' 'sc' 'sz' 'td' 'tz' 'u1' 'vq' 'wu' 'xb' 'xx' 'y0' 'za' 'zc' 2016-05-03 17:21:22.326724 +47 'bt' 'dy' 'gy' 'hg' 'i7' 'it' 'jv' 'lh' 'ox' 'qo' 'ri' 's3' 'ss' 'u1' 'uq' 'wr' 2016-05-03 18:21:22.326724 +48 '3v' 'ab' 'at' 'bn' 'bz' 'cx' 'eh' 'gd' 'hd' 'hr' 'i9' 'j8' 'jg' 'jx' 'lf' 'ng' 'oj' 'ov' 'oz' 'pn' 'ps' 'qq' 'qr' 'ro' 'rt' 'u9' 'up' 'uz' 'yl' 'yw' 'zi' 2016-05-03 19:21:22.326724 +49 '5y' 'af' 'av' 'az' 'cj' 'cp' 'cq' 'dk' 'e6' 'e7' 'ea' 'ec' 'ee' 'ei' 'ek' 'em' 'ga' 'gk' 'gw' 'hc' 'id' 'ie' 'kp' 'kv' 'lw' 'm0' 'mf' 'mn' 'mw' 'ny' 'ob' 'ol' 'p9' 'ph' 'pk' 'pp' 'pw' 
'q0' 'qg' 'qi' 'ql' 'qv' 'qx' 'r3' 'rg' 'ry' 'se' 'sh' 'sq' 'sz' 't1' 'tu' 'u2' 'uh' 'uj' 'uk' 'ut' 'w2' 'wb' 'ww' 'wx' 'yf' 'yo' 'zr' 2016-05-03 20:21:22.326724 +50 '18' 'a6' 'af' 'ax' 'du' 'dy' 'e7' 'ei' 'ej' 'es' 'eu' 'fj' 'gt' 'hn' 'hu' 'ky' 'le' 'lo' 'm8' 'nb' 'of' 'oj' 'ok' 'op' 'oz' 'pw' 'q7' 'qi' 'qu' 'r4' 'rp' 's7' 'sk' 'tb' 'tf' 'tp' 'ua' 'um' 'up' 'ur' 'uz' 'w4' 'wc' 'wn' 'wv' 'wx' 'wy' 'yc' 'yq' 'yx' 2016-05-03 21:21:22.326724 +51 'ag' 'ay' 'b6' 'bk' 'c6' 'cp' 'da' 'ea' 'eh' 'es' 'f1' 'fj' 'fv' 'gb' 'gn' 'ic' 'id' 'in' 'iq' 'iw' 'kx' 'ly' 'n7' 'o5' 'oc' 'og' 'op' 'os' 'pa' 'pr' 'q4' 'q6' 'qf' 'qg' 'qh' 'qp' 'qq' 'qu' 'qw' 'qy' 'qz' 'rj' 'rm' 'ro' 's1' 'w2' 'we' 'wl' 'wn' 'wp' 'yj' 'yp' 'yx' 2016-05-03 22:21:22.326724 +52 'cr' 'd3' 'e5' 'ei' 'fb' 'gd' 'gx' 'hm' 'it' 'jp' 'ku' 'lf' 'lh' 'lp' 'ma' 'md' 'nt' 'o9' 'ot' 'pk' 'pm' 'ps' 'qc' 'qi' 'qp' 'qt' 'ra' 'rk' 'rq' 'rt' 'tb' 'tc' 'tg' 'to' 'u8' 'vg' 'vj' 'vt' 'w0' 'wr' 'wx' 'yd' 'zx' 2016-05-03 23:21:22.326724 +53 '4z' 'a8' 'ds' 'dv' 'dx' 'eq' 'f1' 'fs' 'gs' 'h9' 'hm' 'hp' 'hu' 'hz' 'i6' 'ip' 'is' 'j4' 'jf' 'k3' 'ku' 'lc' 'le' 'lj' 'nb' 'o6' 'ob' 'of' 'p6' 'pj' 'pm' 'ps' 'qc' 'qf' 'qh' 'ql' 'qr' 'qu' 'qx' 'r3' 'rf' 'ri' 's1' 's4' 'sr' 'st' 'sw' 'tb' 'tc' 'tg' 'tr' 'tz' 'ug' 'uq' 'uu' 'uv' 'uw' 'uz' 'vl' 'w2' 'w4' 'wq' 'wx' 'xe' 'xv' 'xx' 'y6' 'yg' 'yn' 'yp' 'yr' 2016-05-04 00:21:22.326724 +54 '70' 'ai' 'bj' 'bm' 'cj' 'ct' 'eb' 'ef' 'ek' 'es' 'fg' 'gh' 'gu' 'hg' 'ia' 'iu' 'iw' 'jt' 'ko' 'kq' 'mb' 'n1' 'o1' 'ox' 'pa' 'pr' 'q4' 'qd' 'qo' 'qr' 'qt' 'qw' 'qz' 'r3' 'r5' 'rg' 'ri' 'rj' 'rn' 'rp' 's7' 'sn' 't9' 'tq' 'tx' 'ty' 'uj' 'v0' 'vt' 'vz' 'w1' 'ww' 'xb' 'z6' 'zi' 2016-05-04 01:21:22.326724 +55 'aa' 'al' 'e9' 'hm' 'ir' 'kc' 'l0' 'pi' 'po' 'qa' 'qk' 'r0' 'rd' 'rz' 'se' 'sr' 'vp' 'w6' 'w8' 'yd' 'yk' 2016-05-04 02:21:22.326724 +56 '27' '3p' '6r' 'a8' 'an' 'ap' 'au' 'ay' 'bi' 'c6' 'cy' 'dh' 'ds' 'eg' 'eh' 'ej' 'es' 'eu' 'fm' 'fp' 'fq' 'fx' 'g2' 'gg' 'gv' 'hz' 'ij' 'il' 'iq' 'iw' 'j8' 'j9' 'k3' 'kr' 'lq' 'm7' 'm9' 'mj' 'n0' 'n6' 'nr' 'nx' 'ox' 'pq' 'qc' 'qd' 'qe' 'qg' 'qh' 'qj' 'qk' 'qm' 'qn' 'qr' 'qu' 'qv' 'qw' 'qx' 're' 'rg' 'rr' 'rz' 's1' 'sj' 'sl' 'sy' 't4' 't7' 'tb' 'tr' 'uc' 'un' 'uq' 'ut' 'vf' 'w7' 'w9' 'wg' 'wh' 'wp' 'wq' 'ws' 'wx' 'x8' 'xp' 'xy' 'y0' 'y1' 'ya' 'yu' 'yz' 'z2' 'z5' 'zz' 2016-05-04 03:21:22.326724 +57 'a7' 'a9' 'al' 'aq' 'ar' 'as' 'b2' 'cj' 'cl' 'co' 'cu' 'cv' 'dc' 'e3' 'e5' 'e7' 'ea' 'ee' 'eh' 'en' 'es' 'ev' 'ez' 'fl' 'fp' 'gm' 'hm' 'it' 'iu' 'iv' 'j8' 'jd' 'jo' 'kn' 'ko' 'ky' 'ln' 'mr' 'nc' 'o2' 'ok' 'ot' 'pc' 'pe' 'pz' 'q2' 'qe' 'qi' 'qq' 'qr' 'qs' 'r8' 'rm' 'rn' 'ro' 'rx' 'sz' 'te' 'tn' 'tq' 'tx' 'u1' 'ub' 'ul' 'up' 'uq' 'wf' 'wh' 'wk' 'wl' 'wo' 'wq' 'wt' 'wu' 'wv' 'xh' 'xz' 'y5' 'y9' 'yv' 'yx' 'yz' 'zj' 2016-05-04 04:21:22.326724 +58 'am' 'hw' 'jd' 'na' 'oe' 'pw' 'ql' 'qr' 's9' 'sk' 'u7' 'wa' 'wg' 'wu' 'x6' 'xr' 'yf' 2016-05-04 05:21:22.326724 +59 '2n' 'aa' 'ab' 'ae' 'ah' 'aj' 'as' 'av' 'ax' 'bc' 'be' 'bi' 'by' 'cg' 'ck' 'cm' 'cx' 'dq' 'dr' 'e5' 'ed' 'ef' 'ei' 'em' 'eu' 'fd' 'fq' 'fu' 'gd' 'gl' 'gs' 'he' 'ia' 'iz' 'jc' 'je' 'jt' 'k7' 'km' 'ko' 'l4' 'lh' 'mk' 'nl' 'ny' 'oa' 'oh' 'op' 'p1' 'pj' 'pm' 'ps' 'q1' 'q7' 'qc' 'qg' 'qj' 'qo' 'qr' 'qt' 'qu' 'qv' 'qw' 'rf' 'ri' 'rl' 'rw' 'sf' 'su' 't4' 'tc' 'tn' 'to' 'tq' 'tr' 'tt' 'tv' 'tz' 'u0' 'u6' 'ub' 'ug' 'up' 'uv' 'ux' 'uy' 'vv' 'w2' 'wx' 'xi' 'xu' 'xw' 'yj' 'yt' 'yw' 'z3' 'z7' 'zb' 2016-05-04 06:21:22.326724 +60 'bq' 'cc' 'cg' 'dc' 'fu' 'ho' 'ja' 'jt' 'km' 'lk' 'ln' 'o2' 'ob' 'ou' 'pd' 'ph' 'pn' 'pt' 'pz' 'q5' 'qa' 'ql' 'qq' 'qv' 'rk' 'rm' 'ry' 'sq' 'su' 'sx' 
'uc' 'un' 'uo' 'ur' 'uy' 'w9' 'wj' 'wl' 'wm' 'yk' 'ym' 2016-05-04 07:21:22.326724 +61 'bl' 'bn' 'cy' 'ea' 'ed' 'ef' 'ei' 'em' 'er' 'es' 'fp' 'gb' 'hi' 'hp' 'if' 'io' 'ir' 'kw' 'lg' 'lo' 'ls' 'n8' 'nj' 'np' 'oa' 'of' 'oi' 'p6' 'pw' 'q3' 'q4' 'q8' 'qf' 'qh' 'qo' 'qw' 'rg' 'rt' 'sy' 't3' 'u8' 'ug' 'uo' 'uv' 'w6' 'we' 'wg' 'wl' 'wy' 'x9' 'yy' 'yz' 2016-05-04 08:21:22.326724 +62 'aa' 'af' 'ah' 'am' 'as' 'cd' 'ch' 'dd' 'dg' 'dr' 'e2' 'eb' 'eq' 'er' 'fb' 'fj' 'fk' 'fn' 'fx' 'gk' 'gs' 'h8' 'hg' 'hj' 'ia' 'ij' 'in' 'jw' 'l9' 'lj' 'lw' 'nk' 'nt' 'o1' 'ob' 'oo' 'p5' 'pd' 'pk' 'qq' 'sb' 'sd' 'sg' 'u4' 'wl' 'wq' 'wr' 'wu' 'xc' 'xj' 'xt' 'yq' 'zl' 2016-05-04 09:21:22.326724 +63 '10' '2p' '6r' 'a2' 'bb' 'c9' 'dq' 'dy' 'ec' 'eh' 'ej' 'ek' 'es' 'fl' 'g1' 'g9' 'gd' 'h1' 'h2' 'hj' 'hn' 'hw' 'ip' 'iz' 'jg' 'jl' 'l5' 'le' 'lk' 'me' 'mi' 'o0' 'o6' 'oa' 'oe' 'ox' 'pd' 'pm' 'pr' 'py' 'q1' 'qo' 'qq' 'qz' 'r3' 'rs' 'rw' 's7' 'sl' 'sw' 't2' 't4' 'td' 'tj' 'u3' 'ud' 'ur' 'uw' 'vy' 'wi' 'wj' 'wl' 'wq' 'ys' 'yu' 'z7' 'zp' 2016-05-04 10:21:22.326724 +64 'aj' 'cl' 'cy' 'ee' 'i3' 'ir' 'nx' 'oz' 'qm' 'qw' 'r2' 't2' 'u0' 'ub' 'ur' 'wj' 2016-05-04 11:21:22.326724 +65 '3f' '93' 'am' 'an' 'at' 'bj' 'bm' 'bw' 'c6' 'eb' 'ef' 'em' 'eq' 'es' 'g9' 'hc' 'hl' 'hm' 'kg' 'kt' 'mg' 'nq' 'op' 'ox' 'pc' 'py' 'qd' 'qn' 'qz' 'rb' 'rw' 's6' 'se' 'sr' 'sy' 'ts' 'u4' 'ud' 'un' 'w3' 'w8' 'wb' 'wf' 'wg' 'wy' 'x1' 'xl' 'xy' 'yk' 'yo' 'zp' 2016-05-04 12:21:22.326724 +66 'at' 'ec' 'hs' 'ix' 'ks' 'lq' 'ls' 'pk' 'qs' 'qy' 'rc' 'ut' 'xv' 'yo' 'ys' 2016-05-04 13:21:22.326724 +67 'ac' 'al' 'b3' 'bg' 'ci' 'cn' 'ea' 'f4' 'ih' 'ix' 'kt' 'p8' 'pk' 'qd' 'qe' 'qi' 'rb' 'rc' 'ru' 'te' 'th' 'tl' 'tn' 'vb' 'wb' 'ws' 'ww' 'xk' 'xo' 'yb' 'zf' 2016-05-04 14:21:22.326724 +68 'a5' 'ao' 'av' 'b0' 'bh' 'bk' 'cj' 'd0' 'do' 'ds' 'e1' 'ee' 'ep' 'fa' 'fh' 'fv' 'hc' 'hh' 'hq' 'i1' 'if' 'iv' 'ix' 'iy' 'jq' 'jw' 'jz' 'ko' 'll' 'lr' 'lx' 'ns' 'nz' 'og' 'or' 'os' 'oz' 'p0' 'p8' 'pr' 'pv' 'q6' 'q9' 'qb' 'qd' 'qe' 'qh' 'qi' 'qq' 'qt' 'qu' 'qy' 'rd' 'ri' 'rq' 'rr' 'ry' 'sf' 'sg' 'sh' 'si' 'sw' 'sx' 'tc' 'tq' 'u3' 'uh' 'ul' 'us' 'wg' 'wn' 'wx' 'x3' 'xb' 'ya' 'yg' 'yn' 'yo' 'yp' 'yr' 'yw' 2016-05-04 15:21:22.326724 +69 'al' 'ar' 'av' 'bg' 'bh' 'bz' 'e9' 'ed' 'ee' 'ef' 'eh' 'es' 'fl' 'fz' 'gl' 'h1' 'he' 'hf' 'hv' 'hw' 'i1' 'jg' 'kj' 'kn' 'kz' 'lr' 'pp' 'q1' 'qa' 'qf' 'qt' 'qz' 'rg' 'rj' 's5' 'sv' 'sx' 't4' 'th' 'tn' 'ur' 'wz' 'xp' 'yg' 'ym' 'yr' 'ze' 2016-05-04 16:21:22.326724 +70 '26' '4v' 'a1' 'ac' 'af' 'ah' 'au' 'cn' 'd3' 'dn' 'e4' 'ec' 'eg' 'ep' 'er' 'fa' 'fb' 'g6' 'gr' 'h3' 'hb' 'hf' 'hj' 'iu' 'iv' 'j1' 'jk' 'jy' 'k2' 'k8' 'kd' 'lc' 'lf' 'mb' 'nn' 'ot' 'p5' 'pb' 'pl' 'pw' 'py' 'q4' 'q9' 'qf' 'r6' 'ra' 'rl' 'rr' 'ru' 'rz' 'ta' 'te' 'ti' 'tp' 'u1' 'uy' 'w6' 'wb' 'wc' 'wd' 'wh' 'wv' 'wy' 'xo' 'xu' 'yd' 'yh' 'yo' 'yt' 2016-05-04 17:21:22.326724 +71 '8w' 'ao' 'aw' 'bk' 'by' 'c5' 'ch' 'cw' 'd0' 'dh' 'e2' 'eq' 'ev' 'ha' 'hk' 'ii' 'ir' 'iv' 'j8' 'ki' 'kp' 'lm' 'lx' 'nh' 'ns' 'nt' 'o3' 'on' 'ov' 'ow' 'ox' 'pl' 'q3' 'qc' 'qd' 'qe' 'qh' 'qm' 'qo' 'qr' 'qs' 'qt' 'qx' 'r2' 'r8' 'rc' 'rd' 'rh' 'ri' 'rk' 'rq' 'rt' 's3' 'sm' 'sq' 'sz' 't1' 't6' 'tx' 'u2' 'ue' 'up' 'uq' 'ut' 'ux' 'vg' 'vp' 'w3' 'wb' 'wc' 'wd' 'wr' 'xf' 'xp' 'y1' 'ya' 'yf' 'yg' 'yi' 'yn' 'yq' 'zn' 2016-05-04 18:21:22.326724 +72 '1l' 'b5' 'dq' 'fw' 'hz' 'in' 'ml' 'nb' 'nu' 'o2' 'or' 'q7' 'qb' 'qh' 'qt' 'rv' 'so' 'tp' 'tr' 'u0' 'v6' 'wl' 'xb' 2016-05-04 19:21:22.326724 +73 'ah' 'aw' 'e5' 'en' 'ew' 'fi' 'fm' 'gq' 'gt' 'hy' 'ib' 'ie' 'ir' 'j9' 'jr' 'js' 'ny' 'ob' 'or' 'ot' 'oy' 'pt' 'qi' 'rd' 'ri' 'rl' 'ro' 'rt' 'ss' 'tb' 'tg' 'th' 
'vy' 'wq' 'wz' 'xt' 'y6' 'y8' 'zf' 'zh' 2016-05-04 20:21:22.326724 +74 '3d' 'ad' 'dr' 'ee' 'ez' 'f4' 'fd' 'fg' 'fu' 'g9' 'gk' 'h9' 'hl' 'iz' 'jd' 'nb' 'o0' 'oo' 'oy' 'qj' 'qt' 'rp' 'ru' 'sv' 'tl' 'tv' 'wf' 'wp' 'wy' 2016-05-04 21:21:22.326724 +75 '48' 'ae' 'au' 'az' 'b2' 'di' 'ep' 'eq' 'ev' 'f7' 'fw' 'gh' 'gj' 'gn' 'hz' 'i5' 'iy' 'jf' 'kg' 'kj' 'kk' 'nc' 'nm' 'o4' 'oc' 'oe' 'of' 'oh' 'ol' 'ov' 'oz' 'pt' 'pv' 'q5' 'q7' 'qb' 'qe' 'qf' 'rh' 'ry' 'se' 'so' 'tv' 'ua' 'vd' 'wb' 'wg' 'wi' 'wp' 'xf' 'xo' 'yl' 'yr' 'yy' 'z0' 2016-05-04 22:21:22.326724 +76 '5c' 'ag' 'ak' 'ao' 'ap' 'at' 'bo' 'br' 'bs' 'bw' 'cv' 'e9' 'et' 'ex' 'ez' 'fj' 'fu' 'fz' 'gm' 'gq' 'gu' 'h8' 'hl' 'i7' 'ic' 'ik' 'il' 'is' 'j1' 'jc' 'k2' 'k3' 'k6' 'k7' 'kf' 'km' 'kr' 'lj' 'mb' 'md' 'nf' 'om' 'ow' 'p4' 'pk' 'pw' 'q2' 'qe' 'qh' 'qn' 'qq' 'qu' 'qz' 'r0' 'ra' 'rp' 'ru' 'rw' 'rz' 'sv' 'ta' 'ti' 'tq' 'ua' 'up' 'us' 'ut' 'uz' 'vd' 'wl' 'wq' 'xg' 'xk' 'y0' 'yd' 'yw' 'zd' 'zq' 2016-05-04 23:21:22.326724 +77 '20' 'ag' 'az' 'b9' 'bg' 'bh' 'c2' 'dd' 'di' 'do' 'dw' 'e1' 'e3' 'e8' 'eb' 'ev' 'fc' 'fj' 'fu' 'fw' 'fx' 'gb' 'gf' 'hu' 'hw' 'hz' 'ih' 'ik' 'iq' 'jc' 'jq' 'ka' 'kf' 'kh' 'kw' 'ky' 'ly' 'lz' 'mc' 'n8' 'o3' 'o7' 'oa' 'os' 'ou' 'pf' 'q5' 'q6' 'qg' 'ql' 'qo' 'qy' 'r2' 'rf' 'rp' 'ru' 'ry' 's1' 's5' 'se' 'si' 'sn' 'su' 'ta' 'tn' 'tu' 'tv' 'tw' 'u9' 'ua' 'uh' 'uu' 'vo' 'w8' 'wa' 'wc' 'we' 'wi' 'wu' 'ww' 'wy' 'x3' 'xl' 'yb' 'yg' 'yk' 'yv' 2016-05-05 00:21:22.326724 +78 'ef' 'ej' 'fn' 'ho' 'ia' 'il' 'iq' 'mj' 'nr' 'p5' 'rm' 'sd' 'ux' 'vi' 'wq' 'y6' 2016-05-05 01:21:22.326724 +79 'ab' 'av' 'c9' 'cs' 'cx' 'dy' 'e4' 'eb' 'ej' 'er' 'ez' 'f0' 'f2' 'ft' 'gk' 'h5' 'i5' 'ic' 'ig' 'io' 'ki' 'kz' 'lj' 'mm' 'of' 'og' 'ov' 'oz' 'p4' 'pl' 'qh' 'qx' 'rw' 's1' 'sf' 'tw' 'uc' 'ui' 'uo' 'uw' 'x4' 'xk' 'ze' 2016-05-05 02:21:22.326724 +80 'ca' 'do' 'h5' 'i9' 'io' 'jk' 'jl' 'kn' 'qq' 'tm' 'ul' 'w9' 'wb' 'wp' 'wt' 'wx' 'y3' 'zd' 2016-05-05 03:21:22.326724 +81 'a2' 'cs' 'ee' 'em' 'gk' 'hv' 'iy' 'kq' 'nc' 'pb' 'q5' 'qy' 'rq' 'rr' 'ts' 'uq' 'vt' 'w0' 'y6' 'yz' 2016-05-05 04:21:22.326724 +82 'av' 'ej' 'el' 'ep' 'fp' 'hh' 'hz' 'l9' 'mr' 'q3' 'qn' 'qq' 'qy' 'rh' 'tw' 'vt' 'vu' 'w5' 'wg' 'wk' 'wo' 'x5' 'y9' 'yk' 'zm' 2016-05-05 05:21:22.326724 +83 'az' 'cx' 'dm' 'do' 'dq' 'e2' 'e5' 'e8' 'eb' 'fh' 'fj' 'fq' 'gj' 'gl' 'hq' 'im' 'iq' 'iz' 'jn' 'ku' 'l1' 'mz' 'nt' 'o6' 'q1' 'qn' 'qo' 'qz' 'r4' 'rh' 'rn' 'rw' 'sn' 'sv' 't7' 'ud' 'ue' 'ur' 'uv' 'w9' 'wh' 'wk' 'wy' 'xq' 'yh' 'zs' 'zw' 2016-05-05 06:21:22.326724 +84 '5o' 'ad' 'aw' 'az' 'bi' 'bo' 'd1' 'db' 'di' 'do' 'e7' 'eb' 'ei' 'em' 'ep' 'eq' 'eu' 'fo' 'gg' 'gw' 'i0' 'ig' 'ih' 'iu' 'j3' 'j4' 'jo' 'js' 'kq' 'lc' 'lo' 'lu' 'm9' 'mi' 'mk' 'mt' 'n4' 'ni' 'o7' 'od' 'ot' 'pc' 'pg' 'pp' 'qc' 'qr' 'qw' 'rd' 'rx' 'se' 'sq' 'sy' 't5' 'ts' 'ub' 'vz' 'wb' 'wl' 'wr' 'wt' 'xe' 'xt' 'yg' 'yr' 'zw' 2016-05-05 07:21:22.326724 +85 'bt' 'c9' 'db' 'dy' 'ee' 'ej' 'eo' 'et' 'fr' 'gm' 'hz' 'ic' 'ik' 'is' 'jk' 'ls' 'lt' 'oj' 'or' 'ox' 'p8' 'pl' 'pz' 'q1' 'qq' 'qt' 'rw' 's7' 't4' 't8' 'to' 'tp' 'u7' 'ub' 'vx' 'w4' 'ws' 'wt' 'wv' 'x8' 'xs' 'xt' 'yg' 'yo' 2016-05-05 08:21:22.326724 +86 '4d' '9f' 'aa' 'ad' 'av' 'b1' 'bb' 'bd' 'bk' 'd2' 'd6' 'd7' 'de' 'do' 'ds' 'dx' 'e4' 'eh' 'en' 'ep' 'ew' 'fd' 'fg' 'fo' 'fp' 'ga' 'gw' 'gz' 'h2' 'h4' 'hc' 'hk' 'ic' 'ih' 'io' 'iv' 'iy' 'jj' 'jt' 'jz' 'kb' 'ke' 'kl' 'lh' 'lm' 'nc' 'ni' 'nl' 'o9' 'oi' 'oo' 'pd' 'pj' 'po' 'pr' 'qa' 'qf' 'qi' 'qm' 'qn' 'qp' 'qt' 'qu' 'rb' 're' 'rf' 'rn' 'rz' 'sz' 't0' 'tg' 'ti' 'ut' 'v9' 'vg' 'wb' 'wc' 'wf' 'wx' 'x0' 'xb' 'xv' 'y6' 'yb' 'yg' 'yh' 'yk' 'ym' 'yn' 'yw' 'yx' 'yz' 'z8' 'zp' 'zs' 
'zw' 2016-05-05 09:21:22.326724 +87 '6g' 'ar' 'ce' 'cn' 'ds' 'dt' 'e9' 'eh' 'el' 'ew' 'gt' 'gw' 'gx' 'hb' 'ho' 'i8' 'j8' 'jp' 'jr' 'kb' 'la' 'lr' 'mg' 'mi' 'ml' 'nd' 'ng' 'oj' 'op' 'pb' 'pd' 'pn' 'py' 'qc' 'qe' 'qk' 'qr' 'qt' 'qv' 'qz' 'sm' 'tc' 'tg' 'ti' 'tp' 'ty' 'u8' 'uq' 'uw' 'uy' 've' 'w0' 'wb' 'wd' 'wp' 'x4' 'xm' 'ym' 'z2' 'zh' 'zz' 2016-05-05 10:21:22.326724 +88 'af' 'as' 'b2' 'bl' 'bz' 'ca' 'cd' 'co' 'de' 'dp' 'e4' 'ed' 'en' 'eo' 'eu' 'ft' 'g0' 'gj' 'ha' 'hh' 'hn' 'hy' 'ij' 'jb' 'jj' 'jn' 'l7' 'll' 'lp' 'oh' 'ot' 'pb' 'ph' 'pi' 'pn' 'qc' 'qx' 'r5' 'rb' 'ri' 'sb' 'sd' 'sn' 'sv' 'sw' 't7' 'tb' 'ti' 'ty' 'u7' 'un' 'uo' 'wb' 'we' 'wf' 'wh' 'wo' 'wp' 'yb' 'yc' 'ys' 2016-05-05 11:21:22.326724 +89 '5s' 'aa' 'ar' 'bu' 'c2' 'cw' 'd2' 'ej' 'g8' 'gy' 'ij' 'iv' 'k0' 'l1' 'lb' 'm1' 'od' 'pm' 'q1' 'q4' 'q5' 'qd' 'rn' 'ry' 'sj' 'sm' 'ta' 'th' 'u8' 'vf' 'wr' 'xm' 2016-05-05 12:21:22.326724 +90 'am' 'db' 'dj' 'dn' 'ec' 'eo' 'ev' 'ex' 'fn' 'fw' 'fz' 'gi' 'gn' 'gq' 'ha' 'ho' 'in' 'is' 'ja' 'jj' 'jk' 'js' 'kv' 'lc' 'lz' 'mj' 'o2' 'oa' 'of' 'oi' 'pe' 'pn' 'ps' 'pv' 'py' 'q3' 'qa' 'qb' 'qi' 'qn' 'qo' 'qq' 'qr' 'qv' 'rk' 'rl' 't8' 'tk' 'tv' 'tw' 'ug' 'uk' 'ux' 'uz' 'v7' 'vr' 'wd' 'wf' 'wk' 'wv' 'ya' 'yq' 'yu' 'zp' 'zx' 2016-05-05 13:21:22.326724 +91 'a3' 'ac' 'af' 'ar' 'b4' 'cu' 'd6' 'dx' 'dz' 'eb' 'ej' 'ek' 'eo' 'es' 'eu' 'ff' 'gj' 'go' 'h6' 'ho' 'hx' 'i6' 'ih' 'ir' 'iy' 'jd' 'jg' 'jh' 'k5' 'n0' 'nn' 'oe' 'og' 'pj' 'pk' 'pq' 'px' 'q5' 'qc' 'qf' 'qg' 'qi' 'qm' 'qx' 'rb' 'rd' 're' 'rx' 'sn' 'sq' 'tp' 'uh' 'ul' 'um' 'uo' 'up' 'uq' 'uz' 'vt' 'wb' 'wj' 'wp' 'ws' 'wv' 'xk' 'xo' 'yv' 'yw' 'yx' 2016-05-05 14:21:22.326724 +92 '1l' '24' '2o' 'ad' 'am' 'ao' 'bf' 'bl' 'ca' 'ce' 'cn' 'cw' 'cy' 'd4' 'dm' 'e9' 'ed' 'ee' 'ei' 'ep' 'f0' 'fk' 'fu' 'fw' 'g3' 'g4' 'gm' 'gn' 'gu' 'ha' 'hh' 'id' 'iq' 'it' 'iw' 'ix' 'iy' 'j4' 'j6' 'jo' 'jp' 'js' 'jv' 'k6' 'kb' 'ke' 'kh' 'l9' 'lh' 'lr' 'mr' 'n7' 'n8' 'nd' 'o6' 'of' 'oi' 'op' 'ox' 'p6' 'q2' 'qb' 'qd' 'qi' 'qk' 'qn' 'qq' 'qt' 'qy' 'r2' 're' 'rf' 'rk' 's4' 'sb' 'se' 'sh' 't4' 'tb' 'tn' 'tq' 'ty' 'u4' 'u5' 'ub' 'ue' 'uf' 'uk' 'uo' 'vc' 'vs' 'vu' 'w7' 'wd' 'wh' 'xm' 'yk' 'yv' 'zl' 'zz' 2016-05-05 15:21:22.326724 +93 '1h' '4r' 'a9' 'ag' 'ah' 'ak' 'as' 'at' 'b2' 'be' 'cs' 'd6' 'd7' 'da' 'dg' 'dl' 'dr' 'dy' 'dz' 'e8' 'e9' 'eg' 'eh' 'ek' 'eq' 'es' 'f3' 'f9' 'fs' 'gh' 'go' 'gq' 'gz' 'ha' 'hc' 'hi' 'hp' 'if' 'ig' 'im' 'iv' 'jd' 'jt' 'kp' 'kt' 'l6' 'le' 'lg' 'lo' 'mz' 'ns' 'oc' 'ok' 'p6' 'pa' 'pt' 'pv' 'px' 'q1' 'q5' 'qa' 'qe' 'qg' 'qh' 'qi' 'qn' 'qp' 'qs' 'qu' 'qv' 'qy' 'rh' 'rl' 'rp' 'rv' 'rx' 'rz' 's8' 'sc' 'sr' 'su' 't6' 'tb' 'tq' 'tw' 'u9' 'vr' 'w7' 'wb' 'wj' 'wq' 'ww' 'xr' 'xx' 'yb' 'yl' 'yn' 'yr' 'zw' 2016-05-05 16:21:22.326724 +94 '1g' '3e' 'a5' 'ae' 'ar' 'bt' 'cb' 'ck' 'co' 'd5' 'dh' 'dm' 'dr' 'dt' 'e0' 'e9' 'eh' 'el' 'em' 'f2' 'f3' 'fb' 'ff' 'fg' 'gu' 'hb' 'he' 'hr' 'hx' 'i7' 'ib' 'if' 'ig' 'io' 'iu' 'jc' 'jo' 'jv' 'kh' 'kx' 'lb' 'lh' 'lw' 'lx' 'ly' 'mp' 'oa' 'oc' 'oz' 'pa' 'q9' 'qi' 'qp' 'qq' 'qw' 'r3' 'rb' 'rd' 're' 'rn' 'ry' 'rz' 's5' 'su' 'sz' 'td' 'tf' 'tt' 'u9' 'uh' 'um' 'un' 'va' 'vh' 'vt' 'w7' 'wb' 'wc' 'wg' 'wh' 'wx' 'xd' 'xz' 'y8' 'yf' 'yg' 'yh' 'ym' 'zy' 2016-05-05 17:21:22.326724 +95 '5f' '5i' 'ak' 'cg' 'dh' 'di' 'dk' 'eq' 'f6' 'g1' 'g8' 'ie' 'md' 'o0' 'oj' 'oq' 'pt' 'q7' 'qb' 'qd' 'qe' 'qo' 'ro' 'ru' 'rv' 'rx' 'sp' 'sr' 'tf' 'tp' 'uh' 'ul' 'vs' 'xn' 'xw' 'y2' 'yh' 'yk' 'zg' 'zm' 'zq' 2016-05-05 18:21:22.326724 +96 '82' 'a2' 'ad' 'dp' 'el' 'ep' 'fk' 'gd' 'hj' 'ij' 'lc' 'lm' 'lv' 'ml' 'n2' 'n6' 'o1' 'oi' 'on' 'oo' 'ow' 'pb' 'pe' 'pq' 'q6' 'qi' 'qo' 'qp' 'qw' 'rs' 'rv' 
's6' 'sa' 'sk' 't8' 'tl' 'tt' 'tx' 'ub' 'ue' 'uf' 'uj' 'va' 'wl' 'xv' 'y1' 'ye' 'ym' 'yw' 'zm' 2016-05-05 19:21:22.326724 +97 '2s' 'a5' 'aw' 'b5' 'b7' 'ce' 'cq' 'dc' 'dp' 'eb' 'eq' 'ez' 'ff' 'gg' 'gy' 'iw' 'jd' 'jl' 'kw' 'ky' 'mt' 'ng' 'o9' 'od' 'oe' 'po' 'pp' 'qa' 'qe' 'qk' 'ql' 'qp' 'r4' 'r9' 'rb' 'si' 'sl' 'sn' 'sq' 'tg' 'ti' 'tx' 'uh' 'ws' 'xf' 'yv' 'z1' 'zm' 2016-05-05 20:21:22.326724 +98 'cd' 'dl' 'e6' 'en' 'eu' 'gg' 'je' 'kp' 'lv' 'pv' 'q0' 'q3' 'qc' 'qd' 'qo' 'qs' 'qz' 'rw' 'se' 'tx' 'uh' 'uj' 'ul' 'wo' 'ye' 'yi' 'zx' 2016-05-05 21:21:22.326724 +99 'ak' 'at' 'cs' 'di' 'dt' 'ef' 'eq' 'f1' 'f8' 'fb' 'fh' 'fv' 'g4' 'hs' 'j2' 'jo' 'kq' 'lf' 'lq' 'no' 'oa' 'ot' 'q0' 'qh' 'qs' 'qw' 'se' 'so' 'sx' 't3' 'ul' 'un' 'uu' 'wn' 'ws' 'wx' 'y0' 'y3' 'yf' 'zb' 'zc' 2016-05-05 22:21:22.326724 +100 '1k' '1p' 'a6' 'ah' 'ap' 'ay' 'az' 'bi' 'bj' 'cv' 'd1' 'd2' 'd5' 'db' 'dm' 'dn' 'e7' 'eg' 'eh' 'el' 'en' 'eo' 'er' 'f4' 'fy' 'gf' 'gp' 'hd' 'hj' 'hp' 'i2' 'i7' 'ij' 'ik' 'jq' 'jy' 'kg' 'kh' 'kw' 'lr' 'ls' 'n5' 'ne' 'o7' 'od' 'oi' 'oo' 'os' 'oy' 'pd' 'ph' 'pm' 'pw' 'px' 'qa' 'qb' 'qc' 'qk' 'ql' 'qm' 'qn' 'qr' 'qs' 'qu' 'qx' 'r5' 'rh' 'ri' 'rk' 'rn' 'ry' 's0' 'sa' 'si' 'sr' 'st' 'ub' 'uc' 'uf' 'uq' 'ur' 'v9' 'vn' 'w9' 'wd' 'we' 'wk' 'wt' 'wv' 'ww' 'xj' 'xk' 'yh' 'yk' 'zd' 'zw' 'zy' 2016-05-05 23:21:22.326724 +101 'a6' 'aa' 'ae' 'bn' 'c0' 'c7' 'ck' 'df' 'dn' 'ds' 'dt' 'e0' 'ea' 'eo' 'eu' 'f0' 'f6' 'g1' 'gd' 'i4' 'j6' 'ja' 'je' 'jt' 'mv' 'no' 'of' 'os' 'ou' 'ox' 'pi' 'pq' 'pr' 'q6' 'qk' 'ql' 'qv' 'qw' 'rh' 'rm' 'rv' 'sv' 't6' 'u2' 'un' 'ux' 'wc' 'wl' 'wo' 'wp' 'xg' 'y7' 'y8' 'ya' 'yn' 'yx' 'zl' 2016-05-06 00:21:22.326724 +102 '2o' '2w' 'ac' 'an' 'bb' 'be' 'c3' 'db' 'dj' 'dm' 'dw' 'eb' 'ed' 'eh' 'ew' 'ex' 'fb' 'fi' 'g2' 'gh' 'gv' 'h6' 'hw' 'id' 'ie' 'iz' 'j8' 'jk' 'jv' 'kd' 'lk' 'm8' 'ml' 'mr' 'mx' 'np' 'oe' 'on' 'oz' 'pj' 'pk' 'pm' 'pu' 'qb' 'ql' 'qv' 'qx' 'r0' 're' 'rj' 'rq' 'rw' 't3' 'ta' 'tc' 'td' 'th' 'to' 'tu' 'tx' 'ty' 'tz' 'u3' 'uf' 'vp' 'vz' 'wd' 'wh' 'wk' 'wl' 'wx' 'wy' 'wz' 'xe' 'xm' 'xs' 'ye' 'zd' 'zk' 2016-05-06 01:21:22.326724 +103 'a6' 'au' 'aw' 'bj' 'bn' 'cy' 'dg' 'e3' 'ei' 'ek' 'em' 'et' 'ex' 'fi' 'fm' 'g4' 'gm' 'gz' 'hp' 'ia' 'ie' 'jd' 'lg' 'lj' 'm6' 'mi' 'nd' 'oj' 'oo' 'ov' 'px' 'q0' 'q4' 'qj' 'qp' 'qq' 'r1' 'r5' 'rh' 'ro' 'tg' 'tr' 'tu' 'ty' 'u2' 'uk' 'ux' 'we' 'wi' 'wk' 'wq' 'wu' 'xy' 'y0' 2016-05-06 02:21:22.326724 +104 'ab' 'am' 'aq' 'b9' 'bj' 'ck' 'cw' 'e2' 'e8' 'ek' 'eo' 'ew' 'f2' 'ff' 'fh' 'ft' 'gl' 'gs' 'h7' 'i0' 'ik' 'jl' 'jo' 'jq' 'jr' 'kc' 'kg' 'kw' 'mm' 'mo' 'pb' 'pn' 'qe' 'qg' 'qk' 'qr' 'qu' 'r2' 's8' 'sd' 'sq' 'su' 'td' 'tr' 'uh' 'uu' 'v2' 'vl' 'w8' 'wg' 'wm' 'wt' 'xl' 'xq' 'yi' 'yo' 'yq' 2016-05-06 03:21:22.326724 +105 'da' 'e5' 'ec' 'gd' 'gf' 'jj' 'kx' 'ly' 'pd' 'pj' 'q6' 'qk' 'rm' 'sa' 'te' 'ut' 'wa' 'wc' 'wl' 'ws' 'wv' 'zb' 'zk' 'zz' 2016-05-06 04:21:22.326724 +106 'am' 'as' 'ay' 'bl' 'e0' 'e1' 'ea' 'eg' 'f6' 'fc' 'fx' 'ia' 'io' 'jq' 'jx' 'kd' 'ky' 'lb' 'll' 'lr' 'lv' 'me' 'nv' 'o4' 'o7' 'oe' 'ok' 'oq' 'pe' 'pr' 'py' 'q7' 'qg' 'qh' 'qj' 'qk' 'ql' 'qq' 'qr' 'qs' 'r1' 'rg' 'rl' 'rm' 'ro' 'sq' 'sr' 'sz' 'ti' 'ts' 'ue' 'ui' 'uu' 'uz' 'vk' 'y0' 'y2' 'y9' 'ym' 'yt' 'yx' 'z8' 'zd' 2016-05-06 05:21:22.326724 +107 '1d' 'a2' 'af' 'bb' 'dd' 'dg' 'dl' 'du' 'ee' 'ff' 'g0' 'hj' 'hk' 'i4' 'iy' 'jt' 'kq' 'm6' 'mh' 'ov' 'qf' 'qh' 'qq' 'rt' 'sw' 'ta' 'tc' 'we' 'wn' 'wy' 'wz' 'xd' 'xs' 'y2' 'yg' 'yj' 'yk' 'zi' 'zq' 'zw' 2016-05-06 06:21:22.326724 +108 '2e' 'ak' 'bv' 'cb' 'ch' 'cy' 'd9' 'dq' 'dr' 'e8' 'ec' 'ef' 'ek' 'es' 'f1' 'fw' 'gj' 'gw' 'hh' 'hr' 'i3' 'i6' 'ic' 'ji' 'jr' 'jy' 'kr' 'md' 
'mu' 'od' 'og' 'oo' 'p9' 'pd' 'q1' 'q3' 'qq' 'qw' 'qy' 'ra' 'rj' 'rl' 'rn' 'sf' 't4' 't5' 't7' 'tl' 'tm' 'ts' 'u4' 'ug' 'ui' 'uw' 'vo' 'ws' 'ww' 'wy' 'yd' 'yo' 'yq' 'zm' 'zp' 'zw' 2016-05-06 07:21:22.326724 +109 'ag' 'ao' 'ay' 'bk' 'bl' 'bw' 'cq' 'dp' 'dv' 'e5' 'ee' 'eu' 'fv' 'fx' 'gg' 'h8' 'ha' 'hk' 'ii' 'im' 'iq' 'it' 'je' 'jf' 'jr' 'js' 'km' 'ks' 'lo' 'lz' 'o8' 'ot' 'pg' 'pl' 'q3' 'q9' 'qa' 'qd' 'qe' 'qf' 'qh' 'qn' 'qq' 'qy' 'qz' 'r7' 're' 'ri' 'rl' 'ro' 'rr' 'rx' 'ry' 'sh' 'sl' 'ss' 'sw' 't5' 'ta' 'te' 'th' 'tr' 'tt' 'tu' 'tv' 'u7' 'uk' 'um' 'up' 'uu' 'vl' 'vw' 'w0' 'wl' 'ws' 'wt' 'ww' 'xe' 'ya' 'yb' 'ye' 'yf' 'za' 2016-05-06 08:21:22.326724 +110 '5o' 'ar' 'at' 'c1' 'ch' 'cr' 'dp' 'dy' 'ed' 'el' 'er' 'f0' 'f7' 'fc' 'fh' 'fm' 'fx' 'g8' 'gd' 'gf' 'gh' 'gl' 'gp' 'gv' 'gy' 'hs' 'ht' 'hv' 'hy' 'i2' 'ia' 'lc' 'lu' 'lv' 'ly' 'mb' 'mq' 'nn' 'nz' 'oa' 'oq' 'ot' 'ox' 'oz' 'p8' 'pc' 'pn' 'py' 'q2' 'q7' 'qe' 'qj' 'qn' 'qs' 'qv' 'qw' 'r1' 'r4' 'rh' 'rn' 'rq' 'rr' 's1' 'sa' 'sd' 'se' 'sr' 't6' 'ta' 'tf' 'ts' 'tw' 'tx' 'uk' 'ul' 'uw' 'v7' 'vu' 'vz' 'w7' 'wc' 'wd' 'wk' 'wl' 'wo' 'wr' 'wv' 'x9' 'xe' 'xj' 'xl' 'xu' 'yi' 'ym' 'yq' 'z1' 'z5' 'zm' 2016-05-06 09:21:22.326724 +111 '2i' 'ap' 'aw' 'bd' 'bs' 'c9' 'db' 'di' 'dl' 'dq' 'dr' 'e5' 'ej' 'el' 'ez' 'f9' 'ga' 'gp' 'hm' 'if' 'im' 'j9' 'k9' 'ka' 'kd' 'kj' 'lc' 'lf' 'lk' 'mf' 'mq' 'mr' 'o3' 'oe' 'oj' 'op' 'p5' 'p7' 'p8' 'pb' 'pv' 'py' 'qc' 'qd' 'qf' 'qi' 'qk' 'qq' 'qu' 'qw' 'qz' 'r2' 'r8' 'r9' 'rj' 'rk' 'ry' 'rz' 'sd' 'sk' 'sm' 't2' 't6' 't7' 'tp' 'tq' 'tt' 'tw' 'u7' 'uh' 'uv' 'vh' 'w0' 'wc' 'we' 'wh' 'wk' 'wl' 'yf' 'yk' 'ys' 'zi' 'zq' 'zs' 2016-05-06 10:21:22.326724 +112 '4u' 'a3' 'ck' 'dw' 'e5' 'fu' 'ij' 'iu' 'jd' 'jp' 'ka' 'kb' 'ld' 'op' 'p1' 'po' 'pw' 'q5' 'q7' 'qf' 'qq' 'qr' 'qv' 'rm' 'sv' 't6' 'td' 'tp' 'uw' 'vb' 'w4' 'w6' 'wq' 'xh' 'y2' 'yf' 'yt' 'zj' 'zz' 2016-05-06 11:21:22.326724 +113 'a6' 'ao' 'aq' 'ba' 'be' 'bg' 'cb' 'ci' 'cj' 'cq' 'cs' 'dc' 'e6' 'ea' 'ei' 'ex' 'fw' 'fy' 'g7' 'gg' 'gq' 'hg' 'ib' 'ig' 'im' 'jz' 'kv' 'me' 'mi' 'mj' 'mp' 'o6' 'oq' 'os' 'ov' 'pd' 'pl' 'pr' 'px' 'qf' 'qg' 'ql' 'qr' 'qz' 'r5' 'rl' 'ro' 'ru' 'rz' 'sy' 'sz' 't7' 'tt' 'u7' 'uf' 'ug' 'uo' 'vt' 'w3' 'w8' 'wb' 'wp' 'ww' 'wy' 'xl' 'xv' 'yc' 'z3' 2016-05-06 12:21:22.326724 +114 'ca' 'cm' 'cr' 'eh' 'fd' 'fp' 'g8' 'ga' 'gh' 'h1' 'iu' 'je' 'jy' 'kh' 'lx' 'o4' 'oi' 'ov' 'ph' 'pm' 'qi' 'qv' 're' 'rg' 'rv' 'ry' 'si' 'sl' 'sv' 'sx' 'tq' 'tz' 'uh' 'uq' 'uy' 'wi' 'wo' 'y7' 'yc' 'yn' 'yy' 2016-05-06 13:21:22.326724 +115 'af' 'bq' 'c9' 'cg' 'eg' 'ei' 'eq' 'ez' 'fo' 'fp' 'gl' 'i6' 'it' 'iz' 'jg' 'ka' 'lf' 'lm' 'lr' 'mv' 'oc' 'oj' 'om' 'or' 'p1' 'p4' 'pt' 'q3' 'qd' 'qe' 'qh' 'qx' 'ra' 'rd' 'sr' 'sz' 'tb' 'tc' 'tj' 'ug' 'uh' 'ux' 'wb' 'wc' 'wj' 'wy' 'xa' 'xb' 'y0' 'yj' 'yp' 'yz' 'zf' 'zl' 'zq' 2016-05-06 14:21:22.326724 +116 '5y' 'a2' 'by' 'c9' 'e6' 'fd' 'fn' 'g7' 'h9' 'hj' 'j9' 'kg' 'kk' 'kn' 'kx' 'ln' 'q3' 'qa' 'qh' 'rb' 'rh' 'sj' 'sn' 'tx' 'tz' 'un' 'us' 've' 'vf' 'vh' 'wf' 'wp' 'wz' 'xj' 'xq' 'yi' 'yz' 2016-05-06 15:21:22.326724 +117 'a3' 'b9' 'c0' 'cg' 'ct' 'cu' 'dy' 'eh' 'el' 'ge' 'gt' 'hh' 'hu' 'if' 'j5' 'kq' 'lb' 'mh' 'o3' 'o6' 'oe' 'oi' 'ow' 'p5' 'ph' 'pw' 'q0' 'qh' 'ql' 'qy' 'r7' 'si' 'sk' 'sv' 'tf' 'tm' 'to' 'tt' 'ty' 'u3' 'uj' 'uo' 'vi' 'vl' 'wc' 'wf' 'xx' 'y0' 'ya' 'yu' 2016-05-06 16:21:22.326724 +118 'aj' 'ak' 'aq' 'ba' 'bd' 'bj' 'bk' 'bx' 'cr' 'cx' 'd0' 'dj' 'dl' 'ef' 'ei' 'ej' 'ek' 'em' 'eq' 'er' 'f3' 'f7' 'fi' 'fp' 'gv' 'h7' 'ha' 'hc' 'hh' 'hk' 'hn' 'ia' 'id' 'im' 'iq' 'is' 'iw' 'ji' 'jk' 'js' 'ju' 'jx' 'l0' 'm1' 'mq' 'ng' 'o8' 'ot' 'ov' 'oy' 'p1' 'pu' 'pv' 'pz' 'qc' 'qh' 
'qm' 'qq' 'qs' 'qt' 'qw' 'rc' 'rq' 'rt' 'sd' 't5' 'ta' 'tb' 'tj' 'tp' 'uk' 'un' 'v9' 'vt' 'wa' 'wd' 'wf' 'wm' 'wn' 'wv' 'wz' 'xp' 'ya' 'yg' 'ym' 'yn' 'yr' 'zi' 2016-05-06 17:21:22.326724 +119 'ae' 'ap' 'bi' 'd7' 'ee' 'ej' 'ek' 'ev' 'ew' 'fb' 'gm' 'hf' 'ho' 'hz' 'i6' 'ib' 'id' 'il' 'im' 'io' 'ip' 'ir' 'iv' 'j2' 'jf' 'jl' 'jo' 'kh' 'kk' 'kt' 'lr' 'm4' 'mi' 'nm' 'ns' 'og' 'pv' 'pw' 'q4' 'qd' 'qi' 'r6' 'r8' 'rn' 'rp' 'si' 't7' 'ta' 'td' 'te' 'ti' 'tm' 'u6' 'ub' 'vu' 'w6' 'w8' 'wa' 'wb' 'wc' 'wd' 'wf' 'wh' 'wn' 'wt' 'wz' 'y5' 'ya' 'yg' 'yk' 'yn' 'z3' 2016-05-06 18:21:22.326724 +120 'a6' 'ao' 'az' 'by' 'db' 'dl' 'eg' 'ei' 'el' 'eo' 'fh' 'fv' 'gl' 'h0' 'hl' 'hx' 'i9' 'iq' 'j7' 'jx' 'kg' 'kh' 'l9' 'nz' 'o9' 'oi' 'om' 'on' 'oq' 'or' 'pf' 'pz' 'qf' 'qj' 'r5' 'rq' 'rw' 'sr' 'sx' 'tc' 'tg' 'tj' 'tl' 'to' 'uj' 'un' 'vw' 'w1' 'w5' 'w8' 'wn' 'ws' 'wv' 'x2' 'x3' 'xi' 'y4' 'yd' 'ym' 'ys' 'z7' 'zq' 'zv' 2016-05-06 19:21:22.326724 +121 'az' 'eq' 'fe' 'go' 'gv' 'ig' 'iz' 'ja' 'l4' 'mo' 'nm' 'no' 'of' 'pk' 'q9' 'qb' 'ql' 'qt' 'r7' 'rc' 'rm' 'tg' 'tv' 'u3' 'u7' 'wc' 'x4' 'xw' 'yl' 'zk' 'zn' 2016-05-06 20:21:22.326724 +122 '1f' '2j' '2q' '5c' 'am' 'ap' 'aq' 'az' 'bu' 'bx' 'cr' 'e0' 'e7' 'ea' 'eb' 'ec' 'eg' 'ew' 'ey' 'fh' 'fk' 'g2' 'go' 'h4' 'h8' 'ha' 'hb' 'hm' 'ia' 'ic' 'ie' 'ik' 'iu' 'iv' 'j7' 'ja' 'k9' 'kw' 'lp' 'ly' 'mb' 'ns' 'ny' 'o0' 'of' 'ok' 'oz' 'pe' 'pr' 'qb' 'qd' 'qg' 'qt' 'qx' 'qy' 'rg' 'ro' 'rq' 'rz' 'sh' 'si' 'sl' 'sm' 'td' 'tg' 'tx' 'uh' 'um' 'up' 'uw' 've' 'vl' 'vp' 'vw' 'w2' 'w5' 'wc' 'wf' 'wi' 'wn' 'wq' 'ws' 'wv' 'wy' 'wz' 'x1' 'xr' 'y1' 'zs' 'zz' 2016-05-06 21:21:22.326724 +123 '25' 'cd' 'd2' 'dv' 'eo' 'es' 'f4' 'oc' 'qc' 'sh' 'te' 'tv' 'wd' 'wn' 'wo' 'xp' 'zt' 2016-05-06 22:21:22.326724 +124 '28' 'b1' 'bj' 'bm' 'cz' 'dd' 'ds' 'ed' 'em' 'hh' 'hr' 'ie' 'ii' 'im' 'io' 'jn' 'jw' 'lg' 'lo' 'm6' 'mn' 'mw' 'ny' 'o6' 'p7' 'q4' 'qd' 'qf' 'qn' 'qq' 'qt' 'r1' 'r2' 'rn' 'rt' 'sh' 'ur' 'uv' 'ux' 'vc' 'wb' 'wn' 'xh' 'xx' 'yw' 2016-05-06 23:21:22.326724 +125 '1m' '1s' 'aa' 'b0' 'be' 'cb' 'dc' 'dd' 'dh' 'eq' 'fa' 'ib' 'if' 'ik' 'it' 'jr' 'jz' 'ka' 'lq' 'lu' 'm2' 'o2' 'ob' 'oc' 'of' 'om' 'oz' 'pv' 'q4' 'qb' 'qk' 'qn' 'qs' 'qu' 'r3' 'r9' 'rs' 'rw' 'sg' 'si' 'sv' 't9' 'tw' 'vq' 'vy' 'w2' 'w6' 'w9' 'y0' 'y1' 'ye' 'yq' 'z1' 2016-05-07 00:21:22.326724 +126 '3k' '3v' 'a0' 'ah' 'am' 'an' 'ay' 'bd' 'cs' 'cu' 'cw' 'db' 'dp' 'e3' 'et' 'ey' 'fi' 'gl' 'gq' 'hh' 'hk' 'hx' 'i8' 'ih' 'ii' 'iv' 'iz' 'j5' 'jf' 'jo' 'jr' 'kd' 'km' 'lg' 'li' 'lt' 'm4' 'mo' 'mv' 'mw' 'n3' 'nh' 'nn' 'o9' 'od' 'of' 'oh' 'ok' 'or' 'pc' 'pg' 'pm' 'pn' 'q6' 'qj' 'qn' 'qr' 'qs' 'qw' 'r7' 'r9' 're' 'rm' 'rr' 'rt' 'rz' 'sb' 'sf' 'td' 'tu' 'uc' 'ug' 'uj' 'uu' 'w3' 'wa' 'wc' 'wg' 'wi' 'wm' 'wp' 'ww' 'wx' 'wz' 'xb' 'xi' 'y0' 'y9' 'ya' 'yd' 'yg' 'yh' 'yv' 'yy' 'zo' 'zz' 2016-05-07 01:21:22.326724 +127 'bs' 'ee' 'gz' 'hv' 'ib' 'kc' 'lb' 'nu' 'ps' 'pt' 'qh' 'ud' 'vo' 'vq' 'vu' 'wb' 'wj' 'x3' 'xu' 'yf' 2016-05-07 02:21:22.326724 +128 '12' 'ac' 'ay' 'bc' 'bj' 'bm' 'bo' 'ce' 'cf' 'ck' 'cr' 'db' 'do' 'du' 'dy' 'ea' 'ej' 'ek' 'eo' 'ep' 'et' 'f2' 'fc' 'fl' 'fs' 'fy' 'g9' 'gf' 'hj' 'hk' 'hp' 'i1' 'i5' 'ih' 'ii' 'im' 'jp' 'jx' 'k9' 'kf' 'ky' 'mb' 'mj' 'mk' 'nb' 'nc' 'o0' 'o9' 'oc' 'oj' 'oq' 'pm' 'ps' 'pt' 'pu' 'pv' 'py' 'qd' 'qi' 'qj' 'qk' 'ql' 'qm' 'qs' 'qz' 'ra' 'rc' 'rd' 'rh' 'ri' 'ro' 's0' 's3' 's7' 's9' 'sv' 't2' 't7' 'tm' 'tp' 'tw' 'tx' 'ty' 'u3' 'u6' 'uf' 'ug' 'ui' 'uk' 'ut' 'ux' 'v5' 'w3' 'wk' 'wt' 'xo' 'xq' 'xr' 'xu' 'ye' 'yl' 'yn' 'yt' 'yv' 'zm' 2016-05-07 03:21:22.326724 +129 '4d' '5a' 'ab' 'ae' 'ah' 'ai' 'be' 'cb' 'd6' 'dw' 'ea' 'eu' 'fb' 'fd' 'fe' 'fj' 
'fk' 'g0' 'g9' 'gf' 'hc' 'hh' 'hk' 'i4' 'ik' 'iv' 'iz' 'j1' 'j3' 'j5' 'ju' 'jz' 'kc' 'md' 'ng' 'nk' 'o1' 'o9' 'oi' 'om' 'p4' 'pl' 'pn' 'ps' 'q8' 'qa' 'qb' 'qh' 'qj' 'qm' 'qn' 'qo' 'qq' 'qr' 'qt' 'qu' 'qy' 'qz' 'ro' 'rs' 's2' 'sl' 'sm' 'sp' 'ti' 'tn' 'tu' 'tw' 'u7' 'ug' 'ui' 'ul' 'us' 'ut' 'vc' 've' 'vj' 'vk' 'w0' 'w2' 'w8' 'wi' 'wk' 'wl' 'wm' 'wt' 'x6' 'xh' 'xy' 'yr' 'ys' 'yt' 'yy' 'yz' 'zr' 2016-05-07 04:21:22.326724 +130 '1n' 'am' 'b9' 'bc' 'bi' 'dl' 'dv' 'ea' 'eq' 'ey' 'fj' 'g1' 'g3' 'g8' 'ge' 'it' 'iu' 'jh' 'jz' 'kk' 'ln' 'mc' 'mw' 'nf' 'nj' 'o4' 'o9' 'ob' 'ox' 'pq' 'q4' 'qb' 'qk' 'qn' 'qs' 'qt' 'r4' 'ra' 'rg' 'rp' 'rx' 'sj' 'sq' 'ss' 't0' 'tg' 'ty' 'ue' 'ui' 'uj' 'uv' 'v6' 'vr' 'w1' 'w9' 'wj' 'wy' 'xt' 'ym' 'ys' 'yz' 2016-05-07 05:21:22.326724 +131 'a2' 'bx' 'e4' 'em' 'eu' 'ey' 'gd' 'jj' 'k1' 'lm' 'my' 'oz' 'p8' 'pa' 'pj' 'qj' 'qp' 'qy' 'ri' 'rw' 'sj' 'sw' 'tv' 'uj' 'uk' 'um' 'ux' 'vs' 'wd' 'wf' 'xj' 'xs' 'xx' 'xy' 'yd' 2016-05-07 06:21:22.326724 +132 'az' 'bl' 'bo' 'cf' 'gt' 'h0' 'hx' 'iq' 'k2' 'kb' 'oc' 'qg' 'qn' 'qz' 're' 'rl' 'rv' 'xp' 'y8' 'yf' 2016-05-07 07:21:22.326724 +133 'ab' 'ae' 'al' 'am' 'an' 'ap' 'be' 'd2' 'eb' 'ec' 'ep' 'eq' 'fn' 'hp' 'hx' 'i9' 'ie' 'jn' 'kh' 'kv' 'mi' 'mq' 'mv' 'ox' 'oz' 'pn' 'qk' 'qo' 'qr' 'r8' 're' 'rf' 'rp' 't8' 'uj' 'uk' 'up' 'ur' 'vo' 'w7' 'wf' 'wp' 'ws' 'wt' 'ww' 'wx' 'xw' 'yd' 'yj' 'yn' 'yv' 'zr' 2016-05-07 08:21:22.326724 +134 'af' 'ag' 'ak' 'bb' 'cr' 'cy' 'd7' 'db' 'df' 'di' 'eh' 'ew' 'fl' 'fr' 'gd' 'gp' 'hf' 'hk' 'hu' 'ib' 'ik' 'io' 'ix' 'jg' 'k9' 'kc' 'ke' 'm4' 'ma' 'mx' 'mz' 'ob' 'oi' 'oo' 'or' 'ox' 'pg' 'pl' 'pu' 'qc' 'qe' 'qg' 'rl' 'ro' 'rv' 'rz' 'sj' 'sq' 'u4' 've' 'we' 'wi' 'wk' 'wy' 'xd' 'yh' 'yq' 'yv' 'yx' 2016-05-07 09:21:22.326724 +135 '18' 'at' 'c6' 'ca' 'dj' 'dl' 'dx' 'ec' 'ek' 'f4' 'fo' 'fs' 'fz' 'h8' 'hn' 'ik' 'il' 'j4' 'jn' 'ld' 'ln' 'ls' 'lx' 'nu' 'o6' 'os' 'ot' 'ox' 'pa' 'pe' 'pp' 'pw' 'q1' 'qc' 'qd' 'qh' 'qk' 'qq' 'qr' 'rq' 'rr' 'rt' 'rv' 'rz' 's5' 'sd' 'sh' 'sy' 't3' 'tu' 'ty' 'uj' 'uo' 'up' 'uu' 've' 'vl' 'vu' 'wa' 'wd' 'wo' 'wr' 'ws' 'ww' 'xk' 'y0' 'y6' 'yi' 'yq' 'yy' 'z3' 2016-05-07 10:21:22.326724 +136 'b6' 'bq' 'dv' 'e1' 'ez' 'f5' 'fh' 'ik' 'iy' 'jy' 'li' 'm2' 'qe' 'rp' 'te' 'u4' 'u8' 'uo' 'w3' 'w8' 'we' 'wo' 'wu' 'x5' 'yl' 2016-05-07 11:21:22.326724 +137 'ak' 'ar' 'dg' 'ds' 'ep' 'fv' 'ge' 'jd' 'no' 'on' 'q5' 'qd' 'qo' 'qv' 'qx' 'r7' 'ra' 'ru' 'sa' 'ud' 'uo' 'wl' 'ye' 'yl' 2016-05-07 12:21:22.326724 +138 '0w' 'bu' 'df' 'dj' 'du' 'dy' 'dz' 'gj' 'ho' 'je' 'kw' 'm1' 'o3' 'oo' 'pf' 'qh' 'qn' 'qu' 'rf' 'rj' 'ro' 'rv' 'ss' 't4' 'tc' 'tf' 'um' 'uo' 'v6' 'v8' 'wn' 'wo' 'xh' 'y0' 'yz' 'zr' 2016-05-07 13:21:22.326724 +139 '4t' 'ay' 'b3' 'dz' 'eo' 'ep' 'fd' 'fh' 'ht' 'hw' 'i3' 'jl' 'kn' 'op' 'qb' 'qd' 'ql' 'qm' 'qp' 'qy' 'rc' 'rh' 'rs' 'rw' 'sm' 't9' 'td' 'tu' 'tw' 'tz' 'u1' 'ug' 'uh' 'ur' 'vt' 'we' 'wi' 'wn' 'x7' 'y8' 'yn' 'yy' 2016-05-07 14:21:22.326724 +140 '4a' '56' 'ae' 'ao' 'ay' 'bn' 'bx' 'c9' 'cl' 'd0' 'd7' 'en' 'et' 'fj' 'fk' 'fu' 'g4' 'ha' 'ht' 'hv' 'ih' 'ij' 'ir' 'iw' 'k1' 'k7' 'kq' 'kw' 'lg' 'm5' 'me' 'nw' 'oj' 'ou' 'p6' 'pl' 'pq' 'pt' 'qc' 'qg' 'qk' 'ql' 'qm' 'qo' 'qq' 'rj' 's1' 'sd' 'sq' 'sz' 'tb' 'td' 'tr' 'tw' 'ug' 'uj' 'uo' 'ut' 'uy' 'wj' 'wk' 'ws' 'wz' 'x2' 'xx' 'xy' 'y1' 'y4' 'yc' 'yk' 'yt' 'yx' 'zp' 2016-05-07 15:21:22.326724 +141 '3q' 'an' 'cp' 'ej' 'fx' 'gj' 'i3' 'ib' 'ik' 'jt' 'k5' 'n9' 'no' 'os' 'pg' 'qo' 'qp' 'qx' 'rb' 'rk' 'su' 'tk' 'tv' 'u1' 'up' 'w1' 'we' 'wj' 'wr' 'yf' 'yq' 'z5' 'zd' 2016-05-07 16:21:22.326724 +142 '3b' 'am' 'aq' 'ar' 'bm' 'de' 'e7' 'ed' 'eh' 'es' 'ey' 'fx' 'g5' 'gf' 'gs' 'hl' 'i7' 
'iy' 'jn' 'k9' 'kf' 'km' 'll' 'ly' 'm0' 'm5' 'mh' 'nm' 'nq' 'of' 'or' 'p5' 'pn' 'pz' 'qp' 'qt' 'qv' 'r0' 'rb' 'sg' 'sv' 'uv' 'w3' 'w8' 'w9' 'wa' 'we' 'wj' 'wn' 'wr' 'yd' 'yk' 'zq' 2016-05-07 17:21:22.326724 +143 'cm' 'd2' 'en' 'ex' 'f4' 'f9' 'fp' 'ga' 'gk' 'hy' 'i3' 'j4' 'j9' 'jb' 'kj' 'ls' 'lw' 'mb' 'mq' 'o9' 'qg' 'qi' 'qm' 'qo' 'qw' 'qy' 'rc' 'rf' 'rj' 'rm' 's8' 'tb' 'tl' 'tp' 'um' 'vg' 'vn' 'wb' 'wf' 'wl' 'xw' 'yl' 'zf' 'zt' 2016-05-07 18:21:22.326724 +144 'aa' 'ap' 'b0' 'c6' 'e1' 'ex' 'fe' 'fu' 'fz' 'g1' 'g8' 'go' 'hl' 'iz' 'k3' 'ls' 'mw' 'mz' 'oa' 'ow' 'p6' 'q5' 'qh' 'qq' 'qx' 'ra' 'rh' 'rp' 'rw' 'tf' 'tk' 'tl' 'uc' 'wi' 'x1' 'xb' 'yb' 'yw' 'yz' 'zm' 2016-05-07 19:21:22.326724 +145 'gs' 'i6' 'i9' 'j2' 'l0' 'oq' 'qx' 'sc' 'xe' 'yu' 2016-05-07 20:21:22.326724 +146 'bt' 'dg' 'dz' 'e4' 'ek' 'ep' 'fi' 'gk' 'gl' 'gu' 'hl' 'ho' 'io' 'ls' 'ni' 'nw' 'pv' 'px' 'qe' 'qf' 'qj' 'ql' 'qu' 'qy' 'rw' 'rx' 'ss' 't7' 'tt' 'ty' 'ud' 'up' 'uq' 'uw' 'wn' 'wy' 'xt' 'yv' 'yx' 'za' 'zv' 'zw' 2016-05-07 21:21:22.326724 +147 '1w' '2r' '9e' '9r' 'ci' 'es' 'f1' 'f4' 'fb' 'fx' 'g6' 'ga' 'gk' 'hj' 'hm' 'hu' 'i5' 'ii' 'jg' 'ji' 'ko' 'ku' 'l2' 'ld' 'lx' 'mt' 'n1' 'no' 'nw' 'o7' 'oi' 'pk' 'qy' 'r9' 'rj' 'rr' 'rt' 's6' 'sd' 't0' 'tc' 'to' 'tp' 'tv' 'ud' 'ux' 'vk' 'wr' 'wy' 'xb' 'xr' 'xt' 'yc' 'yp' 'zy' 2016-05-07 22:21:22.326724 +148 '3k' 'di' 'dp' 'em' 'ew' 'f5' 'hb' 'hn' 'j3' 'k0' 'lk' 'm4' 'mq' 'pr' 'qe' 'qo' 'qy' 'rc' 'ri' 'rt' 'so' 'ts' 've' 'w4' 'w7' 'wl' 'wn' 'wy' 'xf' 'y6' 'yt' 2016-05-07 23:21:22.326724 +149 '7n' 'ai' 'aj' 'ap' 'b9' 'bx' 'cu' 'dn' 'e1' 'e2' 'ed' 'eg' 'eo' 'hu' 'ie' 'ln' 'lq' 'nl' 'oa' 'qa' 'qe' 'qo' 'r5' 'ra' 'ri' 'rm' 'rw' 'ss' 'sx' 't3' 'tz' 'ut' 'uz' 'wd' 'we' 'wn' 'wq' 'wr' 'ws' 2016-05-08 00:21:22.326724 +150 'a0' 'ad' 'ak' 'ao' 'au' 'b0' 'dg' 'dt' 'e9' 'em' 'fg' 'fk' 'fn' 'gq' 'h7' 'hf' 'hm' 'ia' 'id' 'ig' 'iv' 'kv' 'l1' 'lm' 'lz' 'm1' 'mw' 'mz' 'na' 'nh' 'nl' 'nn' 'o1' 'o3' 'om' 'p2' 'p9' 'pj' 'pw' 'pz' 'qk' 'qm' 'qy' 'rs' 'ru' 'ry' 'sf' 'su' 'sw' 'sx' 'td' 'tl' 'tp' 'tw' 'u1' 'ug' 'un' 'uo' 'uq' 'ur' 'v5' 'vk' 'wd' 'wf' 'wj' 'wq' 'wy' 'xg' 'xk' 'xn' 'xq' 'yb' 'z0' 'zt' 'zu' 2016-05-08 01:21:22.326724 +151 '6i' 'a3' 'ac' 'ax' 'br' 'ck' 'dc' 'dj' 'dn' 'dq' 'dt' 'dv' 'eb' 'ed' 'ei' 'en' 'eq' 'ey' 'f4' 'fk' 'fm' 'fp' 'fw' 'gn' 'gw' 'he' 'ho' 'io' 'ja' 'jj' 'jn' 'ju' 'jy' 'kc' 'kk' 'l1' 'l4' 'lt' 'lx' 'm1' 'm7' 'mc' 'o6' 'ol' 'p3' 'pc' 'pe' 'pp' 'pv' 'px' 'q0' 'qb' 'qe' 'qr' 'qs' 'qt' 'qu' 'qv' 'qy' 'rg' 'rq' 's3' 'sm' 'so' 't5' 't9' 'tb' 'tp' 'ty' 'tz' 'u6' 'ua' 'ui' 'uv' 'vm' 'vw' 'w2' 'wa' 'wf' 'wi' 'wj' 'wt' 'ww' 'wz' 'xg' 'y1' 'ya' 'yb' 'yd' 'yg' 'yh' 'yi' 'yk' 'yp' 'yz' 'zb' 'zk' 'zm' 'zo' 2016-05-08 02:21:22.326724 +152 '1r' '2z' '3d' '6d' 'ay' 'bb' 'bh' 'bq' 'bv' 'cn' 'cr' 'cs' 'd5' 'd9' 'dh' 'dq' 'dw' 'dz' 'e5' 'ea' 'eb' 'eh' 'er' 'fu' 'g0' 'ga' 'gc' 'h4' 'hf' 'hh' 'hx' 'ih' 'io' 'jz' 'kk' 'ko' 'li' 'lz' 'n1' 'n4' 'nn' 'nt' 'o8' 'oa' 'oe' 'oh' 'ov' 'oz' 'p2' 'p5' 'pg' 'pt' 'px' 'qa' 'qe' 'qg' 'qm' 'qt' 'qv' 'qx' 'ro' 'rp' 'rv' 's6' 'sh' 'sn' 'sx' 'sz' 't6' 'th' 'tj' 'tu' 'u5' 'uk' 'uq' 'wg' 'wp' 'wv' 'xe' 'xv' 'y4' 'yf' 'yo' 'yx' 'zr' 2016-05-08 03:21:22.326724 +153 '2s' 'a7' 'ae' 'am' 'bx' 'd5' 'de' 'do' 'ds' 'dt' 'e1' 'e3' 'e7' 'ed' 'ee' 'eg' 'ek' 'em' 'es' 'ev' 'fi' 'fw' 'g2' 'gf' 'gs' 'gu' 'gy' 'h7' 'hh' 'hi' 'hm' 'hu' 'ih' 'in' 'io' 'jr' 'js' 'jw' 'k0' 'k5' 'kb' 'l4' 'lu' 'm6' 'm8' 'mc' 'nb' 'od' 'ox' 'pc' 'pg' 'pv' 'py' 'q5' 'qc' 'qe' 'qf' 'qh' 'ql' 'qm' 'qo' 'qq' 'ra' 'rk' 'ro' 'rp' 'rr' 'ru' 'rw' 'se' 'sh' 't9' 'to' 'tq' 'tt' 'tw' 'u1' 'u4' 'ui' 'un' 'uq' 'vr' 'vy' 'vz' 
'wh' 'wp' 'ws' 'wt' 'wy' 'xt' 'y8' 'yd' 'zp' 2016-05-08 04:21:22.326724 +154 '1f' '2u' 'ap' 'as' 'ax' 'bb' 'bf' 'cg' 'di' 'du' 'g3' 'g7' 'gk' 'ha' 'hv' 'hw' 'i6' 'jw' 'kg' 'ko' 'lu' 'ob' 'pn' 'qb' 'qg' 'qi' 'qs' 'qw' 'r3' 'r7' 'r9' 'rc' 're' 'rf' 'rn' 'ru' 'rz' 'sh' 'sq' 't7' 'ta' 'td' 'u2' 'ua' 'uy' 'v8' 'va' 'wk' 'wn' 'wu' 'wy' 'xr' 'yi' 'yt' 'za' 2016-05-08 05:21:22.326724 +155 '3w' 'aa' 'ad' 'cj' 'cu' 'cv' 'dv' 'ei' 'ej' 'ep' 'er' 'fc' 'fd' 'g5' 'ga' 'go' 'gr' 'gs' 'hq' 'hx' 'hy' 'i1' 'i5' 'ie' 'ip' 'jh' 'jn' 'jq' 'k7' 'ks' 'l0' 'l6' 'l9' 'lm' 'm5' 'mj' 'ng' 'nh' 'o8' 'od' 'on' 'pq' 'q3' 'q8' 'qb' 'qc' 'qf' 'qp' 'qs' 'r1' 'ra' 'rj' 'rw' 'si' 'sp' 't6' 'ta' 'tc' 'tk' 'tm' 'ty' 'ua' 'ud' 'ug' 'v7' 'vg' 'vm' 'w3' 'wc' 'wi' 'wp' 'wy' 'xk' 'xv' 'yi' 'ym' 'ys' 'zl' 2016-05-08 06:21:22.326724 +156 '2y' 'a3' 'ac' 'ae' 'ar' 'dp' 'ec' 'f8' 'f9' 'ga' 'gq' 'gs' 'hm' 'ib' 'if' 'im' 'j0' 'na' 'pb' 'pf' 'q9' 'qa' 'qu' 'qw' 'ra' 'rj' 'rm' 'rr' 'su' 'tj' 'vw' 'wb' 'wf' 'wk' 'wo' 'ym' 'yu' 2016-05-08 07:21:22.326724 +157 'an' 'cp' 'dq' 'ej' 'ez' 'lj' 'ln' 'nu' 'qy' 'sm' 't8' 'td' 'tg' 'us' 'uw' 'vn' 'y4' 'z9' 2016-05-08 08:21:22.326724 +158 '1r' '2q' 'aa' 'ae' 'ah' 'aq' 'at' 'bx' 'c6' 'ca' 'cl' 'da' 'dp' 'ds' 'e0' 'e6' 'ea' 'eb' 'eg' 'er' 'ev' 'ew' 'fs' 'fw' 'gm' 'gx' 'hl' 'hv' 'hw' 'ib' 'iw' 'jc' 'jn' 'jr' 'ju' 'k0' 'k5' 'k8' 'kf' 'kn' 'l1' 'ln' 'lp' 'ly' 'mz' 'n3' 'nc' 'ng' 'nk' 'o8' 'oe' 'oi' 'ol' 'ot' 'pb' 'pe' 'pk' 'q0' 'qa' 'qe' 'qg' 'qi' 'ql' 'qo' 'qv' 'r0' 'r2' 'r4' 'rg' 'rj' 'rm' 'sv' 'tk' 'to' 'tt' 'u7' 'ud' 'uo' 'ur' 'us' 'uv' 'vf' 'vm' 'vn' 'w6' 'w7' 'wa' 'wb' 'wf' 'wg' 'wi' 'wv' 'ww' 'wx' 'xk' 'xy' 'yg' 'yh' 'ym' 2016-05-08 09:21:22.326724 +159 'd6' 'do' 'e4' 'eg' 'hm' 'i3' 'kg' 'nz' 'ow' 'pc' 'pv' 'q0' 'q4' 'q6' 'qx' 'r0' 'ri' 'sm' 'sn' 'tw' 'u9' 'ul' 'up' 'vk' 'we' 'wm' 'zv' 2016-05-08 10:21:22.326724 +160 'ai' 'cg' 'cs' 'dc' 'dg' 'di' 'dj' 'dk' 'dp' 'du' 'eb' 'ec' 'ee' 'ei' 'ek' 'eu' 'f2' 'fh' 'fm' 'fy' 'hc' 'hm' 'i2' 'ia' 'jj' 'ke' 'kl' 'lb' 'lc' 'ln' 'me' 'nc' 'nf' 'o6' 'oc' 'ok' 'os' 'pc' 'po' 'px' 'q2' 'q8' 'qa' 'qb' 'qd' 'qe' 'qg' 'qi' 'qo' 'qs' 'qt' 'qv' 'qx' 'r4' 'r9' 'ri' 'rn' 'rq' 'rr' 'rz' 's9' 'sf' 'sr' 'su' 'sw' 'sy' 'sz' 't2' 'ti' 'tv' 'ud' 'uv' 'wa' 'wc' 'wi' 'wk' 'wm' 'ws' 'wu' 'wv' 'wz' 'x2' 'xa' 'xf' 'y8' 'yb' 'yd' 'yl' 'yp' 'yr' 'ys' 'z5' 'zh' 2016-05-08 11:21:22.326724 +161 '1i' 'db' 'e4' 'er' 'fh' 'ft' 'hm' 'lg' 'll' 'of' 'og' 'q6' 'qj' 'r0' 're' 'th' 'up' 'ut' 'uw' 'vf' 'wq' 'ws' 'wx' 'zi' 2016-05-08 12:21:22.326724 +162 '2k' '2v' '4h' '7c' 'a3' 'aa' 'al' 'an' 'aq' 'aw' 'b7' 'bb' 'bl' 'bn' 'by' 'ca' 'cf' 'cu' 'dd' 'dn' 'e6' 'ej' 'en' 'et' 'eu' 'ey' 'g8' 'gg' 'go' 'gw' 'hj' 'ho' 'i3' 'ii' 'iy' 'ke' 'kh' 'le' 'lf' 'lu' 'ml' 'ny' 'oa' 'oc' 'og' 'oh' 'os' 'pf' 'pl' 'qb' 'qd' 'qf' 'qj' 'qn' 'qu' 'qw' 'r8' 'rd' 're' 'rh' 'sc' 'sn' 'ss' 't0' 't7' 'tl' 'tt' 'tx' 'tz' 'u9' 'uc' 'ud' 'ul' 'us' 'ux' 'vb' 'vf' 'vg' 'vp' 'w5' 'w6' 'wk' 'wt' 'wy' 'xd' 'xk' 'xm' 'xn' 'xr' 'xx' 'y0' 'yf' 'yl' 'yo' 'yw' 'zv' 2016-05-08 13:21:22.326724 +163 '3z' 'ah' 'ai' 'aq' 'bn' 'ck' 'cm' 'cq' 'ct' 'cy' 'd1' 'db' 'dn' 'e2' 'e7' 'ed' 'ee' 'eh' 'eq' 'er' 'ew' 'fc' 'fn' 'fp' 'fs' 'fw' 'fx' 'gl' 'gp' 'gw' 'ha' 'hq' 'hw' 'hx' 'i2' 'ir' 'j9' 'jb' 'jn' 'jr' 'jx' 'jy' 'kk' 'kl' 'kx' 'kz' 'li' 'm2' 'me' 'mj' 'mr' 'na' 'nu' 'oe' 'ol' 'on' 'po' 'pv' 'q7' 'qa' 'qb' 'qd' 'qh' 'qp' 'qv' 'rh' 'rz' 'sf' 'sp' 't6' 'tl' 'to' 'tx' 'ty' 'ue' 'uf' 'un' 'uu' 'uw' 'vb' 'vq' 'wg' 'wp' 'wt' 'wu' 'xb' 'xp' 'y1' 'y9' 'yi' 'yj' 'ys' 'yu' 'z7' 'zf' 'zt' 2016-05-08 14:21:22.326724 +164 'aa' 'ch' 'cm' 'db' 'dd' 'e7' 'eu' 'f5' 
'fr' 'gg' 'ie' 'jc' 'kt' 'na' 'on' 'or' 'po' 'q0' 're' 's3' 'tb' 'tc' 'td' 'tt' 'tv' 'uf' 'wi' 'wq' 'wt' 'y8' 'ys' 2016-05-08 15:21:22.326724 +165 'a5' 'ab' 'ad' 'ao' 'b6' 'ea' 'es' 'ez' 'gm' 'gv' 'i0' 'in' 'jw' 'lh' 'lm' 'my' 'oj' 'ox' 'oy' 'pv' 'q1' 'qf' 'qh' 'qj' 'qo' 'qs' 'qt' 'qw' 'r0' 'ri' 'rk' 'ry' 's4' 'vs' 'wa' 'xy' 'y5' 'yg' 'yl' 'zm' 2016-05-08 16:21:22.326724 +166 'ab' 'an' 'bm' 'bn' 'bp' 'ca' 'd9' 'dc' 'e0' 'e4' 'e5' 'eh' 'er' 'fe' 'fk' 'fv' 'ga' 'ge' 'hy' 'ic' 'ie' 'io' 'ja' 'jb' 'je' 'kp' 'ks' 'ln' 'md' 'ng' 'nr' 'oj' 'oy' 'p5' 'p6' 'p7' 'pe' 'pg' 'pu' 'qa' 'qq' 'qv' 'qw' 'qy' 'rg' 'rj' 'rk' 'sk' 'tf' 'tw' 'ui' 'um' 'uu' 'v9' 'vu' 'wo' 'wp' 'wx' 'wy' 'xf' 'y0' 'yp' 'z6' 'zi' 2016-05-08 17:21:22.326724 +167 '5i' 'a4' 'a8' 'ae' 'af' 'am' 'ap' 'ax' 'bf' 'cr' 'cw' 'dk' 'dz' 'ea' 'eb' 'ec' 'ef' 'ep' 'er' 'et' 'ex' 'fp' 'fv' 'gh' 'hy' 'hz' 'i8' 'ig' 'io' 'iq' 'ji' 'jw' 'kt' 'le' 'lo' 'mi' 'nb' 'nq' 'o1' 'ol' 'oo' 'oq' 'oy' 'q3' 'q8' 'qi' 'qk' 'qo' 'qq' 'qs' 'r0' 'rg' 'rl' 'rt' 's6' 'sb' 'sk' 'sq' 't9' 'ta' 'tb' 'tg' 'tj' 'tu' 'ty' 'un' 'ur' 'uw' 'vi' 'wf' 'wk' 'wo' 'wt' 'wy' 'xj' 'xq' 'y0' 'y1' 'y9' 'yc' 'yn' 'yu' 'zs' 2016-05-08 18:21:22.326724 +168 '4m' 'a3' 'ac' 'ag' 'ai' 'au' 'av' 'bd' 'bu' 'cl' 'd0' 'dc' 'dy' 'ej' 'el' 'eo' 'es' 'eu' 'ex' 'fr' 'fx' 'g6' 'g9' 'gk' 'gq' 'gr' 'gw' 'gx' 'gz' 'hv' 'ii' 'ix' 'jj' 'jy' 'k0' 'kl' 'kn' 'kw' 'l5' 'lo' 'lz' 'm1' 'n5' 'n9' 'nf' 'nm' 'nz' 'ol' 'os' 'ot' 'oy' 'p1' 'p9' 'pa' 'pj' 'pl' 'pp' 'qc' 'qh' 'qj' 'qr' 'qs' 'qw' 'qz' 'rg' 'ri' 'rl' 'rn' 'rp' 'rt' 'rx' 'rz' 'se' 'sl' 'sv' 'sy' 't8' 'td' 'th' 'tr' 'tx' 'u3' 'u7' 'uc' 'ud' 'ul' 'un' 'up' 'uv' 've' 'vz' 'w7' 'wr' 'y0' 'y6' 'yh' 'yo' 'yu' 'yy' 'yz' 'z6' 'zm' 2016-05-08 19:21:22.326724 +169 'cb' 'ib' 'ih' 'jx' 'oy' 'ph' 'tl' 'ty' 'vb' 'vu' 'wu' 2016-05-08 20:21:22.326724 +170 'ae' 'ak' 'ax' 'cm' 'cw' 'cx' 'df' 'dm' 'dw' 'eb' 'ef' 'ei' 'el' 'f1' 'fn' 'gu' 'hf' 'hk' 'ik' 'iw' 'jd' 'ju' 'jz' 'k7' 'ku' 'lq' 'mf' 'mw' 'oa' 'od' 'oe' 'ou' 'ow' 'pc' 'ph' 'pw' 'q0' 'q2' 'qd' 'qq' 'qw' 'qx' 'rj' 'rk' 'rn' 'rx' 's2' 'sm' 'sx' 't9' 'ti' 'tu' 'tw' 'u3' 'ud' 'wf' 'wm' 'wt' 'xl' 'xo' 'xs' 'yh' 'yi' 'ym' 'yr' 'yu' 'yw' 'z2' 'za' 'zf' 'zl' 'zz' 2016-05-08 21:21:22.326724 +171 'a0' 'a6' 'aa' 'bk' 'dk' 'dv' 'e8' 'ff' 'fo' 'go' 'hh' 'hl' 'hu' 'if' 'jx' 'kc' 'kq' 'lr' 'lx' 'n6' 'ni' 'ob' 'ol' 'ql' 'rf' 'ry' 'sm' 'sn' 't7' 'tu' 'tz' 'uc' 'um' 'uv' 'wf' 'wr' 'ws' 'wu' 'ww' 'xi' 'ym' 'z7' 'zo' 'zv' 'zz' 2016-05-08 22:21:22.326724 +172 'ec' 'fd' 'gm' 'it' 'iu' 'kx' 'l1' 'pi' 'q1' 'qe' 'qs' 'ra' 'ri' 'rp' 'tn' 'to' 'vx' 'wh' 'wl' 2016-05-08 23:21:22.326724 +173 '6y' 'aw' 'az' 'cn' 'cs' 'cx' 'd4' 'di' 'dl' 'e7' 'eg' 'eh' 'eo' 'ep' 'et' 'ew' 'ex' 'ey' 'fv' 'fz' 'gu' 'gv' 'hg' 'hl' 'hm' 'iv' 'jc' 'lb' 'me' 'nc' 'nz' 'o2' 'ox' 'pn' 'po' 'pw' 'q4' 'q5' 'qg' 'qq' 'qv' 'qz' 'r7' 're' 'rl' 'rq' 'rs' 'rx' 'sa' 'se' 'ti' 'tj' 'tn' 'tz' 'u6' 'uq' 'ur' 'us' 'v9' 'we' 'wn' 'wp' 'wr' 'ws' 'xo' 'ya' 'ye' 'yg' 'yx' 'zj' 'zl' 2016-05-09 00:21:22.326724 +174 '12' 'ao' 'ed' 'ek' 'ew' 'ey' 'fm' 'gr' 'hc' 'ht' 'io' 'ir' 'jb' 'jw' 'ke' 'ld' 'qj' 'se' 'tm' 'tn' 'tw' 'wv' 'y5' 'yt' 'z6' 2016-05-09 01:21:22.326724 +175 '6f' 'a1' 'ag' 'ak' 'ap' 'au' 'b1' 'b5' 'bi' 'c1' 'cu' 'd5' 'dc' 'dr' 'dv' 'eg' 'ej' 'ek' 'em' 'et' 'fe' 'fr' 'fz' 'ga' 'gb' 'gk' 'gu' 'gv' 'h5' 'hh' 'ho' 'hy' 'ii' 'ik' 'ip' 'iv' 'ja' 'jg' 'jz' 'k0' 'kt' 'm6' 'mj' 'nd' 'o0' 'o1' 'oj' 'or' 'ot' 'ov' 'oz' 'ph' 'pm' 'pv' 'qa' 'qe' 'qf' 'qg' 'qh' 'qj' 'qn' 'qt' 'qy' 'r5' 'rd' 'rg' 'rs' 'ru' 'rz' 's7' 'sf' 'si' 'sl' 'sw' 'sy' 't0' 't3' 'th' 'tn' 'tq' 'tu' 'ub' 'us' 'ux' 'vb' 
'vo' 'wa' 'wd' 'wn' 'wq' 'wt' 'ww' 'wx' 'y1' 'yh' 'yo' 'yq' 'yz' 'z7' 'ze' 2016-05-09 02:21:22.326724 +176 '2p' '3w' 'ah' 'at' 'bg' 'bv' 'bz' 'cu' 'd8' 'dw' 'eb' 'ec' 'ei' 'er' 'es' 'fn' 'gl' 'hd' 'hx' 'ij' 'ip' 'ki' 'lh' 'mx' 'my' 'nq' 'o3' 'o7' 'ok' 'or' 'os' 'pd' 'pu' 'q5' 'qa' 'qc' 'qd' 'qg' 'qh' 'ql' 'qs' 'qt' 'qu' 'qx' 'qy' 'rb' 'rd' 'rk' 'rs' 'ru' 'rv' 'rw' 'sn' 'tm' 'vw' 'w9' 'wa' 'wb' 'wm' 'ws' 'xe' 'xo' 'xu' 'y6' 'yh' 'yj' 'z4' 'zg' 2016-05-09 03:21:22.326724 +177 '2y' '4v' 'ac' 'at' 'av' 'bd' 'bs' 'c3' 'ca' 'cf' 'cg' 'cq' 'cw' 'cz' 'db' 'dx' 'e7' 'eg' 'ei' 'el' 'et' 'ey' 'fo' 'fq' 'fw' 'fx' 'gd' 'gl' 'gw' 'h6' 'hd' 'hp' 'hy' 'il' 'ir' 'is' 'j5' 'k4' 'k8' 'kc' 'kp' 'kz' 'l0' 'l1' 'm4' 'm6' 'mn' 'mr' 'nx' 'ov' 'ox' 'pn' 'pq' 'q3' 'qd' 'qj' 'qk' 'qo' 'qw' 'qx' 'rd' 're' 'rg' 'rq' 'rr' 'rx' 'rz' 'sb' 't0' 't3' 't4' 'tc' 'tk' 'tp' 'tr' 'tv' 'u7' 'uf' 'um' 'uo' 'uq' 'va' 'vc' 'vi' 'vy' 'vz' 'w1' 'w6' 'wo' 'wq' 'xh' 'xn' 'y5' 'yo' 'ys' 'yt' 'zs' 'zu' 2016-05-09 04:21:22.326724 +178 '3i' 'a3' 'af' 'cc' 'cs' 'cv' 'd8' 'ei' 'ej' 'ep' 'et' 'ex' 'fb' 'fe' 'fx' 'hc' 'ij' 'jf' 'jp' 'jr' 'js' 'kc' 'km' 'kz' 'lc' 'm7' 'nv' 'ob' 'oc' 'pt' 'q5' 'q7' 'q8' 'qc' 'qm' 'qn' 'qs' 'qy' 'ra' 're' 'rk' 'rx' 's0' 's4' 'se' 'sh' 'sm' 't7' 'tl' 'to' 'tz' 'ue' 'um' 'w3' 'wk' 'wp' 'ya' 'yl' 'yn' 'ys' 2016-05-09 05:21:22.326724 +179 'ah' 'c3' 'd1' 'dh' 'dz' 'ei' 'em' 'ex' 'fe' 'fk' 'g7' 'gz' 'hi' 'hx' 'i3' 'iu' 'j1' 'jm' 'k8' 'kb' 'ku' 'lf' 'lv' 'lz' 'm3' 'mn' 'my' 'nc' 'oj' 'pk' 'qh' 'qn' 'qo' 'qq' 'rf' 'ru' 'ry' 's9' 'sa' 'sc' 'sd' 'se' 'sz' 'td' 'tg' 'tq' 'tx' 'tz' 'vr' 'vu' 'w3' 'wd' 'wl' 'wp' 'x9' 'yb' 'yd' 'yr' 'yt' 2016-05-09 06:21:22.326724 +180 '6t' 'a9' 'db' 'ea' 'ec' 'ez' 'fq' 'gj' 'hb' 'hs' 'lc' 'or' 'p4' 'ph' 'pp' 'qr' 'qx' 'rc' 'rl' 'tn' 'u5' 'w9' 'x1' 2016-05-09 07:21:22.326724 +181 '5l' 'a2' 'ak' 'au' 'cj' 'cx' 'ed' 'eg' 'ej' 'em' 'es' 'ex' 'fs' 'ft' 'hm' 'i9' 'ky' 'l3' 'lz' 'mi' 'o2' 'oc' 'oe' 'ok' 'p2' 'q1' 'q5' 'qo' 'qv' 'qw' 'rf' 'rm' 'rv' 'sc' 'si' 'tx' 'uq' 'v3' 'v4' 'vo' 'w4' 'wf' 'wl' 'wq' 'wt' 'xj' 'yr' 2016-05-09 08:21:22.326724 +182 'a3' 'ag' 'ar' 'au' 'ax' 'be' 'cy' 'd7' 'dd' 'do' 'e3' 'eb' 'eo' 'er' 'ev' 'ey' 'fp' 'gj' 'hk' 'hw' 'hy' 'if' 'ig' 'ii' 'in' 'io' 'iq' 'is' 'kh' 'll' 'n8' 'nd' 'np' 'nz' 'og' 'ot' 'ox' 'oy' 'pe' 'px' 'qa' 'qb' 'qe' 'qf' 'qh' 'qj' 'qm' 'qn' 'qt' 'qu' 'qw' 'qx' 'r6' 'r9' 'rb' 'rc' 'rd' 'rn' 'rq' 'se' 't7' 'tb' 'ti' 'tq' 'u0' 'u6' 'ub' 'ud' 'vd' 'vj' 'vl' 'vo' 'wd' 'wg' 'wh' 'wi' 'wk' 'wo' 'wv' 'wz' 'xy' 'y9' 'yl' 'ym' 'yo' 'yp' 'ys' 'yu' 2016-05-09 09:21:22.326724 +183 'bs' 'cd' 'ci' 'cq' 'dd' 'el' 'ev' 'f4' 'f6' 'f7' 'fh' 'fu' 'hi' 'ib' 'kp' 'nn' 'nu' 'oc' 'os' 'qc' 'qj' 'qm' 'qn' 'qu' 'qw' 'qx' 'r7' 'rc' 'rv' 't1' 'tl' 'tr' 'u0' 'vi' 'wl' 'wp' 'wu' 'xc' 'y2' 'yu' 'zc' 'zi' 'zw' 'zy' 2016-05-09 10:21:22.326724 +184 'a7' 'bq' 'el' 'ey' 'fp' 'fu' 'fx' 'gr' 'hi' 'hl' 'jm' 'ki' 'kp' 'n6' 'o9' 'oj' 'op' 'ou' 'p7' 'pm' 'pp' 'q7' 'qa' 'qc' 'qj' 'qk' 'qm' 'si' 'st' 'tn' 'uc' 'w6' 'wp' 'xu' 'y5' 'yk' 'ys' 'yu' 'zs' 2016-05-09 11:21:22.326724 +185 'al' 'fh' 'fk' 'gy' 'he' 'ie' 'iz' 'lq' 'oh' 'pu' 'q7' 's6' 'sd' 'sr' 'sw' 'uu' 'uz' 'v6' 'ws' 'xo' 2016-05-09 12:21:22.326724 +186 'ch' 'e1' 'fi' 'g8' 'go' 'hf' 'i1' 'ic' 'in' 'it' 'j7' 'jk' 'jl' 'jv' 'nm' 'of' 'oz' 'r8' 'rc' 'rk' 'rp' 'rx' 'sp' 'tb' 'tv' 'tw' 'ul' 'wx' 'zj' 2016-05-09 13:21:22.326724 +187 'ab' 'ac' 'b9' 'br' 'c8' 'eh' 'en' 'eq' 'ev' 'ew' 'f6' 'gd' 'ik' 'j5' 'jm' 'kc' 'ke' 'ok' 'p6' 'pa' 'qv' 'ri' 'rm' 'ro' 'rp' 'rq' 'rx' 'rz' 'se' 'tb' 'td' 'ti' 'tj' 'tn' 'ul' 'wj' 'wp' 'ws' 'yh' 'zb' 'zc' 2016-05-09 
14:21:22.326724 +188 'd6' 'e4' 'ev' 'fd' 'i3' 'if' 'j1' 'j5' 'o8' 'oj' 'ok' 'pw' 'qc' 'qd' 'qf' 'qt' 't8' 'tw' 'u1' 'u7' 'wr' 'wv' 'ww' 'yh' 'yn' 'yz' 2016-05-09 15:21:22.326724 +189 '2u' 'au' 'bz' 'cd' 'cj' 'cm' 'cq' 'ct' 'cw' 'd1' 'ds' 'dw' 'dz' 'ec' 'ei' 'eo' 'fk' 'fq' 'fx' 'g6' 'gl' 'gs' 'i5' 'if' 'im' 'iq' 'jd' 'k0' 'k5' 'kr' 'lv' 'lx' 'n5' 'na' 'ny' 'ob' 'ot' 'ox' 'pa' 'pi' 'ps' 'qa' 'qc' 'qg' 'qh' 'qj' 'ql' 'qm' 'qo' 'qr' 'qs' 'r2' 'rh' 'rl' 'rr' 'rw' 'rx' 's0' 's8' 'sb' 'sc' 'sg' 'si' 'sl' 'so' 'sv' 't7' 't9' 'tc' 'te' 'tl' 'tn' 'to' 'tr' 'tx' 'ty' 'un' 'uz' 'vg' 'vw' 'ws' 'wt' 'wu' 'xj' 'xy' 'y0' 'y1' 'ya' 'ye' 'yl' 'yn' 'yr' 'ys' 'yt' 'z3' 'z6' 'zw' 2016-05-09 16:21:22.326724 +190 'bg' 'eh' 'eq' 'gg' 'gh' 'gm' 'gx' 'i7' 'iv' 'm3' 'mv' 'n3' 'o6' 'ox' 'oz' 'pb' 'qk' 'rj' 'rs' 'sk' 'su' 'tg' 'uf' 'uj' 've' 'ww' 'yf' 2016-05-09 17:21:22.326724 +191 'a3' 'ai' 'ap' 'ar' 'at' 'be' 'br' 'bz' 'cw' 'd2' 'd6' 'df' 'dk' 'do' 'dp' 'dr' 'dw' 'e2' 'ef' 'eg' 'ej' 'eq' 'fb' 'fd' 'fq' 'ge' 'gr' 'h0' 'hp' 'i8' 'ih' 'ir' 'jb' 'jd' 'kj' 'kz' 'l4' 'lm' 'lt' 'mj' 'mz' 'nc' 'ni' 'nw' 'of' 'oh' 'ol' 'ot' 'ov' 'p5' 'pf' 'pn' 'pp' 'pv' 'py' 'q2' 'q3' 'q7' 'qb' 'qc' 'qj' 'qo' 'qp' 'qr' 'qs' 'r6' 'ra' 'rc' 'rd' 'rx' 'se' 'sn' 'sr' 'sx' 't2' 't9' 'tm' 'tr' 'uj' 'vh' 'vn' 'vs' 'w0' 'w2' 'w3' 'wd' 'we' 'wj' 'wq' 'wr' 'wv' 'xe' 'xo' 'ya' 'ye' 'yq' 'yt' 'yw' 2016-05-09 18:21:22.326724 +192 '97' 'ac' 'ah' 'bn' 'c8' 'dl' 'ds' 'dw' 'e2' 'eb' 'eg' 'ej' 'ep' 'eu' 'f1' 'fe' 'g9' 'gs' 'h0' 'ha' 'he' 'hh' 'hy' 'i0' 'i8' 'i9' 'ia' 'ij' 'ip' 'jr' 'jt' 'jw' 'kq' 'lo' 'lq' 'm7' 'me' 'na' 'ns' 'o1' 'od' 'oe' 'oi' 'om' 'oz' 'pc' 'pj' 'pk' 'qh' 'qi' 'qj' 'ql' 'qo' 'qq' 'qr' 'qs' 'qt' 'qw' 'qz' 'rc' 're' 'rh' 'ri' 'rr' 'sz' 'tc' 'tf' 'tk' 'tl' 'ue' 'uh' 'uj' 'un' 'uz' 'vq' 'vr' 'vv' 'w3' 'w4' 'we' 'wk' 'wt' 'wx' 'xa' 'xh' 'xl' 'yg' 'yi' 'yj' 'ym' 2016-05-09 19:21:22.326724 +193 'ah' 'ai' 'aj' 'cb' 'cg' 'dd' 'dl' 'dp' 'eb' 'ee' 'ei' 'es' 'ey' 'f5' 'fm' 'fr' 'g0' 'gj' 'gp' 'gq' 'gw' 'hc' 'hf' 'ig' 'ij' 'iu' 'kg' 'ld' 'mr' 'nh' 'o2' 'pp' 'pz' 'qk' 'qr' 'qv' 'qz' 'r4' 'rb' 'rd' 'ro' 'rt' 'rw' 'rz' 'sr' 'sw' 'sy' 't6' 't9' 'tb' 'ts' 'tt' 'uo' 'vg' 'wd' 'wj' 'wl' 'wo' 'wp' 'wq' 'wt' 'ww' 'x8' 'yw' 'yx' 'z3' 'z9' 'zi' 'zz' 2016-05-09 20:21:22.326724 +194 'a9' 'b8' 'de' 'dl' 'eg' 'fb' 'fi' 'fm' 'gj' 'hp' 'hx' 'if' 'ig' 'ii' 'ik' 'io' 'iw' 'ix' 'j1' 'j3' 'j8' 'ji' 'ki' 'kt' 'ld' 'm8' 'mi' 'mj' 'mx' 'mz' 'n2' 'nk' 'oe' 'ok' 'oq' 'os' 'ot' 'pj' 'q4' 'qe' 'ql' 'qp' 'qq' 'r4' 'rl' 'rq' 'rx' 'sf' 'ss' 'td' 'ti' 'tl' 'u4' 'u5' 'uq' 'ux' 'v4' 'vl' 'vs' 'wb' 'wc' 'wg' 'wj' 'wn' 'ww' 'x9' 'xa' 'ye' 'yg' 'yl' 'yn' 2016-05-09 21:21:22.326724 +195 'al' 'ar' 'ax' 'be' 'bu' 'dl' 'do' 'du' 'e5' 'es' 'ey' 'fb' 'fn' 'ga' 'he' 'hh' 'ho' 'i8' 'ic' 'in' 'iy' 'j1' 'j6' 'j9' 'jk' 'jm' 'ko' 'kv' 'kz' 'lk' 'ls' 'nb' 'oe' 'ou' 'pf' 'pj' 'pm' 'pu' 'py' 'q4' 'q7' 'qe' 'ql' 'qo' 'qr' 'qt' 'rh' 'rs' 'ry' 's3' 's6' 'sl' 'sq' 'tm' 'ud' 'uf' 'ur' 'vy' 'w5' 'w8' 'wp' 'wt' 'wx' 'xq' 'yc' 'yd' 'yi' 'yl' 'ym' 2016-05-09 22:21:22.326724 +196 'b0' 'b7' 'bh' 'bt' 'c0' 'cd' 'dv' 'e5' 'ed' 'ef' 'eq' 'ff' 'fj' 'fk' 'ga' 'gt' 'gx' 'hm' 'hx' 'i1' 'ih' 'il' 'j5' 'ja' 'jf' 'jp' 'jt' 'm2' 'nf' 'nt' 'of' 'oo' 'ou' 'p7' 'pa' 'pl' 'po' 'qa' 'qd' 'qr' 'qs' 'qt' 'qx' 'rb' 'rd' 're' 'rf' 'rv' 's0' 'sj' 'sw' 'tf' 'tu' 'tz' 'u2' 'ue' 'uq' 'ur' 'us' 'vw' 'w8' 'wc' 'wo' 'wt' 'x8' 'xo' 'yd' 'zi' 'zq' 2016-05-09 23:21:22.326724 +197 '2d' '2w' 'ag' 'aj' 'am' 'ce' 'd2' 'd4' 'dd' 'dn' 'e0' 'e6' 'ef' 'ej' 'ek' 'er' 'f9' 'fd' 'fe' 'fq' 'ft' 'fv' 'gq' 'gy' 'he' 'ij' 'iq' 'it' 'iy' 'jf' 'jl' 'jr' 'ju' 
'kq' 'ku' 'lm' 'nj' 'nw' 'nx' 'oa' 'oe' 'oj' 'p2' 'pa' 'pm' 'pq' 'pv' 'q5' 'q9' 'qb' 'qd' 'qg' 'qj' 'qk' 'ql' 'qm' 'qu' 'qx' 'qz' 'rp' 'rq' 'rs' 's5' 'sk' 'ss' 'sy' 't2' 'ta' 'ts' 'tw' 'tx' 'u5' 'w3' 'w7' 'wa' 'wp' 'ws' 'wu' 'wy' 'wz' 'xz' 'y4' 'yn' 'yy' 'zt' 'zw' 2016-05-10 00:21:22.326724 +198 'ac' 'al' 'am' 'ap' 'ar' 'av' 'c1' 'cl' 'cz' 'dm' 'dp' 'dt' 'du' 'e0' 'es' 'eu' 'ev' 'ez' 'fa' 'fc' 'fj' 'fw' 'fz' 'gm' 'gx' 'ha' 'i6' 'i7' 'if' 'iz' 'j0' 'jd' 'jx' 'k0' 'kr' 'mj' 'mk' 'mp' 'n4' 'nm' 'om' 'ot' 'pl' 'px' 'qa' 'qb' 'qg' 'qk' 'qn' 'qo' 'qr' 'qu' 'r7' 'rc' 'rk' 's3' 's4' 'se' 'ss' 'su' 't8' 'ta' 'te' 'tj' 'tk' 'ts' 'tz' 'ub' 'v2' 'wb' 'wd' 'we' 'wf' 'wj' 'wq' 'wr' 'ws' 'wv' 'wx' 'y6' 'ym' 'yn' 'yq' 'ys' 'yv' 'zt' 2016-05-10 01:21:22.326724 +199 '2o' '4i' '93' 'a2' 'az' 'b5' 'bk' 'cd' 'do' 'ea' 'ed' 'ej' 'el' 'es' 'ez' 'fd' 'fk' 'fu' 'h7' 'hg' 'hr' 'hs' 'hv' 'iz' 'j0' 'j9' 'jb' 'jc' 'jt' 'ln' 'lu' 'lw' 'lz' 'mz' 'ns' 'nv' 'o8' 'oi' 'oq' 'ov' 'oz' 'pr' 'qi' 'qw' 'rb' 'rg' 'ro' 'rq' 'sb' 'sc' 't8' 't9' 'te' 'ti' 'tm' 'uo' 'uz' 'wd' 'wi' 'wn' 'wp' 'ww' 'wx' 'xt' 'xv' 'yb' 'yr' 'zs' 2016-05-10 02:21:22.326724 +200 '1p' 'a7' 'ae' 'b2' 'dd' 'ef' 'eh' 'el' 'em' 'eo' 'fc' 'g0' 'hf' 'hh' 'i8' 'i9' 'in' 'is' 'iy' 'j3' 'ja' 'jl' 'js' 'jt' 'kz' 'lk' 'ng' 'nu' 'o6' 'oc' 'oe' 'om' 'ox' 'pi' 'pp' 'pq' 'ps' 'qc' 'qe' 'qy' 'rh' 'rk' 'rn' 's3' 'sc' 'so' 'st' 'tc' 'tm' 'tp' 'tq' 'vh' 'wa' 'ya' 'zq' 2016-05-10 03:21:22.326724 +201 '4r' 'ae' 'd9' 'da' 'ef' 'em' 'en' 'gi' 'h8' 'jq' 'kf' 'kh' 'kr' 'kv' 'li' 'lq' 'lr' 'm2' 'ny' 'po' 'q0' 'q8' 'qf' 'qg' 'qu' 'qw' 'qx' 'rd' 'rh' 'ry' 'sm' 'st' 't3' 'tc' 'u5' 'un' 'uw' 'wg' 'wt' 'yt' 'z9' 'zc' 2016-05-10 04:21:22.326724 +202 'aa' 'ab' 'ag' 'ar' 'az' 'bp' 'bx' 'cx' 'eb' 'eh' 'ek' 'eo' 'eq' 'ez' 'fl' 'gm' 'gw' 'hd' 'ib' 'ig' 'jz' 'kg' 'lf' 'nw' 'oh' 'ok' 'om' 'oy' 'pg' 'ph' 'pi' 'qb' 'qi' 'qn' 'qs' 'qt' 'qy' 'rl' 'sy' 'tc' 'tt' 'up' 'ur' 'uy' 'w6' 'w8' 'wb' 'wh' 'wv' 'ww' 'y5' 'yi' 'yp' 2016-05-10 05:21:22.326724 +203 'a5' 'al' 'b6' 'bu' 'co' 'd0' 'd4' 'db' 'e0' 'e2' 'eb' 'ei' 'ej' 'el' 'et' 'ez' 'g7' 'gu' 'j6' 'jl' 'k3' 'kf' 'km' 'kw' 'kx' 'lp' 'me' 'mw' 'my' 'n2' 'n5' 'n7' 'nr' 'ny' 'oj' 'q8' 'qb' 'qg' 'qn' 'qq' 'qz' 'ra' 'rk' 'rl' 'rn' 's3' 's6' 't2' 't8' 'tk' 'tm' 'ul' 'uq' 'wg' 'y1' 'yc' 'yn' 'yq' 'yy' 'zr' 2016-05-10 06:21:22.326724 +204 'a2' 'a8' 'ae' 'b3' 'b5' 'bn' 'bq' 'd4' 'd7' 'dh' 'dl' 'dt' 'e6' 'es' 'fa' 'fb' 'ff' 'gd' 'ge' 'h8' 'hj' 'hm' 'hq' 'hw' 'hy' 'im' 'ip' 'kb' 'kh' 'ku' 'lm' 'ly' 'mf' 'nn' 'o0' 'oi' 'ok' 'or' 'pn' 'po' 'pq' 'pt' 'pv' 'q4' 'qc' 'qd' 'qu' 'qv' 'rd' 'rh' 'ri' 'rk' 'rp' 'rr' 'ru' 'si' 'sk' 'sq' 'ss' 'sx' 'sy' 'ta' 'tm' 'tq' 'tx' 'ul' 'um' 'uv' 'vj' 'vt' 'w7' 'wh' 'wi' 'wl' 'wn' 'wp' 'wq' 'ww' 'xb' 'y6' 'yq' 'z2' 'zn' 2016-05-10 07:21:22.326724 +205 '1o' '1v' '4v' 'ab' 'at' 'b5' 'bw' 'bz' 'cm' 'dc' 'dh' 'dj' 'dk' 'dq' 'e1' 'e5' 'ee' 'el' 'em' 'eo' 'ep' 'ew' 'fi' 'fy' 'gd' 'gh' 'gw' 'h7' 'i6' 'ia' 'ii' 'ip' 'is' 'iu' 'jk' 'k5' 'kb' 'mf' 'mp' 'mw' 'mx' 'n1' 'na' 'nm' 'nq' 'ob' 'of' 'pa' 'pb' 'pd' 'pu' 'q4' 'qb' 'qe' 'qf' 'qj' 'qo' 'qr' 're' 'rj' 'ro' 'rt' 'rv' 'sb' 'sp' 'st' 'ub' 'ue' 'um' 'vz' 'w3' 'w7' 'w8' 'wh' 'wo' 'wt' 'x5' 'x9' 'xd' 'xj' 'xm' 'ya' 'yb' 'yg' 'zr' 'zv' 2016-05-10 08:21:22.326724 +206 'ay' 'bg' 'bp' 'cm' 'eg' 'ev' 'ff' 'go' 'hc' 'hl' 'jj' 'l0' 'os' 'q9' 'qc' 'qh' 'qo' 'rt' 'rx' 'so' 'sr' 'to' 'tx' 'uk' 'wb' 'y4' 2016-05-10 09:21:22.326724 +207 'bj' 'dq' 'e2' 'ec' 'fs' 'g8' 'gd' 'iw' 'jt' 'nn' 'ns' 'o9' 'p0' 'tb' 'u7' 'uv' 'wf' 'wr' 'xf' 'z3' 2016-05-10 10:21:22.326724 +208 'ae' 'af' 'ay' 'da' 'dp' 'dq' 'e1' 'gi' 
'gk' 'rl' 'sf' 'ta' 'td' 'tf' 'tt' 'tw' 2016-05-10 11:21:22.326724 +209 'ad' 'bs' 'dx' 'fi' 'i5' 'ia' 'j3' 'm5' 'mn' 'nr' 'ox' 'tg' 'un' 'vo' 'wj' 'wt' 'ys' 2016-05-10 12:21:22.326724 +210 'cx' 'dg' 'do' 'dt' 'e7' 'ek' 'ez' 'fn' 'hj' 'hv' 'i2' 'i6' 'is' 'it' 'ka' 'kh' 'l5' 'lg' 'p5' 'qa' 'qf' 'qj' 'qw' 'qz' 'rl' 'sl' 'sr' 't7' 'tf' 'tu' 'uc' 'ud' 'uv' 'vr' 'w0' 'we' 'wf' 'wj' 'ws' 'ww' 'xe' 'xh' 'xn' 'y9' 'yv' 2016-05-10 13:21:22.326724 +211 '1h' '3s' 'ab' 'ae' 'ax' 'b1' 'bz' 'cy' 'dk' 'dq' 'ds' 'du' 'e8' 'ef' 'ej' 'ek' 'ex' 'f1' 'fe' 'ff' 'fn' 'fo' 'ft' 'fx' 'ge' 'go' 'gz' 'h6' 'hz' 'i2' 'iv' 'iy' 'j5' 'j6' 'ke' 'kf' 'lh' 'lr' 'mc' 'mj' 'na' 'ng' 'oh' 'om' 'oy' 'p2' 'pi' 'pk' 'py' 'q3' 'qb' 'qc' 'qg' 'qn' 'qo' 'qq' 'qu' 'qw' 'qx' 'qy' 'qz' 'r1' 'rk' 'rl' 'rq' 'rs' 'rt' 'ry' 'rz' 'sk' 'sl' 'so' 't9' 'td' 'te' 'tn' 'tw' 'tz' 'ud' 'uk' 'uo' 'uq' 'uw' 'ux' 'uy' 'v1' 'vg' 'vq' 'w4' 'w9' 'wa' 'wg' 'wj' 'wm' 'wo' 'wr' 'ww' 'wy' 'xf' 'xg' 'y9' 'yh' 'yi' 'yk' 'ym' 'yq' 'yv' 'zm' 2016-05-10 14:21:22.326724 +212 'aa' 'cb' 'd2' 'dd' 'de' 'e4' 'gd' 'go' 'hc' 'ic' 'in' 'ip' 'js' 'lm' 'o1' 'o3' 'pl' 'ra' 'rx' 'sj' 'ti' 'tu' 'tv' 'wc' 'we' 'wl' 'wq' 'wx' 'xg' 'xi' 'yc' 'yi' 2016-05-10 15:21:22.326724 +213 '8j' 'ao' 'bc' 'bh' 'co' 'cz' 'di' 'dq' 'dr' 'e1' 'ec' 'ei' 'el' 'et' 'eu' 'ex' 'f8' 'g7' 'gl' 'hq' 'hw' 'ib' 'kh' 'kn' 'kt' 'lc' 'md' 'n7' 'oe' 'p0' 'pg' 'pl' 'pm' 'po' 'pr' 'q0' 'q6' 'ql' 'qv' 'qy' 'r7' 'ra' 're' 'rq' 'ru' 'ry' 'sm' 'sn' 't4' 't5' 'tw' 'ty' 'u7' 'ud' 'uh' 'um' 'us' 'ux' 'v0' 'v7' 'vn' 'w6' 'we' 'wi' 'wm' 'x1' 'xo' 'xz' 'yn' 'yo' 'yz' 2016-05-10 16:21:22.326724 +214 '4a' 'aj' 'an' 'aq' 'ax' 'bb' 'bh' 'ci' 'cn' 'd4' 'df' 'ea' 'ef' 'ek' 'en' 'eo' 'ex' 'fc' 'fj' 'fz' 'gb' 'gl' 'h7' 'hg' 'i1' 'i4' 'ia' 'im' 'j9' 'jg' 'ji' 'jj' 'kl' 'l2' 'lb' 'ld' 'lh' 'm1' 'mf' 'n1' 'nd' 'ob' 'od' 'ot' 'ph' 'pk' 'po' 'pv' 'q1' 'q2' 'q4' 'qb' 'qu' 'qz' 'ra' 'rh' 'ro' 'rp' 's1' 'si' 'sl' 'sn' 't7' 't8' 'ta' 'tc' 'ub' 'ue' 'ul' 'v9' 'w7' 'wd' 'xq' 'y5' 'yj' 'yl' 'yp' 'yw' 'z9' 'zb' 'zu' 'zy' 2016-05-10 17:21:22.326724 +215 'a7' 'a9' 'ab' 'af' 'ah' 'bc' 'bg' 'bi' 'c2' 'cj' 'cl' 'dj' 'dn' 'do' 'dv' 'dw' 'e5' 'ec' 'es' 'f4' 'fa' 'fk' 'fv' 'h9' 'hi' 'hu' 'i5' 'ic' 'ig' 'im' 'ir' 'ji' 'jt' 'k2' 'kc' 'kd' 'ki' 'km' 'kz' 'l3' 'lh' 'li' 'lv' 'ml' 'ne' 'ni' 'nt' 'nx' 'o9' 'pm' 'pn' 'pr' 'px' 'q0' 'qa' 'qe' 'qj' 'r6' 'ra' 'rj' 'rn' 'rq' 'ru' 'ry' 's0' 'sq' 'tb' 'to' 'tw' 'ua' 'ub' 'uf' 'ui' 'un' 'uu' 'vi' 'vo' 'vv' 'w5' 'w7' 'w8' 'wo' 'wv' 'wy' 'xy' 'y2' 'ya' 'yh' 'yj' 'yo' 'za' 'zh' 2016-05-10 18:21:22.326724 +216 '16' 'ae' 'al' 'ef' 'eg' 'ew' 'gi' 'ha' 'id' 'ng' 'o3' 'on' 'p9' 'rz' 'uf' 'vg' 'wo' 'wr' 'zh' 2016-05-10 19:21:22.326724 +217 '1v' '20' 'au' 'bo' 'br' 'c9' 'cv' 'dp' 'ep' 'fo' 'gj' 'hb' 'hx' 'iv' 'iw' 'jc' 'jm' 'jw' 'k8' 'kg' 'km' 'kn' 'nd' 'o8' 'oh' 'ok' 'op' 'oy' 'px' 'pz' 'qh' 'qo' 'qp' 'qs' 'qw' 're' 'rr' 'ry' 's3' 'sj' 't7' 't9' 'tt' 'tw' 'u9' 'ue' 'us' 'uu' 'v2' 'w3' 'wa' 'wf' 'wg' 'wt' 'wv' 'xc' 'yi' 'yu' 'yv' 'z4' 'zj' 2016-05-10 20:21:22.326724 +218 'e2' 'ef' 'ev' 'fe' 'ij' 'j8' 'jm' 'kw' 'nb' 'ny' 'o6' 'o7' 'ou' 'pb' 'qd' 'qv' 'rh' 'rp' 's7' 'ti' 'ub' 'uk' 'wh' 'wi' 'wj' 'xj' 'xo' 'yx' 2016-05-10 21:21:22.326724 +219 'ao' 'as' 'bz' 'dc' 'dl' 'dn' 'du' 'dy' 'e3' 'ed' 'ee' 'ej' 'em' 'eo' 'ep' 'eq' 'er' 'fc' 'ft' 'g4' 'gc' 'gm' 'gq' 'gr' 'gy' 'hq' 'jg' 'k6' 'k9' 'ks' 'kz' 'l7' 'lo' 'lu' 'lw' 'ly' 'nb' 'oc' 'oo' 'q9' 'qc' 'qd' 'qk' 'qs' 'qv' 'ro' 'rx' 'ry' 'sk' 'sv' 'sy' 'ti' 'tk' 'ui' 'un' 'uq' 'vl' 'w4' 'wl' 'wm' 'yj' 'yo' 'yu' 'yw' 'zg' 2016-05-10 22:21:22.326724 +220 '7w' 'ch' 'd7' 'eo' 'gw' 'i4' 
'lq' 'o6' 'qt' 'y0' 2016-05-10 23:21:22.326724 +221 '0h' '42' 'ak' 'al' 'bf' 'bz' 'co' 'd3' 'dc' 'dt' 'dy' 'ed' 'ee' 'eg' 'ev' 'ew' 'fg' 'fr' 'fz' 'gi' 'gy' 'ha' 'hb' 'hd' 'hl' 'hn' 'hs' 'ht' 'ij' 'io' 'iv' 'jh' 'jl' 'jr' 'kc' 'kj' 'kt' 'ku' 'ky' 'ln' 'ml' 'mu' 'ng' 'nm' 'o0' 'o1' 'of' 'oh' 'or' 'p8' 'q8' 'qb' 'qp' 'qr' 'qt' 'qu' 'qv' 'r8' 'rj' 'rl' 'rp' 'rw' 'sk' 'ss' 'sw' 't9' 'th' 'tl' 'u5' 'uj' 'us' 'vg' 'vt' 'vz' 'w4' 'wb' 'we' 'wl' 'ww' 'wx' 'wy' 'wz' 'xj' 'yb' 'yg' 'yn' 'yp' 'ys' 'yw' 'yz' 'zk' 'zp' 'zu' 2016-05-11 00:21:22.326724 +222 '2i' 'am' 'ar' 'bl' 'by' 'ci' 'da' 'db' 'dc' 'dg' 'ea' 'ed' 'ei' 'ek' 'em' 'en' 'er' 'f2' 'f7' 'fi' 'fj' 'fu' 'fy' 'g0' 'g8' 'gn' 'gr' 'hl' 'hr' 'i2' 'it' 'ix' 'jg' 'jr' 'ju' 'jx' 'lh' 'lj' 'm8' 'n7' 'o1' 'o5' 'ob' 'og' 'ok' 'oz' 'pg' 'pl' 'pz' 'q4' 'q9' 'qf' 'qh' 'qn' 'qt' 'qu' 'qw' 're' 'ri' 'rn' 'ro' 'rp' 'rz' 's7' 'st' 'sz' 't3' 'tk' 'tw' 'u1' 'u5' 'uh' 'uy' 've' 'vh' 'w1' 'x2' 'xi' 'y4' 'ya' 'yi' 'yk' 'yn' 'zb' 'zo' 2016-05-11 01:21:22.326724 +223 'ak' 'bg' 'bk' 'du' 'eb' 'ef' 'el' 'ft' 'gb' 'gc' 'hc' 'ho' 'i6' 'iv' 'ji' 'ke' 'mo' 'oc' 'oh' 'pe' 'pj' 'q6' 'qe' 'qg' 'qs' 'qv' 'r1' 'rr' 'rx' 'tc' 'te' 'tf' 'tk' 'tq' 'ua' 'uq' 'ur' 'wq' 'wx' 'wz' 'xi' 'xq' 'yk' 'yo' 2016-05-11 02:21:22.326724 +224 'ag' 'bm' 'co' 'cr' 'd7' 'dk' 'do' 'e6' 'e9' 'eb' 'ed' 'ef' 'eh' 'ep' 'eq' 'ew' 'f4' 'fd' 'fr' 'gc' 'gg' 'gq' 'gt' 'gv' 'h0' 'h4' 'hi' 'hm' 'i4' 'ib' 'im' 'is' 'j0' 'jc' 'jd' 'jo' 'ka' 'kk' 'kx' 'm3' 'nj' 'nr' 'nu' 'o2' 'ob' 'oe' 'oi' 'om' 'ow' 'oz' 'p3' 'pd' 'pf' 'ph' 'qi' 'qj' 'qu' 'qw' 'qy' 'r2' 'ra' 'rg' 'rj' 'rm' 'rn' 'ro' 'rp' 'rr' 'ry' 'rz' 's3' 'sa' 'so' 't6' 't7' 'ta' 'tc' 'u3' 'ub' 'vj' 'vm' 'vn' 'w7' 'wb' 'wf' 'wk' 'wn' 'x2' 'yi' 'zf' 'zh' 2016-05-11 03:21:22.326724 +225 '2l' 'a4' 'ae' 'ag' 'cb' 'cs' 'cu' 'cy' 'dd' 'dj' 'ed' 'ee' 'ef' 'es' 'ex' 'ey' 'ft' 'fy' 'gs' 'h4' 'ho' 'ht' 'i2' 'i7' 'ia' 'iq' 'iz' 'jz' 'ku' 'kv' 'lq' 'mv' 'of' 'oi' 'op' 'pb' 'pd' 'pl' 'pn' 'pq' 'q0' 'q3' 'qe' 'qi' 'qp' 'qs' 'qy' 'qz' 'r3' 'ra' 'rh' 'rk' 'ry' 'sj' 'st' 'ta' 'tc' 'te' 'tf' 'th' 'ti' 'to' 'tt' 'u2' 'u5' 'vf' 'vx' 'w4' 'w7' 'wm' 'wt' 'ww' 'wx' 'x2' 'xi' 'y1' 'ye' 'yq' 'yv' 'za' 'zo' 'zy' 2016-05-11 04:21:22.326724 +226 '1v' '44' 'aj' 'al' 'am' 'ba' 'bi' 'br' 'bt' 'c0' 'd6' 'dg' 'di' 'e1' 'e4' 'ei' 'ek' 'er' 'et' 'eu' 'ev' 'ex' 'fa' 'fc' 'fe' 'fh' 'fr' 'ga' 'gh' 'gu' 'hp' 'hq' 'ib' 'if' 'io' 'ix' 'jo' 'jv' 'kd' 'l1' 'me' 'mh' 'mn' 'mo' 'mt' 'n7' 'nf' 'nk' 'oe' 'oy' 'pa' 'pg' 'pj' 'pm' 'pn' 'pq' 'py' 'q1' 'qd' 'qe' 'qi' 'qj' 'qr' 'qs' 'qz' 'rb' 'rs' 'su' 'sw' 't5' 'ti' 'tx' 'uc' 'ug' 'ui' 'uq' 'uz' 'v6' 'vp' 'we' 'wf' 'wj' 'wm' 'wx' 'xe' 'y7' 'yt' 'yy' 2016-05-11 05:21:22.326724 +227 'a6' 'ac' 'ao' 'au' 'cf' 'co' 'cr' 'd9' 'da' 'ds' 'du' 'e4' 'ea' 'ew' 'f1' 'fx' 'hr' 'hu' 'ic' 'id' 'ii' 'in' 'iq' 'iy' 'j3' 'jb' 'ji' 'jl' 'kf' 'mi' 'mp' 'o3' 'oe' 'oj' 'on' 'p5' 'ph' 'pk' 'q1' 'q9' 'qa' 'qs' 'qu' 'qv' 'r9' 'rc' 'rj' 'rv' 's1' 'se' 'sl' 'su' 't2' 't5' 'tk' 'u8' 'uv' 'v9' 'vd' 'vi' 'w0' 'w3' 'we' 'wh' 'wm' 'wp' 'xm' 'y0' 'y1' 'y8' 'yf' 'yo' 'yw' 'zv' 2016-05-11 06:21:22.326724 +228 'aw' 'dz' 'el' 'hb' 'iu' 'lc' 'm9' 'r5' 'rt' 'uz' 'x2' 'zp' 2016-05-11 07:21:22.326724 +229 'ap' 'i2' 'k0' 'n4' 'ou' 'px' 'qe' 'qo' 'qr' 't1' 'yz' 2016-05-11 08:21:22.326724 +230 '3p' '5x' 'ag' 'au' 'aw' 'dl' 'eg' 'f2' 'f8' 'fb' 'fn' 'g9' 'ge' 'gt' 'hd' 'i7' 'ia' 'ij' 'is' 'jc' 'ku' 'kx' 'l7' 'lb' 'lq' 'mc' 'o3' 'oj' 'q1' 'qa' 'qg' 'qt' 'r9' 'rg' 'tw' 'tz' 'u6' 'us' 'w2' 'wr' 'wt' 'wu' 'x3' 'xx' 2016-05-11 09:21:22.326724 +231 '4k' 'af' 'ah' 'b7' 'bj' 'by' 'ci' 'df' 'ds' 'eh' 'em' 'eq' 
'ey' 'ez' 'fu' 'fv' 'g1' 'ga' 'go' 'gs' 'gy' 'gz' 'ho' 'i0' 'ie' 'ir' 'iv' 'k5' 'kj' 'ks' 'kw' 'l4' 'li' 'm0' 'mo' 'o8' 'ol' 'oq' 'os' 'ox' 'pk' 'pl' 'pq' 'q3' 'q6' 'qa' 'qg' 'qj' 'qn' 'qq' 'qr' 'qu' 'r5' 'r9' 'rb' 'ri' 'rl' 'rs' 'ry' 's7' 's9' 'sa' 'sd' 'sx' 'ta' 'tc' 'td' 'tf' 'tg' 'th' 'ti' 'to' 'tq' 'tv' 'uo' 'ut' 've' 'vt' 'w8' 'wg' 'wm' 'ws' 'wy' 'xy' 'y8' 'ya' 'yc' 'yf' 'ys' 'yu' 'zy' 2016-05-11 10:21:22.326724 +232 '1h' '1j' '1o' '2d' 'ac' 'ak' 'b6' 'cx' 'd3' 'ds' 'dv' 'e7' 'es' 'ev' 'ez' 'fs' 'fx' 'fy' 'gc' 'gh' 'gs' 'gt' 'hk' 'ho' 'i8' 'id' 'ig' 'ii' 'iq' 'j5' 'jd' 'je' 'jg' 'jk' 'jl' 'jo' 'jr' 'kf' 'kl' 'kq' 'lf' 'lh' 'lz' 'mb' 'mu' 'ni' 'nz' 'o4' 'ol' 'p7' 'pl' 'pm' 'pv' 'q5' 'qa' 'qe' 'qh' 'qi' 'ql' 'qq' 'qw' 'ra' 'rb' 'rn' 's5' 'sf' 'sh' 'so' 'sp' 'sq' 'sw' 'tj' 'tu' 'ty' 'u3' 'ue' 'uo' 'ux' 'v8' 'vo' 'vr' 'vz' 'w4' 'w8' 'wa' 'wk' 'wl' 'wo' 'wr' 'wu' 'ww' 'xs' 'xx' 'yb' 'yf' 'yn' 'yt' 'yw' 'zf' 'zg' 'zs' 2016-05-11 11:21:22.326724 +233 '1m' '37' 'a8' 'ab' 'at' 'ce' 'cx' 'd6' 'dk' 'e9' 'eh' 'ei' 'el' 'eo' 'eu' 'ew' 'ex' 'f4' 'fh' 'fr' 'fs' 'fy' 'g6' 'gf' 'gl' 'gm' 'gp' 'h8' 'ha' 'hf' 'hi' 'hl' 'hr' 'ie' 'ir' 'j6' 'jd' 'jr' 'jv' 'kz' 'l1' 'lg' 'ln' 'lp' 'ls' 'm0' 'ma' 'mi' 'mo' 'mu' 'na' 'np' 'o8' 'oc' 'oe' 'oh' 'ot' 'oy' 'ps' 'pw' 'q5' 'qa' 'qe' 'qg' 'qh' 'qi' 'qq' 'qz' 'rd' 'rg' 'ru' 'rv' 's9' 'sr' 'sv' 'sy' 'sz' 'ts' 'ty' 'uc' 'uh' 'uj' 'uk' 'up' 'v9' 'vc' 'vg' 'vt' 'vy' 'wl' 'wm' 'wv' 'wy' 'wz' 'xg' 'yg' 'yv' 'yw' 'yy' 'z0' 2016-05-11 12:21:22.326724 +234 'av' 'bh' 'cd' 'cw' 'dv' 'em' 'gn' 'iw' 'ja' 'ki' 'lc' 'lx' 'my' 'oi' 'ox' 'q0' 'qb' 'qi' 'qn' 'uf' 'ux' 'we' 'wn' 'xd' 'xq' 'y4' 'y9' 'zf' 'zs' 2016-05-11 13:21:22.326724 +235 '11' '3o' '7a' 'au' 'bc' 'bx' 'cg' 'dz' 'e5' 'ea' 'eb' 'ee' 'eg' 'eo' 'er' 'eu' 'g2' 'gq' 'gx' 'i4' 'in' 'ip' 'iv' 'k4' 'kd' 'kw' 'lf' 'ls' 'oq' 'ot' 'oz' 'p7' 'ph' 'pk' 'ps' 'qa' 'qd' 'qf' 'qi' 'qj' 'qr' 'qt' 'r4' 'rb' 'rd' 'rl' 'th' 'tl' 'tx' 'uc' 'uj' 'v6' 'vj' 'vz' 'we' 'wj' 'wl' 'wm' 'wp' 'wy' 'wz' 'xj' 'xk' 'xl' 'ya' 'yj' 'yl' 'yo' 'yw' 'zs' 'zv' 'zw' 2016-05-11 14:21:22.326724 +236 '16' '64' 'aj' 'aw' 'bf' 'c0' 'ca' 'd2' 'd6' 'ec' 'ed' 'eq' 'ew' 'go' 'h2' 'in' 'is' 'j0' 'jd' 'ji' 'kv' 'l5' 'lp' 'lz' 'og' 'pb' 'pw' 'q9' 'qb' 'qe' 'qf' 'qg' 'qn' 'qp' 'qt' 'qy' 'r7' 'rh' 'rw' 'sz' 't8' 'tb' 'te' 'tx' 'ui' 'un' 'uq' 'wa' 'wp' 'wu' 'wy' 'yn' 'ys' 2016-05-11 15:21:22.326724 +237 '41' '7r' 'a2' 'ad' 'aj' 'ak' 'ao' 'as' 'b1' 'b2' 'cn' 'db' 'eb' 'ec' 'ee' 'em' 'ev' 'ex' 'f8' 'fa' 'fo' 'fs' 'fu' 'gh' 'gk' 'gr' 'gx' 'iq' 'iv' 'j9' 'jb' 'jc' 'jr' 'kf' 'lg' 'm1' 'mq' 'mt' 'ne' 'nv' 'o5' 'od' 'os' 'ox' 'pc' 'pj' 'pq' 'q1' 'q6' 'q7' 'q8' 'qk' 'qm' 'qn' 'qp' 'qs' 'rj' 'rk' 's5' 'sb' 'sk' 'sn' 'sv' 't4' 'tb' 'th' 'ti' 'uc' 'uj' 'ut' 'vh' 'vp' 'wg' 'wi' 'wk' 'wp' 'ws' 'wv' 'wx' 'wy' 'x6' 'xa' 'xg' 'xr' 'xy' 'y6' 'yj' 'yo' 'yv' 'yw' 'zt' 'zw' 2016-05-11 16:21:22.326724 +238 '3b' '4p' 'ae' 'aj' 'ap' 'b3' 'bb' 'c0' 'cc' 'cf' 'cn' 'dz' 'e1' 'e9' 'eo' 'ew' 'ey' 'fk' 'fo' 'gd' 'gp' 'hb' 'i8' 'ia' 'ib' 'ie' 'ij' 'iq' 'ir' 'ix' 'jo' 'kp' 'lc' 'ld' 'lp' 'ml' 'on' 'p8' 'pd' 'pf' 'pi' 'pn' 'pr' 'pt' 'pw' 'qe' 'qi' 'qj' 'qp' 'qr' 'qw' 'qz' 'r0' 'rj' 'rn' 'ro' 'rq' 'rr' 's3' 'sg' 'sz' 'tn' 'tw' 'ty' 'ui' 'uj' 'vj' 'vr' 'w5' 'w9' 'wa' 'wp' 'xt' 'ya' 'ym' 'z0' 'zr' 2016-05-11 17:21:22.326724 +239 '1f' '3z' '43' 'am' 'b9' 'bg' 'cc' 'ct' 'cx' 'd0' 'de' 'dm' 'dr' 'e0' 'e8' 'ef' 'en' 'ev' 'fd' 'ff' 'fx' 'ga' 'gm' 'gp' 'gr' 'gs' 'gy' 'h1' 'hi' 'hl' 'hs' 'i4' 'i5' 'ic' 'jb' 'jj' 'kj' 'lh' 'ng' 'ni' 'nn' 'ns' 'nx' 'o1' 'oa' 'oe' 'og' 'p4' 'pd' 'ph' 'pj' 'pl' 'pw' 'q9' 'qa' 'qd' 
'qh' 'ql' 'qr' 'qs' 'qw' 'qx' 'ro' 'rs' 'rw' 'ry' 'rz' 'sb' 'sj' 'sm' 'so' 't0' 't6' 'tc' 'ti' 'tk' 'tn' 'uk' 'um' 'uw' 'uy' 'v9' 'vd' 'vg' 'w3' 'wf' 'wg' 'wi' 'wk' 'wm' 'wx' 'xe' 'xm' 'xn' 'y5' 'ye' 'yk' 'yq' 'z3' 'zj' 'zk' 'zt' 'zz' 2016-05-11 18:21:22.326724 +240 '1s' '95' 'ac' 'aq' 'cb' 'ch' 'cq' 'ct' 'cz' 'dg' 'dj' 'eb' 'ed' 'ee' 'eg' 'en' 'eo' 'ep' 'es' 'fc' 'fl' 'fp' 'gu' 'gw' 'hx' 'i0' 'i2' 'ig' 'ih' 'il' 'in' 'iu' 'iy' 'iz' 'jc' 'k7' 'ka' 'kl' 'km' 'kn' 'lz' 'mx' 'n6' 'nc' 'og' 'pt' 'pw' 'q0' 'qa' 'qe' 'qf' 'ql' 'qm' 'qp' 'qt' 'r7' 'rp' 'ru' 'rw' 'sb' 'sd' 't0' 't7' 'ta' 'tl' 'tn' 'tv' 'ty' 'ui' 'vx' 'wa' 'wc' 'wm' 'wp' 'wy' 'xi' 'xj' 'xk' 'yz' 2016-05-11 19:21:22.326724 +241 '6n' 'da' 'dl' 'dx' 'el' 'ew' 'ff' 'fl' 'gc' 'iu' 'jh' 'jt' 'kt' 'lz' 'pd' 'q6' 'qj' 'ql' 'r3' 'ra' 'rn' 'ry' 'sg' 'tf' 'tp' 'tt' 'ub' 'ye' 'yf' 'yl' 'yp' 2016-05-11 20:21:22.326724 +242 '14' '1t' 'aa' 'ab' 'ag' 'aq' 'ax' 'bc' 'cb' 'cm' 'd1' 'db' 'ea' 'ek' 'en' 'ey' 'f0' 'g6' 'g7' 'gx' 'ha' 'ho' 'i1' 'i6' 'i8' 'it' 'iw' 'jl' 'jp' 'ko' 'kt' 'la' 'le' 'mb' 'mk' 'mt' 'n6' 'na' 'ob' 'oc' 'od' 'on' 'op' 'or' 'ot' 'q6' 'qd' 'qf' 'qk' 'qp' 'qq' 'qr' 'qw' 'qy' 'r5' 'rf' 'rk' 'se' 'sp' 'sx' 't0' 'tj' 'tk' 'tq' 'tv' 'tx' 'u0' 'ub' 'ue' 'uf' 'um' 'us' 'uu' 'v9' 'vi' 'wb' 'wo' 'wu' 'wy' 'x9' 'xs' 'xu' 'y0' 'yo' 'zi' 2016-05-11 21:21:22.326724 +243 '1i' 'ap' 'bh' 'ce' 'cp' 'di' 'dm' 'dt' 'es' 'eu' 'f2' 'f3' 'fw' 'fx' 'g5' 'gf' 'gy' 'h2' 'hl' 'i4' 'ii' 'iy' 'kp' 'lh' 'lr' 'me' 'of' 'og' 'ok' 'on' 'oo' 'pb' 'pd' 'pt' 'qf' 'qk' 'qz' 'r4' 'r7' 're' 'ri' 'rj' 'ro' 'rx' 's2' 'tk' 'tl' 'tq' 'tx' 'ub' 'ui' 'us' 'vm' 'w1' 'w7' 'wi' 'wj' 'wq' 'wx' 'xp' 'yg' 'yr' 'yx' 2016-05-11 22:21:22.326724 +244 '3y' 'ak' 'am' 'b8' 'cp' 'd7' 'df' 'di' 'eg' 'em' 'en' 'eo' 'es' 'f6' 'fc' 'gg' 'gt' 'gv' 'hd' 'i3' 'jk' 'k6' 'la' 'lc' 'ol' 'ov' 'ow' 'p5' 'pl' 'qc' 'qf' 'rl' 'rn' 'rt' 'rx' 't1' 'tf' 'tj' 'tz' 'ug' 'uo' 'we' 'wg' 'wk' 'wp' 'wv' 'y7' 2016-05-11 23:21:22.326724 +245 'ae' 'ai' 'di' 'ea' 'eb' 'ec' 'gw' 'h8' 'hb' 'iw' 'iz' 'jf' 'jg' 'ks' 'le' 'p8' 'pl' 'pp' 'qd' 'qg' 'qm' 'qn' 'qu' 'qy' 'r4' 'rf' 'rq' 's1' 'sc' 'sq' 'v8' 'vh' 'vk' 'wd' 'x7' 'y7' 'ym' 'yp' 'yr' 'yw' 2016-05-12 00:21:22.326724 +246 'a7' 'ag' 'aq' 'aw' 'az' 'bf' 'bg' 'ce' 'cp' 'cx' 'dc' 'ek' 'en' 'es' 'fj' 'gb' 'hr' 'hs' 'ie' 'ik' 'is' 'jl' 'jr' 'kc' 'ku' 'l5' 'lj' 'lq' 'lr' 'nu' 'og' 'oj' 'os' 'oy' 'p1' 'p6' 'pc' 'pk' 'pr' 'pu' 'px' 'py' 'q1' 'q4' 'qb' 'qc' 'ql' 'qo' 'qv' 'qz' 'r9' 'ra' 'rg' 'rp' 'sq' 'ts' 'tx' 'u8' 'ue' 'ui' 'uj' 'uk' 'up' 'v4' 'vg' 'vn' 'w2' 'w9' 'wa' 'wk' 'wt' 'y0' 'y9' 'yc' 'ym' 'ys' 'zh' 2016-05-12 01:21:22.326724 +247 'af' 'aq' 'at' 'bh' 'cc' 'd3' 'dd' 'dg' 'dq' 'e1' 'e6' 'ec' 'ee' 'ex' 'ey' 'go' 'gt' 'h3' 'h8' 'hd' 'he' 'hm' 'i7' 'ii' 'ik' 'io' 'ip' 'it' 'iz' 'jg' 'k0' 'k5' 'k7' 'kb' 'ks' 'kw' 'ln' 'lp' 'lx' 'm3' 'nf' 'nh' 'of' 'og' 'oj' 'ok' 'ow' 'p7' 'pd' 'pj' 'pm' 'q1' 'q7' 'qg' 'qh' 'qk' 'qq' 'qr' 'qt' 'qw' 'rr' 'rs' 'ry' 's4' 's7' 'sd' 'si' 'sl' 'sp' 'sw' 'sz' 't6' 't8' 'tc' 'u2' 'um' 'uo' 'ux' 'vb' 'w2' 'wb' 'wi' 'wj' 'ws' 'wt' 'wx' 'x3' 'xf' 'xt' 'yb' 'ym' 'zb' 'ze' 'zm' 2016-05-12 02:21:22.326724 +248 '3g' 'af' 'aj' 'ak' 'ap' 'as' 'au' 'cp' 'cx' 'dh' 'dn' 'dr' 'ds' 'ej' 'en' 'eo' 'et' 'eu' 'ex' 'fq' 'fs' 'ft' 'gw' 'h5' 'i5' 'ix' 'j6' 'jc' 'jg' 'k1' 'kb' 'kh' 'kn' 'kv' 'lj' 'lu' 'mc' 'mi' 'na' 'nq' 'ns' 'o4' 'o7' 'of' 'q5' 'q7' 'qe' 'qh' 'qi' 'qk' 'qr' 'qu' 'qw' 'r9' 'rb' 'ri' 'rx' 's3' 'sf' 'sm' 'so' 'sq' 'ss' 'su' 'sy' 't7' 'ta' 'ti' 'tn' 'tr' 'tx' 'u1' 'u3' 'ue' 'uh' 'up' 'uw' 'uy' 'va' 'w6' 'wg' 'wm' 'wp' 'ws' 'wy' 'x1' 'y4' 'y6' 'yk' 'ys' 
'yy' 'zf' 'zv' 2016-05-12 03:21:22.326724 +249 'as' 'dj' 'dq' 'e3' 'ej' 'es' 'fn' 'fp' 'fu' 'ga' 'hb' 'hy' 'jz' 'l6' 'mp' 'ps' 'q4' 'qd' 'qm' 'qw' 'qy' 'qz' 'sa' 'sv' 'td' 'tn' 'tu' 'tx' 'ud' 'ue' 'uq' 'uv' 'v0' 'wi' 'wk' 'wm' 'wx' 'xh' 'y3' 'yd' 'yi' 'yn' 'yx' 'yy' 'zc' 2016-05-12 04:21:22.326724 +250 'ar' 'ei' 'kq' 'ma' 'qa' 'qh' 'qq' 'qz' 'rx' 'st' 2016-05-12 05:21:22.326724 +251 'a1' 'aa' 'c1' 'd7' 'dc' 'df' 'dh' 'e4' 'e9' 'ec' 'es' 'et' 'eu' 'ev' 'f3' 'g2' 'gu' 'he' 'il' 'j1' 'j6' 'jt' 'jv' 'ke' 'm8' 'm9' 'mh' 'ng' 'o8' 'on' 'pe' 'pf' 'pi' 'pn' 'qf' 'qn' 'qu' 'qv' 'qw' 'rh' 'rl' 'rr' 'ru' 'sg' 'sk' 'uk' 'ul' 'ux' 'vd' 'vj' 'wk' 'yd' 'yn' 'yx' 'z0' 2016-05-12 06:21:22.326724 +252 'af' 'as' 'at' 'aw' 'ax' 'ay' 'az' 'b2' 'bf' 'bl' 'ck' 'cs' 'df' 'dn' 'dv' 'ei' 'ek' 'ev' 'fg' 'fm' 'h0' 'hb' 'hk' 'i0' 'ib' 'if' 'ir' 'lk' 'm5' 'mr' 'np' 'om' 'p5' 'p7' 'pl' 'pq' 'q3' 'qh' 'qi' 'qj' 'qo' 'qp' 'qs' 'qt' 'qu' 'qv' 'qy' 'ra' 'sh' 'sm' 'so' 't2' 'ta' 'ti' 'tv' 'tz' 'u1' 'ug' 'um' 'v5' 'vd' 'w1' 'wn' 'wp' 'wr' 'ws' 'ww' 'xk' 'xx' 'y7' 'yf' 'yh' 'yk' 'yt' 'zr' 'zv' 2016-05-12 07:21:22.326724 +253 '3a' '5k' 'a2' 'ah' 'au' 'ba' 'bb' 'bh' 'bu' 'cb' 'cj' 'cv' 'cx' 'df' 'dy' 'e4' 'e6' 'ed' 'ep' 'et' 'ev' 'ez' 'f1' 'fb' 'fl' 'fx' 'fz' 'gg' 'h5' 'hj' 'io' 'iz' 'ja' 'k7' 'kf' 'lu' 'lv' 'md' 'ne' 'nh' 'oh' 'on' 'ow' 'p0' 'p8' 'pc' 'px' 'q3' 'qa' 'qf' 'qg' 'qj' 'qr' 'qs' 'qt' 'qv' 'qx' 'qz' 'r3' 'rm' 'rq' 'rt' 'rv' 'sa' 'sf' 'so' 't1' 't7' 'tb' 'tn' 'tq' 'tr' 'ts' 'tu' 'tz' 'u7' 'uf' 'uk' 'um' 'ut' 'va' 'vj' 'vm' 'vx' 'vz' 'wa' 'we' 'wf' 'xe' 'xg' 'ya' 'yb' 'yu' 'zg' 'zo' 'zt' 'zz' 2016-05-12 08:21:22.326724 +254 'aa' 'av' 'ay' 'bn' 'by' 'cm' 'da' 'dd' 'dj' 'dk' 'dr' 'dz' 'eu' 'ev' 'ez' 'f1' 'f8' 'gr' 'he' 'hp' 'hs' 'hw' 'iz' 'k1' 'kc' 'km' 'ko' 'kt' 'ln' 'ls' 'mx' 'n3' 'nl' 'oe' 'oj' 'om' 'os' 'oy' 'pg' 'pr' 'pt' 'pv' 'qa' 'qc' 'qe' 'qi' 'qj' 'qm' 'qn' 'qq' 'qu' 'qx' 'qz' 'r0' 'r1' 'rf' 'rs' 'rx' 'ry' 's4' 'sg' 't2' 'tq' 'tt' 'tv' 'ty' 'ua' 'uj' 'uq' 'vi' 'vk' 'vx' 'wc' 'we' 'wi' 'wl' 'wo' 'wp' 'wv' 'ww' 'xi' 'xr' 'xs' 'y9' 'yg' 'ym' 'yr' 'yt' 'yz' 'zo' 'zw' 2016-05-12 09:21:22.326724 +255 '8p' 'ao' 'aq' 'c1' 'dh' 'dt' 'e9' 'eo' 'ev' 'fp' 'fu' 'gc' 'gz' 'hk' 'i5' 'id' 'ip' 'iy' 'jg' 'jr' 'k2' 'lo' 'mb' 'mf' 'oi' 'ou' 'qp' 'qr' 'qw' 'qx' 'ra' 'rj' 'tl' 'ui' 'uu' 'uy' 'vb' 'wh' 'yt' 2016-05-12 10:21:22.326724 +256 'aa' 'am' 'av' 'dt' 'du' 'eh' 'em' 'ev' 'ex' 'gu' 'h0' 'i4' 'i8' 'ku' 'la' 'lo' 'lw' 'n0' 'n9' 'on' 'pc' 'pl' 'pn' 'pr' 'q8' 'qc' 'ql' 'qn' 'qz' 'ra' 'rd' 'ry' 'sp' 'tn' 'ts' 'u4' 'vg' 'vs' 'w7' 'wj' 'wm' 'xl' 'yg' 'yh' 'z0' 'zs' 2016-05-12 11:21:22.326724 +257 'ab' 'ah' 'ax' 'bd' 'ca' 'dh' 'e3' 'ea' 'ed' 'ef' 'en' 'eo' 'er' 'ev' 'ex' 'ez' 'f5' 'fh' 'fo' 'fv' 'ga' 'gb' 'gk' 'id' 'ik' 'in' 'ir' 'jp' 'js' 'jv' 'kf' 'kr' 'lx' 'mu' 'mz' 'n5' 'od' 'on' 'pr' 'pt' 'pv' 'qb' 'qe' 'qj' 'ql' 'qq' 'qu' 'qw' 'rd' 'rl' 'rz' 's4' 'sv' 'ta' 'tj' 'tk' 'u8' 'ui' 'uj' 'uk' 'um' 'ux' 'uz' 'v9' 'vh' 'vl' 'w7' 'wb' 'wi' 'wm' 'ws' 'xi' 'ye' 'yg' 'yr' 'yw' 'zj' 'zn' 'zs' 2016-05-12 12:21:22.326724 +258 'an' 'as' 'cm' 'dh' 'dk' 'do' 'ds' 'dv' 'e1' 'eh' 'ek' 'el' 'er' 'ew' 'ff' 'fo' 'fq' 'g2' 'gz' 'h0' 'hi' 'hk' 'hm' 'hs' 'i4' 'ij' 'iy' 'j5' 'jj' 'jk' 'jw' 'kq' 'kx' 'ky' 'lo' 'lp' 'lw' 'ly' 'm2' 'm6' 'md' 'mv' 'ng' 'of' 'om' 'oy' 'p7' 'pr' 'q7' 'qc' 'ql' 'qq' 'qs' 'qy' 'qz' 'r2' 'r9' 'ra' 'rn' 'ro' 'rq' 'rv' 'rx' 'ry' 'rz' 's0' 'si' 'sy' 'sz' 't6' 't7' 'to' 'tt' 'tv' 'tx' 'ub' 'ur' 'uv' 've' 'vj' 'w3' 'w8' 'w9' 'wa' 'wb' 'wc' 'we' 'wm' 'wt' 'wu' 'wx' 'wy' 'y9' 'yd' 'yg' 'ym' 'yp' 'yt' 'yu' 'yy' 2016-05-12 13:21:22.326724 
+259 '1i' '3z' 'b6' 'bh' 'bn' 'bu' 'cm' 'do' 'dt' 'ef' 'eg' 'ek' 'eo' 'et' 'ex' 'f9' 'g3' 'gj' 'gl' 'h7' 'hp' 'i8' 'ih' 'im' 'in' 'jn' 'k1' 'k7' 'kd' 'lq' 'ms' 'mv' 'n7' 'nj' 'nt' 'o4' 'o9' 'ov' 'q2' 'qa' 'qh' 'qk' 'ql' 'qp' 'qu' 'qv' 'rj' 's8' 'sf' 'sh' 't7' 't8' 'um' 'un' 'uo' 'ut' 'uu' 'w1' 'wa' 'wf' 'wi' 'wx' 'xj' 'yc' 'yo' 'yt' 2016-05-12 14:21:22.326724 +260 '8d' 'a2' 'af' 'ap' 'as' 'cg' 'cr' 'd4' 'dg' 'du' 'dv' 'eg' 'ei' 'el' 'em' 'fg' 'ft' 'g1' 'gt' 'h1' 'hg' 'i5' 'i8' 'ih' 'im' 'ir' 'iw' 'jd' 'jf' 'js' 'jw' 'jz' 'k8' 'ko' 'ky' 'la' 'lr' 'mq' 'no' 'ox' 'p8' 'p9' 'pb' 'pv' 'pw' 'px' 'q0' 'q1' 'q5' 'q7' 'qd' 'qh' 'qj' 'ql' 'qn' 'qo' 'qq' 'qx' 'qy' 'rc' 'rd' 'rf' 'rh' 'rr' 'rs' 'sd' 'sw' 'sz' 't3' 'tf' 'tn' 'to' 'tv' 'tx' 'ty' 'ua' 'ub' 'ud' 'uk' 'ul' 'us' 'uu' 'ux' 'vl' 'vs' 'wf' 'wg' 'wl' 'wq' 'wt' 'wu' 'wx' 'x6' 'xi' 'yg' 'yh' 'z7' 'ze' 2016-05-12 15:21:22.326724 +261 'ak' 'ar' 'cm' 'cq' 'da' 'df' 'e3' 'ew' 'fe' 'fr' 'gm' 'id' 'io' 'iy' 'jg' 'jt' 'jx' 'kb' 'lf' 'nv' 'od' 'qy' 'r3' 'r4' 'ro' 'rq' 'rw' 'sl' 'ss' 'su' 'sw' 't7' 'th' 'ti' 'tp' 'u9' 'uq' 'v9' 'vq' 'vw' 'we' 'wg' 'wk' 'ws' 'xr' 'yf' 2016-05-12 16:21:22.326724 +262 '5h' 'a2' 'ad' 'ag' 'ar' 'cf' 'ch' 'dd' 'e6' 'ei' 'el' 'em' 'f3' 'fx' 'ga' 'gm' 'h8' 'hi' 'hl' 'hm' 'ix' 'jb' 'l5' 'm2' 'mh' 'mm' 'mu' 'n9' 'nf' 'nq' 'nw' 'o4' 'oe' 'pa' 'pl' 'q1' 'qb' 'qd' 'qi' 'qj' 'qk' 'qu' 'qy' 'r0' 'r3' 'r8' 'rg' 'rj' 'rt' 's1' 'sa' 'ss' 'sx' 'tb' 'tc' 'tj' 'uf' 'uh' 'um' 'uo' 'ut' 'vf' 'vo' 'w4' 'w7' 'wf' 'wk' 'wp' 'wq' 'wr' 'wt' 'wy' 'wz' 'xk' 'xt' 'ye' 'yv' 'zd' 'zp' 'zw' 2016-05-12 17:21:22.326724 +263 'ar' 'c3' 'c6' 'e0' 'e9' 'ed' 'ex' 'fh' 'fi' 'gu' 'ha' 'hr' 'id' 'jr' 'kk' 'kr' 'kz' 'm0' 'mp' 'ng' 'om' 'pa' 'qa' 'qf' 'rj' 'sp' 'to' 'tu' 'ty' 'ub' 'ud' 'ug' 'uk' 'vj' 'vp' 'wf' 'wl' 'wu' 'x6' 'xs' 'y0' 'ya' 2016-05-12 18:21:22.326724 +264 '5z' 'ai' 'al' 'ax' 'cd' 'dr' 'e1' 'ep' 'fi' 'gh' 'gk' 'ha' 'hc' 'ht' 'i5' 'ic' 'id' 'iq' 'iz' 'kk' 'kt' 'ld' 'mi' 'oh' 'ov' 'pn' 'q2' 'q7' 'qm' 'qy' 'ri' 'ru' 'rv' 'sq' 'u8' 'u9' 'un' 'wq' 'wt' 'yd' 'yk' 'ys' 2016-05-12 19:21:22.326724 +265 'aj' 'as' 'aw' 'bc' 'bj' 'bm' 'cd' 'd7' 'd9' 'di' 'ef' 'er' 'es' 'ew' 'fi' 'ft' 'g2' 'gq' 'gx' 'h9' 'he' 'hg' 'hu' 'i8' 'ie' 'ik' 'im' 'iq' 'ix' 'iy' 'jf' 'ji' 'ka' 'kl' 'kt' 'la' 'lf' 'm8' 'm9' 'nh' 'nw' 'o7' 'oi' 'pi' 'ps' 'q6' 'qa' 'qb' 'qf' 'qm' 'qo' 'qq' 'qs' 'r9' 'rb' 'rf' 'rk' 'rx' 'sa' 'sb' 'sp' 't1' 'tb' 'tl' 'tm' 'u6' 'uc' 'um' 'un' 'ur' 'w3' 'wj' 'wu' 'xm' 'xr' 'xy' 'ya' 'yd' 'ye' 'yf' 'yj' 'yo' 'ys' 'zc' 'zg' 'zl' 'zv' 'zz' 2016-05-12 20:21:22.326724 +266 '1a' '4p' 'bn' 'br' 'dk' 'ec' 'en' 'eu' 'gd' 'h0' 'ha' 'hm' 'if' 'il' 'io' 'ip' 'is' 'iz' 'jl' 'jq' 'k6' 'kd' 'kf' 'kn' 'kp' 'ld' 'lf' 'lt' 'n8' 'na' 'nl' 'o1' 'pd' 'pr' 'q8' 'qf' 'qr' 'qw' 're' 'rx' 'sa' 'sf' 'ta' 'tq' 'ux' 'v3' 'w6' 'wp' 'x2' 'y9' 'yi' 'yw' 'yx' 'z5' 'zl' 'zs' 2016-05-12 21:21:22.326724 +267 'c8' 'db' 'dh' 'fh' 'g7' 'gm' 'if' 'ih' 'jd' 'li' 'ms' 'mt' 'no' 'or' 'p7' 'pc' 'qb' 'qm' 'sh' 'tk' 'uf' 'uz' 'vb' 'vp' 'wh' 'wr' 'wv' 'xh' 'xm' 'xp' 'zj' 2016-05-12 22:21:22.326724 +268 '1p' '51' 'aj' 'av' 'bj' 'bn' 'c1' 'dx' 'ex' 'gz' 'he' 'ia' 'ic' 'ip' 'kn' 'mx' 'o6' 'or' 'ql' 'rc' 'wf' 'wi' 'wn' 'y8' 'yr' 2016-05-12 23:21:22.326724 +269 '8l' 'au' 'ay' 'bn' 'bs' 'ch' 'co' 'cu' 'dq' 'dv' 'eq' 'ev' 'ff' 'fp' 'fr' 'fz' 'gb' 'gu' 'hb' 'he' 'hj' 'id' 'ih' 'jb' 'kg' 'lm' 'ls' 'nm' 'oe' 'ot' 'p6' 'pb' 'ps' 'q0' 'q8' 'qg' 'qh' 'qj' 'qo' 'qp' 'qr' 'qt' 'qz' 'r1' 're' 'rh' 'rp' 'ry' 's1' 's4' 'sc' 'sd' 'sn' 'tf' 'ty' 'u4' 'us' 'vt' 'vx' 'wc' 'wd' 'ws' 'wv' 'xp' 'y2' 'y5' 'y6' 'yy' 'zh' 'zx' 
2016-05-13 00:21:22.326724 +270 'au' 'bo' 'dz' 'ek' 'eq' 'et' 'fa' 'hw' 'id' 'im' 'kr' 'p4' 'qx' 'rb' 'rx' 'sf' 'tl' 'tx' 'uf' 'ui' 'uw' 'vr' 'wb' 'wn' 'xw' 'yd' 2016-05-13 01:21:22.326724 +271 '5q' '7c' 'a1' 'ac' 'ao' 'az' 'bw' 'cz' 'd8' 'dr' 'dy' 'ec' 'eg' 'ej' 'el' 'es' 'et' 'fa' 'fh' 'g2' 'gm' 'go' 'gp' 'gy' 'h6' 'hd' 'he' 'ho' 'ij' 'ik' 'it' 'jc' 'ji' 'k8' 'ku' 'l8' 'lf' 'mw' 'n8' 'nb' 'nd' 'o3' 'ok' 'ol' 'ot' 'p8' 'pi' 'pl' 'pn' 'ps' 'qd' 'qe' 'qk' 'ql' 'qt' 'qz' 'r0' 'r7' 'rd' 'rg' 'rh' 'rm' 'ro' 'rp' 'ru' 'rz' 'sj' 'sm' 'sr' 'su' 'sv' 'sx' 't1' 'tc' 'tl' 'tq' 'uj' 'um' 'uv' 'uw' 'wa' 'wb' 'wi' 'wo' 'wr' 'ws' 'wv' 'wz' 'xb' 'xg' 'ya' 'yb' 'yh' 'yi' 'ym' 'yn' 'yr' 'yv' 'z6' 'zf' 'zz' 2016-05-13 02:21:22.326724 +272 '1j' 'ag' 'c3' 'cl' 'e4' 'ef' 'eh' 'f5' 'fi' 'gy' 'hx' 'lw' 'oa' 'pu' 'qa' 'qi' 'qt' 'qu' 'rl' 'ro' 'rs' 'sg' 'uq' 'wq' 'ya' 'yh' 'ys' 2016-05-13 03:21:22.326724 +273 '6d' 'ao' 'bo' 'cq' 'e0' 'fi' 'fm' 'h6' 'i2' 'jl' 'kn' 'mj' 'nv' 'oq' 'p6' 'qd' 'qi' 'qn' 'r2' 'r8' 'rd' 'sd' 'sl' 'ta' 'tp' 'ub' 'uh' 'w3' 'wh' 'y1' 'yj' 'zk' 2016-05-13 04:21:22.326724 +274 '2a' '4s' 'c4' 'cd' 'dh' 'dv' 'eh' 'ek' 'ew' 'ex' 'ez' 'fm' 'hh' 'hj' 'ig' 'jf' 'ld' 'ly' 'mg' 'mx' 'o5' 'or' 'pa' 'pv' 'q1' 'qe' 'qj' 'qq' 'qr' 'qw' 'rb' 'rr' 's4' 's9' 'tm' 'uj' 'wc' 'wv' 'x0' 'xt' 'yc' 'ye' 'yh' 'yi' 'yk' 'yy' 2016-05-13 05:21:22.326724 +275 'aa' 'af' 'ee' 'hb' 'ih' 'j2' 'lv' 'mw' 'pp' 'q3' 'rd' 'tb' 'td' 'ua' 'ug' 'up' 'xh' 'yy' 2016-05-13 06:21:22.326724 +276 'ad' 'ar' 'az' 'b7' 'cf' 'cm' 'ct' 'cw' 'cy' 'dh' 'dn' 'ds' 'ef' 'en' 'eo' 'er' 'fh' 'fi' 'fq' 'fr' 'fx' 'gp' 'gq' 'gu' 'gx' 'h8' 'hf' 'hj' 'hk' 'ho' 'hw' 'hy' 'i3' 'i4' 'ik' 'iu' 'iy' 'jj' 'kn' 'l8' 'lb' 'lg' 'lo' 'm7' 'mx' 'nj' 'nt' 'o6' 'ob' 'oh' 'ok' 'ot' 'pr' 'pz' 'q7' 'q8' 'qa' 'qd' 'qg' 'qh' 'qi' 'qn' 'qz' 'r7' 're' 'ri' 'rk' 'ry' 's4' 'sa' 'sd' 'sm' 'sn' 'sp' 'sw' 'sy' 'tf' 'th' 'to' 'tr' 'tv' 'tz' 'u0' 'u1' 'u5' 'ue' 'uk' 'uq' 'vb' 'vp' 'vr' 'vu' 'wa' 'wb' 'wg' 'wi' 'wk' 'wm' 'wt' 'ya' 'yj' 'yl' 'z3' 2016-05-13 07:21:22.326724 +277 'ak' 'bg' 'bj' 'bn' 'co' 'e6' 'e7' 'ec' 'ek' 'el' 'ew' 'ez' 'fo' 'im' 'jm' 'js' 'lc' 'lh' 'ob' 'oi' 'os' 'ot' 'ou' 'p0' 'pu' 'q0' 'qh' 'ra' 'rc' 'rm' 'rr' 'sd' 'sq' 'tc' 'tl' 'wj' 'wy' 'x2' 'xw' 'y6' 'yp' 'yv' 'zj' 2016-05-13 08:21:22.326724 +278 'al' 'ar' 'cl' 'dl' 'dz' 'ej' 'fj' 'gu' 'i2' 'i6' 'ih' 'jx' 'k9' 'ln' 'ls' 'mf' 'mx' 'oi' 'om' 'pn' 'q4' 'qd' 'qe' 'qg' 'qk' 'qp' 'qr' 'qw' 'qx' 'rr' 'sd' 'sf' 'ub' 'uh' 'um' 'uu' 'vg' 'w6' 'wj' 'wp' 'ws' 'yn' 'zc' 'zm' 2016-05-13 09:21:22.326724 +279 'ab' 'ag' 'aw' 'b4' 'd4' 'ds' 'e1' 'ea' 'eb' 'ee' 'ef' 'eh' 'ek' 'el' 'en' 'eu' 'ex' 'fv' 'fx' 'gi' 'gp' 'gt' 'i3' 'ie' 'jd' 'jk' 'jn' 'kv' 'l7' 'n8' 'ns' 'nx' 'o2' 'o3' 'oh' 'os' 'ow' 'p2' 'pr' 'pu' 'q3' 'qd' 'qe' 'qk' 'qq' 'qz' 'r9' 'rb' 'ri' 'rm' 'rr' 'rw' 'ry' 'sc' 'sj' 'sl' 'sx' 'tk' 'tr' 'tw' 'u4' 'ub' 'ud' 'uf' 'uh' 'un' 'uv' 'v6' 'vc' 'vf' 'vi' 'wg' 'wr' 'ws' 'wv' 'x7' 'xf' 'xo' 'y0' 'yi' 'yo' 'yt' 'yy' 'za' 'zm' 'zu' 'zx' 2016-05-13 10:21:22.326724 +280 '1w' '5i' 'a4' 'a6' 'bz' 'de' 'dg' 'dt' 'ed' 'ei' 'ej' 'ey' 'fa' 'ff' 'gg' 'hi' 'hj' 'ho' 'i2' 'ii' 'ij' 'jq' 'jy' 'ka' 'kr' 'kw' 'lv' 'm7' 'nd' 'ot' 'ov' 'oy' 'oz' 'pe' 'pj' 'qb' 'qj' 'qq' 'qt' 'qv' 'r0' 'r3' 'r4' 'ri' 'rj' 'si' 'ta' 'tc' 'ts' 'tx' 'u3' 'uj' 'ur' 'v6' 'vh' 'w3' 'w6' 'wc' 'wg' 'wq' 'x5' 'xv' 'yb' 'yt' 'yx' 'zl' 2016-05-13 11:21:22.326724 +281 'a8' 'ad' 'ai' 'al' 'as' 'av' 'ay' 'az' 'bf' 'bg' 'c6' 'dc' 'dd' 'dz' 'ef' 'eh' 'eu' 'ft' 'g0' 'g4' 'gf' 'gm' 'hq' 'im' 'iw' 'ix' 'j5' 'jj' 'jn' 'jv' 'k1' 'ko' 'ma' 'n0' 'nw' 'o9' 'om' 'op' 'ox' 'p4' 'pa' 
'pe' 'pm' 'ps' 'q2' 'qb' 'qc' 'qf' 'qi' 'ql' 'qu' 'qw' 'qy' 'qz' 'rl' 'rn' 'rs' 'ru' 'se' 'sf' 'sm' 't8' 'tc' 'tg' 'uq' 'uz' 'va' 'vj' 'wa' 'we' 'wg' 'wk' 'wq' 'wu' 'wy' 'xb' 'xo' 'y3' 'ye' 'ym' 'ys' 'yt' 'z8' 'zd' 'zo' 2016-05-13 12:21:22.326724 +282 '7k' 'et' 'ew' 'ez' 'g9' 'gk' 'he' 'hp' 'ip' 'jp' 'jq' 'jr' 'jv' 'kb' 'kk' 'ku' 'lx' 'o0' 'o3' 'oa' 'or' 'ou' 'q7' 'qa' 'qf' 'qg' 'qh' 'qo' 'ro' 'rx' 'tl' 'us' 'vk' 'vw' 'wc' 'wp' 'x0' 'xz' 'yg' 'z5' 2016-05-13 13:21:22.326724 +283 '1s' 'a0' 'ac' 'aj' 'am' 'ao' 'aw' 'bi' 'bm' 'by' 'ca' 'cu' 'dc' 'di' 'dp' 'e0' 'e9' 'eg' 'eq' 'fh' 'fi' 'gc' 'gi' 'gq' 'gx' 'h4' 'he' 'hk' 'i2' 'ix' 'jc' 'jl' 'jm' 'k1' 'kf' 'kg' 'kn' 'kq' 'la' 'nj' 'nw' 'o9' 'og' 'p0' 'p6' 'pj' 'pn' 'pp' 'q4' 'qf' 'ql' 'qm' 'qs' 'qt' 'qu' 'qz' 'r5' 'r6' 'ri' 'ro' 'rw' 'sv' 'sx' 't5' 'th' 'tl' 'tp' 'u0' 'ue' 'uf' 'ug' 'un' 'uq' 'vx' 'wd' 'we' 'wg' 'wj' 'wk' 'wp' 'wr' 'ws' 'wt' 'x2' 'xq' 'xt' 'xw' 'xx' 'ya' 'yk' 'yl' 'yr' 'yt' 'za' 'zh' 2016-05-13 14:21:22.326724 +284 '4y' '6h' 'a0' 'a6' 'bo' 'bq' 'co' 'cv' 'dv' 'ec' 'ee' 'eh' 'ei' 'en' 'er' 'ew' 'f1' 'fk' 'fq' 'fy' 'ga' 'gj' 'gp' 'gv' 'gx' 'hv' 'i0' 'i5' 'ij' 'ik' 'in' 'kb' 'ks' 'kw' 'kz' 'la' 'lh' 'lq' 'ls' 'mu' 'nl' 'og' 'oi' 'om' 'pf' 'pu' 'pv' 'q6' 'q9' 'qe' 'qk' 'ql' 'qo' 'qq' 'qu' 'qw' 'r5' 'rb' 'rn' 'rr' 'rt' 's4' 'sc' 'sg' 'si' 'sm' 'sn' 'sp' 'ss' 'tb' 'tc' 'tg' 'tk' 'tm' 'tr' 'tt' 'tu' 'tv' 'u0' 'u3' 'uj' 'un' 'uz' 'vp' 'w6' 'w8' 'wc' 'we' 'wi' 'wt' 'wx' 'xp' 'xz' 'y0' 'y4' 'z0' 'zj' 'zy' 2016-05-13 15:21:22.326724 +285 'ad' 'af' 'ar' 'b0' 'bw' 'c4' 'cn' 'do' 'e0' 'e4' 'em' 'eo' 'fn' 'ga' 'gg' 'gy' 'hf' 'ht' 'id' 'ig' 'ik' 'iv' 'j6' 'mi' 'n5' 'ng' 'ob' 'og' 'ou' 'oy' 'p4' 'pd' 'pg' 'pk' 'po' 'pt' 'pw' 'q3' 'q5' 'qb' 'qr' 'qt' 'r3' 'rc' 'rd' 'rl' 's0' 'sy' 't9' 'tc' 'tm' 'tq' 'ub' 'up' 'uv' 'ux' 'vm' 'vv' 'vw' 'wc' 'wg' 'wk' 'wm' 'x0' 'xp' 'xr' 'xv' 'xx' 'yg' 'ym' 'yw' 2016-05-13 16:21:22.326724 +286 '1d' 'a4' 'ai' 'av' 'b9' 'be' 'c9' 'cc' 'cy' 'de' 'ee' 'g8' 'gg' 'gl' 'gs' 'gu' 'hb' 'kl' 'kr' 'ky' 'mi' 'mz' 'nd' 'og' 'op' 'pr' 'q0' 'q5' 'qk' 'qy' 'r4' 'sk' 'sz' 't1' 't8' 'td' 'tn' 'tx' 'ty' 'tz' 'uc' 'up' 'vq' 'wa' 'ws' 'wv' 'x3' 'yn' 'yq' 'yy' 'zm' 2016-05-13 17:21:22.326724 +287 'a1' 'aj' 'bi' 'cd' 'cw' 'd2' 'e4' 'e9' 'gz' 'ij' 'k1' 'kb' 'kn' 'lw' 'me' 'nj' 'oq' 'p4' 'ph' 'ps' 'q3' 'q4' 'qj' 's7' 'sp' 'sx' 'tc' 'tw' 'v1' 'vj' 'vt' 'w7' 'wa' 'wb' 'wp' 'y1' 'zc' 2016-05-13 18:21:22.326724 +288 'a1' 'a5' 'aw' 'b5' 'cd' 'dz' 'em' 'eq' 'eu' 'fx' 'gk' 'hi' 'hq' 'ju' 'k8' 'kl' 'kw' 'la' 'lk' 'lm' 'm1' 'mj' 'mp' 'nz' 'o6' 'o8' 'oc' 'q0' 'q1' 'qg' 'qh' 'qw' 'rc' 'rd' 'ro' 'rr' 'sg' 'sy' 'th' 'ti' 'tt' 'uk' 'uo' 'ut' 'vy' 'wg' 'wi' 'x7' 'yr' 'yu' 'yx' 'yz' 'zr' 'zz' 2016-05-13 19:21:22.326724 +289 'a2' 'a9' 'ac' 'bw' 'cu' 'dm' 'e8' 'ee' 'ej' 'ep' 'f7' 'fh' 'ga' 'go' 'im' 'it' 'jv' 'kg' 'lb' 'ml' 'oi' 'p5' 'p9' 'pe' 'pf' 'pp' 'qk' 'rg' 'rh' 'sn' 'sv' 't5' 'tj' 'tk' 'tq' 'ty' 'v6' 'vk' 'w2' 'w5' 'wh' 'wy' 'y7' 'yf' 'zq' 'zt' 2016-05-13 20:21:22.326724 +290 '70' 'e0' 'ep' 'ex' 'hh' 'jn' 'kg' 'nq' 'rg' 'rj' 'uf' 'vs' 'ys' 2016-05-13 21:21:22.326724 +291 '4j' 'af' 'ap' 'bn' 'ch' 'dc' 'df' 'e0' 'ef' 'eg' 'er' 'g3' 'hh' 'hi' 'hw' 'i1' 'if' 'ii' 'in' 'iy' 'l1' 'mx' 'og' 'px' 'q3' 'qc' 'qg' 'qt' 'qy' 'r3' 'r6' 'rd' 'rj' 'rz' 'sm' 't2' 't4' 'td' 'tj' 'tm' 'tx' 'ty' 'u9' 'uo' 'up' 'v8' 'wv' 'ye' 'zb' 'zc' 'zi' 'zy' 2016-05-13 22:21:22.326724 +292 'cy' 'ey' 'gr' 'lq' 'n1' 'pp' 'pq' 'qb' 'qe' 'se' 'wb' 2016-05-13 23:21:22.326724 +293 'gn' 'gx' 'hf' 'ji' 'kx' 'nh' 'o6' 'pe' 'q1' 'qt' 'rw' 'sc' 'ss' 'yh' 'zm' 2016-05-14 00:21:22.326724 +294 
'2e' '3y' 'ap' 'as' 'bl' 'di' 'dl' 'e9' 'eh' 'er' 'ff' 'fg' 'fh' 'gc' 'gg' 'hp' 'hq' 'i3' 'ih' 'jm' 'kw' 'lc' 'li' 'ls' 'nm' 'ok' 'pc' 'pn' 'q1' 'qf' 'qi' 'qk' 'r0' 'rl' 'sp' 'sy' 'sz' 'ta' 'to' 'ts' 'tv' 'uh' 'uk' 'vb' 'wa' 'wg' 'wi' 'wp' 'yl' 'yq' 'yw' 'yy' 2016-05-14 01:21:22.326724 +295 'af' 'am' 'az' 'ce' 'ci' 'dd' 'e5' 'eb' 'ee' 'eh' 'fe' 'fj' 'fv' 'ge' 'gl' 'hp' 'hx' 'i5' 'i8' 'ia' 'ig' 'jb' 'jm' 'jv' 'k6' 'kg' 'kv' 'li' 'ls' 'lx' 'm2' 'md' 'mz' 'o0' 'oe' 'oy' 'pv' 'q5' 'qh' 'qm' 'qo' 'qy' 'rf' 'ri' 'rp' 'rz' 'su' 'tj' 'tz' 'u2' 'uh' 'ur' 'vg' 'we' 'wi' 'xj' 'yb' 'yx' 'yz' 'z0' 2016-05-14 02:21:22.326724 +296 '1o' 'ah' 'ap' 'ba' 'bn' 'cs' 'cu' 'd2' 'd8' 'dg' 'dr' 'ed' 'ee' 'er' 'et' 'eu' 'ey' 'ff' 'g1' 'hj' 'i3' 'i5' 'il' 'in' 'jd' 'kd' 'ku' 'lm' 'lv' 'mu' 'nu' 'ok' 'ol' 'oo' 'ov' 'oy' 'p8' 'pu' 'qb' 'qq' 'qx' 'r5' 'rm' 'rp' 'rv' 'sh' 'sk' 'sl' 'sr' 'tx' 'uy' 'v3' 'vu' 'w3' 'wa' 'wb' 'wv' 'xh' 'xq' 'ys' 'yt' 'yx' 'z2' 'zi' 2016-05-14 03:21:22.326724 +297 'a2' 'a7' 'aq' 'bv' 'cy' 'd0' 'df' 'dg' 'do' 'ei' 'ek' 'ev' 'ey' 'fg' 'ge' 'gg' 'h4' 'hv' 'im' 'iq' 'ix' 'j4' 'j5' 'jt' 'kv' 'nc' 'o2' 'ou' 'ow' 'ph' 'pz' 'qf' 'qj' 'qm' 'qo' 'qq' 'qs' 'ra' 'rl' 'rz' 'te' 'tj' 'tp' 'tr' 'ts' 'tx' 'u5' 'ue' 'uj' 'uk' 'uo' 'us' 'v7' 'w2' 'wa' 'wu' 'wv' 'ww' 'wz' 'y2' 'y8' 'yy' 'zg' 2016-05-14 04:21:22.326724 +298 '1n' '2h' 'at' 'ax' 'c0' 'cn' 'dc' 'df' 'dg' 'e1' 'e5' 'ea' 'eq' 'ex' 'ez' 'f7' 'f8' 'fa' 'gt' 'gv' 'gy' 'hb' 'i3' 'i8' 'id' 'iw' 'ix' 'jc' 'jg' 'k0' 'kh' 'ky' 'kz' 'lj' 'lm' 'lq' 'mj' 'o8' 'og' 'op' 'ow' 'p5' 'p9' 'qc' 'qe' 'qf' 'qg' 'qo' 'qq' 'qv' 'qz' 'rq' 'rt' 'ru' 's2' 's5' 'sf' 'sl' 'sm' 'st' 'sw' 'tc' 'tl' 'ts' 'tz' 'ud' 'ur' 've' 'wd' 'wj' 'wn' 'wq' 'xa' 'xb' 'yf' 'yj' 'yn' 'yt' 'yv' 'zf' 2016-05-14 05:21:22.326724 +299 '4j' 'bi' 'bk' 'c0' 'c5' 'dh' 'dy' 'e8' 'fw' 'g3' 'g8' 'h6' 'hd' 'hn' 'jl' 'kc' 'kj' 'km' 'll' 'lo' 'nh' 'nj' 'nt' 'ob' 'on' 'oq' 'or' 'ox' 'ph' 'qc' 'qj' 'ql' 'qm' 'qo' 'qu' 'qz' 'sr' 't7' 'tc' 'ug' 'uq' 'wn' 'wr' 'ws' 'yv' 'z3' 'zj' 'zz' 2016-05-14 06:21:22.326724 +300 'ab' 'ap' 'ay' 'b6' 'bg' 'bm' 'bq' 'br' 'ce' 'co' 'dt' 'e4' 'ef' 'ej' 'ek' 'ep' 'fj' 'fq' 'fu' 'g3' 'gg' 'gp' 'hk' 'i3' 'i9' 'je' 'k7' 'ku' 'li' 'lq' 'lt' 'lx' 'mc' 'mz' 'n7' 'nl' 'o1' 'of' 'os' 'pm' 'qc' 'ql' 'qs' 'qw' 'r6' 'rf' 'rj' 'rp' 'ru' 'rx' 'rz' 'tl' 'tw' 'ty' 'u1' 'u3' 'ud' 'un' 'uw' 'vu' 'w6' 'wf' 'wl' 'wq' 'xh' 'xm' 'yc' 'yh' 'yr' 'yw' 'z9' 2016-05-14 07:21:22.326724 +301 '1b' 'cp' 'di' 'ed' 'fv' 'gx' 'hs' 'i8' 'lh' 'lq' 'lz' 'p7' 'p8' 'pc' 'ql' 'qn' 'qw' 're' 'rg' 'tv' 'we' 'wp' 'yr' 'zj' 2016-05-14 08:21:22.326724 +302 'h7' 'in' 'is' 'o7' 'q0' 'qa' 'qq' 'rl' 'rs' 'tu' 'u9' 'v3' 'vr' 'yq' 'zy' 2016-05-14 09:21:22.326724 +303 '6w' 'af' 'ak' 'b7' 'bf' 'bi' 'bv' 'cx' 'dy' 'es' 'ey' 'ez' 'f0' 'fe' 'fw' 'ge' 'gt' 'iw' 'j9' 'jh' 'kh' 'l0' 'l1' 'l8' 'm5' 'nv' 'o6' 'pw' 'qi' 'r0' 'se' 'ta' 'tf' 'th' 'to' 'u8' 'ux' 'vm' 'w4' 'w6' 'wa' 'wt' 'wu' 'xo' 'xu' 'yo' 'z0' 'zc' 2016-05-14 10:21:22.326724 +304 '6q' 'ac' 'ag' 'av' 'aw' 'az' 'bj' 'bq' 'bx' 'cj' 'dc' 'dd' 'dq' 'eg' 'ei' 'ek' 'em' 'f7' 'fg' 'fq' 'fx' 'gb' 'gp' 'hb' 'hk' 'i9' 'ii' 'im' 'ip' 'ir' 'iz' 'jx' 'le' 'lf' 'll' 'lx' 'mp' 'mv' 'of' 'oq' 'pb' 'pg' 'pj' 'py' 'pz' 'qi' 'qk' 'ql' 'qs' 'qu' 'qw' 'r8' 'ra' 'rc' 'rj' 'sj' 'tg' 'tk' 'tl' 'tm' 'tp' 'tw' 'u4' 'uf' 'ui' 'um' 'vu' 'w7' 'wd' 'wn' 'wt' 'wy' 'wz' 'y1' 'y3' 'y9' 'yl' 'yo' 'yq' 'yu' 'zb' 'zu' 'zv' 2016-05-14 11:21:22.326724 +305 '6h' '7f' 'ab' 'aq' 'ax' 'bc' 'bl' 'ce' 'cm' 'dn' 'do' 'e1' 'ec' 'ed' 'en' 'fc' 'fl' 'g1' 'ga' 'ge' 'gn' 'h8' 'i0' 'id' 'iq' 'iw' 'jc' 'ji' 'lt' 'mj' 'nm' 'o0' 
'o6' 'oo' 'pa' 'pb' 'pt' 'q6' 'qf' 'qg' 'qz' 'r1' 'r2' 'r8' 'rd' 'rf' 's9' 'se' 't8' 'tc' 'te' 'u3' 'u5' 'uc' 'ug' 'ul' 'us' 'uu' 'v2' 'vz' 'wp' 'x8' 'ye' 'yq' 'yw' 2016-05-14 12:21:22.326724 +306 '5k' 'a1' 'ab' 'an' 'ar' 'as' 'av' 'ax' 'br' 'c2' 'c7' 'd5' 'da' 'dl' 'dr' 'dz' 'e3' 'ec' 'ed' 'ek' 'el' 'em' 'eu' 'ew' 'fd' 'ff' 'fw' 'g8' 'gb' 'gl' 'hh' 'hs' 'hz' 'i6' 'ia' 'ig' 'ii' 'ik' 'iq' 'ix' 'j0' 'j9' 'jf' 'jl' 'jo' 'jw' 'ko' 'kt' 'lm' 'nl' 'nm' 'ov' 'p3' 'p6' 'p7' 'pg' 'pl' 'pn' 'pp' 'qd' 'qe' 'qf' 'qn' 'qo' 'qq' 'qs' 'qt' 'qv' 'r9' 'rf' 'rj' 'rt' 'rw' 'sa' 'sl' 't2' 'tg' 'tk' 'tq' 'ty' 'ua' 'ud' 'vi' 'vm' 'w2' 'w6' 'wb' 'wd' 'wf' 'wi' 'wl' 'wq' 'wr' 'wy' 'x5' 'x8' 'y0' 'y1' 'ys' 'yx' 2016-05-14 13:21:22.326724 +307 'a3' 'a4' 'a5' 'bd' 'co' 'cq' 'd8' 'da' 'dn' 'do' 'du' 'dv' 'ed' 'ei' 'en' 'eo' 'es' 'et' 'f1' 'fa' 'ha' 'hr' 'i6' 'id' 'ie' 'if' 'ir' 'jx' 'kd' 'kh' 'mb' 'mq' 'mv' 'o6' 'oa' 'or' 'oy' 'pl' 'pr' 'q3' 'q8' 'qa' 'qb' 'qc' 'qd' 'qk' 'qs' 'ra' 'rc' 'rp' 'rz' 'se' 'tg' 'tj' 'u2' 'uq' 'uv' 'uy' 'vm' 'vz' 'wh' 'wu' 'xf' 'xs' 'yc' 'ye' 'yl' 'yn' 'z1' 'zf' 2016-05-14 14:21:22.326724 +308 'a3' 'ch' 'cs' 'e1' 'gq' 'gx' 'lz' 'nh' 'os' 'po' 'qs' 'rr' 'tx' 'ud' 'uj' 'uv' 've' 'w0' 'wj' 'xo' 'xz' 2016-05-14 15:21:22.326724 +309 'ab' 'ad' 'al' 'cm' 'cw' 'dh' 'dn' 'dy' 'e8' 'ea' 'ed' 'ej' 'eq' 'er' 'eu' 'ev' 'f5' 'fb' 'fj' 'fr' 'gc' 'gp' 'h2' 'h6' 'hc' 'hw' 'i3' 'i5' 'id' 'ir' 'ix' 'ka' 'kj' 'ko' 'lv' 'lx' 'mj' 'mv' 'nq' 'ns' 'oa' 'od' 'ok' 'om' 'os' 'p1' 'p5' 'pb' 'pg' 'pj' 'pl' 'ps' 'pu' 'q1' 'qb' 'qd' 'qe' 'qj' 'qk' 'ql' 'qr' 'qs' 'rd' 'rl' 'ro' 'rs' 'rx' 's1' 'sf' 'sg' 't0' 'tb' 'te' 'tf' 'tg' 'tl' 'tp' 'tq' 'u3' 'uf' 'ug' 'up' 'uq' 'vx' 'vy' 'w5' 'wa' 'wc' 'wr' 'ws' 'wu' 'wx' 'wy' 'wz' 'yf' 'ym' 'yz' 'ze' 'zs' 'zy' 'zz' 2016-05-14 16:21:22.326724 +310 '4f' 'e4' 'ep' 'fa' 'ff' 'iv' 'j4' 'kw' 'oj' 'pa' 'pw' 'q0' 'rv' 'ry' 't3' 'ul' 'vq' 'w1' 'wj' 'xm' 'ye' 'yu' 2016-05-14 17:21:22.326724 +311 'di' 'ec' 'i4' 'iq' 'iw' 'ko' 'l3' 'mw' 'py' 'qo' 'rx' 'wt' 'yl' 2016-05-14 18:21:22.326724 +312 '1q' '2n' '4p' '8n' 'a3' 'aa' 'bn' 'bq' 'bu' 'cg' 'db' 'dl' 'dx' 'dy' 'e2' 'e3' 'e4' 'ea' 'eq' 'ff' 'fk' 'fn' 'fv' 'gf' 'gh' 'h1' 'hs' 'hu' 'ij' 'in' 'iv' 'kc' 'ke' 'ln' 'm2' 'mc' 'mf' 'o2' 'og' 'oo' 'ov' 'p8' 'pl' 'pm' 'ps' 'pv' 'pz' 'qa' 'qc' 'qe' 'qh' 'qi' 'qj' 'qm' 'qo' 'qq' 'qv' 'qw' 'qy' 're' 'rz' 'sd' 'si' 'sj' 'sp' 'su' 'tc' 'tk' 'tn' 'tq' 'u7' 'ut' 'uv' 'v5' 'vr' 'wb' 'we' 'wm' 'ws' 'wv' 'xc' 'xy' 'y9' 'yj' 'yq' 'yv' 'yw' 'zb' 'zw' 'zz' 2016-05-14 19:21:22.326724 +313 '2a' '5d' 'ag' 'ar' 'at' 'dc' 'fa' 'fh' 'gn' 'gx' 'hh' 'iy' 'kd' 'ke' 'ld' 'nd' 'nx' 'oz' 'qc' 'qd' 'qo' 'qp' 'qu' 'r0' 'rg' 't5' 'tc' 'tg' 'th' 'tk' 'w2' 'w5' 'wt' 'yl' 'zn' 2016-05-14 20:21:22.326724 +314 '30' 'cl' 'dl' 'e7' 'ee' 'ef' 'eh' 'el' 'fk' 'fn' 'fs' 'gi' 'gw' 'i2' 'il' 'l7' 'm6' 'm9' 'mf' 'o8' 'ob' 'ok' 'pc' 'pe' 'qb' 'qe' 'qn' 'qr' 'tu' 'uc' 'ud' 'wp' 'xq' 'ym' 'ys' 2016-05-14 21:21:22.326724 +315 'a7' 'ad' 'aq' 'ck' 'cu' 'd8' 'db' 'do' 'dt' 'e7' 'eg' 'em' 'eq' 'ex' 'ez' 'fl' 'gr' 'h4' 'hd' 'he' 'hg' 'id' 'ie' 'if' 'in' 'io' 'j4' 'j7' 'je' 'jm' 'jo' 'k5' 'kg' 'kj' 'kn' 'l9' 'lb' 'lc' 'ld' 'lf' 'li' 'lj' 'na' 'nz' 'oa' 'ok' 'oo' 'pj' 'px' 'q3' 'q4' 'q6' 'qd' 'qi' 'qm' 'qq' 'qt' 'qx' 'r1' 'rg' 'ri' 'rn' 'sm' 'so' 'su' 'sy' 'tq' 'u5' 'uc' 'ue' 'us' 'w3' 'wc' 'we' 'wj' 'wk' 'wt' 'x8' 'xj' 'yc' 'yy' 2016-05-14 22:21:22.326724 +316 'a1' 'ab' 'ad' 'ag' 'bd' 'bf' 'bo' 'bt' 'cl' 'cn' 'cq' 'cz' 'd4' 'db' 'dc' 'dk' 'dn' 'dq' 'ea' 'el' 'em' 'ev' 'ew' 'ex' 'fr' 'ft' 'ga' 'gc' 'gd' 'gg' 'gl' 'gp' 'hp' 'hu' 'id' 'ij' 'jn' 'kl' 'kt' 
'lh' 'lk' 'lm' 'mi' 'mw' 'n4' 'nk' 'oj' 'ok' 'on' 'ph' 'pk' 'pt' 'q0' 'q8' 'qa' 'qe' 'qf' 'qg' 'qh' 'qi' 'qr' 'qt' 'qv' 'qw' 'qx' 'rd' 're' 'rg' 'rq' 'rr' 'sb' 'sg' 'te' 'tj' 'us' 'w5' 'w9' 'wb' 'wf' 'wh' 'wp' 'wq' 'ws' 'wt' 'wu' 'wv' 'y7' 'yb' 'ye' 'yf' 'yq' 'yu' 'yy' 'yz' 'zx' 2016-05-14 23:21:22.326724 +317 'ad' 'am' 'av' 'ax' 'dl' 'dt' 'dw' 'ej' 'ez' 'f6' 'fe' 'g5' 'ga' 'gv' 'hr' 'hx' 'id' 'ie' 'ii' 'im' 'ix' 'jd' 'kn' 'ml' 'nu' 'ol' 'oo' 'pu' 'qd' 'qm' 'qt' 'r5' 'rb' 'rd' 'rg' 'ru' 'sg' 'ub' 'uk' 'vm' 'wl' 'wr' 'wz' 'x4' 'yf' 'yu' 'yv' 'yw' 'yx' 'zt' 2016-05-15 00:21:22.326724 +318 'aa' 'af' 'bp' 'c2' 'cd' 'ci' 'dl' 'du' 'dz' 'es' 'fn' 'fo' 'g7' 'ga' 'gc' 'gj' 'gx' 'hc' 'ht' 'iu' 'jl' 'jm' 'ke' 'kk' 'kr' 'lc' 'le' 'lr' 'lw' 'ms' 'n2' 'nq' 'nw' 'nz' 'o1' 'op' 'oq' 'ot' 'ow' 'p0' 'pb' 'pp' 'pr' 'qc' 'qh' 'qp' 'qr' 'qu' 'qy' 'r3' 'r9' 'rc' 'rk' 'rr' 'ry' 'sb' 'sf' 'sk' 'sl' 't1' 'ta' 'tb' 'tk' 'tz' 'ud' 'ur' 've' 'vn' 'vt' 'vw' 'w6' 'w9' 'wg' 'wk' 'xg' 'yg' 'yh' 'yj' 'ys' 'yt' 'yv' 'yy' 'z3' 'zo' 'zq' 2016-05-15 01:21:22.326724 +319 '4m' 'at' 'be' 'bo' 'bz' 'cv' 'd0' 'd8' 'ea' 'ec' 'ed' 'ex' 'f9' 'ff' 'fk' 'fv' 'gc' 'ge' 'i8' 'ij' 'is' 'k4' 'k6' 'mw' 'ob' 'ov' 'oy' 'pv' 'q1' 'q2' 'q9' 'qc' 'ql' 'r4' 'rn' 'ru' 'rx' 'ry' 'sj' 'tg' 'ts' 'tv' 'uj' 'vf' 'vh' 'w4' 'wc' 'ww' 'x5' 'xf' 'xx' 'y5' 'yg' 'yy' 'zg' 'zn' 'zw' 2016-05-15 02:21:22.326724 +320 'ew' 'fi' 'g2' 'ku' 'od' 'pk' 'tm' 'tq' 'tu' 'uf' 'wn' 'y1' 2016-05-15 03:21:22.326724 +321 '2e' '3u' 'aa' 'an' 'ap' 'bc' 'br' 'bx' 'cc' 'dt' 'ea' 'ei' 'ej' 'em' 'eo' 'ep' 'eq' 'es' 'et' 'ey' 'fb' 'fw' 'gh' 'gs' 'gv' 'hd' 'hg' 'hn' 'i7' 'it' 'jf' 'ka' 'kl' 'l8' 'la' 'm0' 'nu' 'o2' 'oc' 'oy' 'ph' 'po' 'pz' 'q2' 'qg' 'qh' 'qn' 'qp' 'qt' 'r7' 'r8' 'rd' 'rf' 'rj' 'rk' 'sm' 't1' 'tb' 'th' 'ub' 'ui' 'ul' 'um' 'uu' 've' 'w8' 'wj' 'wx' 'y8' 'zf' 'zn' 2016-05-15 04:21:22.326724 +322 'as' 'b1' 'bc' 'br' 'cd' 'dw' 'e2' 'ec' 'ew' 'f1' 'f2' 'f6' 'g0' 'gi' 'gy' 'hf' 'hk' 'if' 'ii' 'is' 'iy' 'j2' 'jp' 'kl' 'ku' 'l4' 'lg' 'lp' 'lv' 'lw' 'nd' 'ng' 'oo' 'q4' 'q6' 'q9' 'qc' 'qh' 'ql' 'qm' 'qo' 'qx' 'qz' 're' 'ro' 'sm' 't5' 'th' 'tt' 'ul' 'um' 'v0' 'vb' 'vf' 'vx' 'w9' 'wc' 'we' 'wp' 'xi' 'xl' 'xt' 'xv' 'y3' 'y6' 'ye' 'yg' 'yq' 'yw' 'yx' 'zs' 'zu' 2016-05-15 05:21:22.326724 +323 '15' '72' 'av' 'ba' 'dt' 'ev' 'hd' 'hu' 'jy' 'lo' 'mo' 'mt' 'oe' 'oi' 'pa' 'qc' 'qe' 'qt' 'r1' 'rg' 'ry' 'td' 'tk' 'us' 'wk' 'wq' 'ww' 'wz' 'x7' 'xq' 'y9' 'yx' 'z3' 2016-05-15 06:21:22.326724 +324 '2b' 'a4' 'aa' 'ay' 'br' 'cf' 'd3' 'dh' 'dw' 'e1' 'e8' 'eg' 'en' 'ep' 'ff' 'fj' 'g2' 'go' 'gq' 'i4' 'i8' 'if' 'jc' 'jj' 'jo' 'k1' 'kk' 'ks' 'la' 'ld' 'lm' 'm4' 'ml' 'o4' 'oe' 'og' 'os' 'oy' 'pc' 'pd' 'pr' 'px' 'q5' 'q6' 'qd' 'qh' 'qj' 'ql' 'qo' 'qt' 'qu' 'qw' 'rb' 're' 'rk' 'rn' 'rr' 'rw' 'sl' 'sm' 'te' 'tf' 'tk' 'u6' 'ui' 'ul' 'uo' 'ur' 'us' 'va' 'vt' 'w7' 'wf' 'wg' 'wm' 'wn' 'ws' 'xo' 'y8' 'yj' 'zh' 2016-05-15 07:21:22.326724 +325 'a1' 'a6' 'aq' 'bk' 'bu' 'ch' 'dd' 'dj' 'dx' 'e7' 'ef' 'ei' 'ek' 'el' 'ez' 'fp' 'ft' 'gb' 'go' 'hd' 'hh' 'hj' 'ho' 'hs' 'ic' 'il' 'ip' 'jh' 'jn' 'jq' 'jx' 'kq' 'kv' 'la' 'm8' 'mv' 'ng' 'oh' 'or' 'p7' 'p9' 'pe' 'pk' 'px' 'q7' 'qc' 'qd' 'qh' 'qj' 'qk' 'qr' 'qx' 'rd' 'rl' 'rq' 'sl' 'sq' 'st' 'sy' 'th' 'tn' 'u9' 'uc' 'ud' 'ug' 'uy' 'uz' 'vl' 'vn' 'wa' 'wb' 'wd' 'wt' 'xi' 'xs' 'xz' 'y5' 'yi' 'yk' 'yt' 'yw' 'z7' 'zg' 2016-05-15 08:21:22.326724 +326 '2j' 'ai' 'al' 'ch' 'dy' 'dz' 'ed' 'ei' 'fg' 'fk' 'gp' 'i0' 'il' 'ip' 'iv' 'ix' 'js' 'lm' 'na' 'p1' 'pq' 'pu' 'qe' 'qi' 'qk' 'ql' 'qq' 'qv' 'qx' 'r0' 'rl' 'rm' 'rv' 'sf' 'sj' 'te' 'tk' 'tq' 'tt' 'tu' 'uc' 'ud' 'um' 'un' 'uq' 'vg' 'vu' 
'vz' 'w8' 'wa' 'wf' 'xg' 'yj' 'yw' 'yy' 'z6' 2016-05-15 09:21:22.326724 +327 '6w' 'a1' 'aa' 'ab' 'af' 'ao' 'aq' 'ay' 'cd' 'cg' 'dn' 'dp' 'dy' 'e0' 'e6' 'eb' 'ej' 'fh' 'fi' 'fs' 'g0' 'gc' 'gi' 'gs' 'gv' 'gw' 'hd' 'ho' 'i8' 'i9' 'ia' 'ic' 'ie' 'ik' 'ja' 'jd' 'ji' 'jm' 'k0' 'kj' 'lj' 'm8' 'n0' 'nh' 'nl' 'nn' 'o8' 'of' 'ou' 'oz' 'ph' 'po' 'q0' 'q3' 'q6' 'q7' 'qa' 'qc' 'qd' 'qn' 'qp' 'qq' 'qu' 'qw' 'qz' 'rd' 'rm' 'rn' 'rp' 'rs' 'sj' 'tg' 'tj' 'tr' 'tv' 'u0' 'uf' 'uh' 'uk' 'vv' 'wa' 'wc' 'wf' 'wg' 'wn' 'wo' 'wp' 'wr' 'ww' 'xt' 'y3' 'y7' 'yc' 'yf' 'yp' 'yy' 2016-05-15 10:21:22.326724 +328 '4p' 'bw' 'cq' 'dk' 'dr' 'dx' 'e3' 'e8' 'ex' 'ez' 'g9' 'h9' 'ih' 'in' 'kt' 'lt' 'me' 'o7' 'od' 'oi' 'on' 'p1' 'pu' 'qa' 'qd' 'qm' 'rz' 'sd' 'tn' 'ua' 'uz' 'vg' 'vx' 'wx' 'xy' 'ya' 'yl' 'yx' 2016-05-15 11:21:22.326724 +329 '1m' '2u' 'ai' 'ap' 'bb' 'bd' 'cb' 'dn' 'fl' 'fm' 'fo' 'fu' 'gi' 'gl' 'gs' 'hg' 'hi' 'hm' 'hp' 'i0' 'if' 'ij' 'jn' 'jt' 'ju' 'k8' 'ld' 'lm' 'oe' 'ox' 'pa' 'q7' 'qe' 'qi' 'qo' 'qw' 'rf' 'rg' 'ry' 'sq' 't7' 'tk' 'tn' 'tz' 'u5' 'uc' 'v4' 'vr' 'w4' 'w6' 'wd' 'wo' 'ws' 'wt' 'wu' 'wv' 'x3' 'yl' 2016-05-15 12:21:22.326724 +330 '25' 'ad' 'ao' 'at' 'ch' 'db' 'dz' 'eq' 'fp' 'gq' 'ih' 'iu' 'jo' 'km' 'p1' 'pb' 'qa' 'qb' 'qc' 'qg' 'ql' 'qq' 'qy' 'rb' 'rf' 'rl' 'rw' 'sp' 'ty' 'uk' 'ur' 'wu' 'wv' 'yw' 2016-05-15 13:21:22.326724 +331 '1a' '3r' 'ai' 'b8' 'bp' 'd6' 'do' 'dp' 'du' 'e0' 'e1' 'ek' 'es' 'eu' 'ey' 'ez' 'fq' 'fs' 'fy' 'gd' 'gm' 'h1' 'hu' 'i0' 'is' 'iv' 'iz' 'jd' 'jn' 'jz' 'kc' 'lf' 'lp' 'lt' 'ly' 'm2' 'mb' 'mt' 'n1' 'nr' 'of' 'oi' 'oq' 'ow' 'pe' 'pm' 'pn' 'q0' 'qb' 'qg' 'qk' 'qo' 'qz' 'r4' 'r8' 'ra' 'rn' 'rs' 's7' 'sa' 'sj' 'so' 'su' 'sv' 'sx' 'ti' 'tl' 'ts' 'ty' 'u4' 'up' 'vr' 'w2' 'wg' 'wi' 'wk' 'wl' 'ws' 'xl' 'xx' 'yf' 'yk' 'yn' 'yq' 'yv' 2016-05-15 14:21:22.326724 +332 'aa' 'ah' 'bo' 'dz' 'pd' 'pm' 'qf' 'qw' 'rd' 'rn' 'tf' 'wu' 2016-05-15 15:21:22.326724 +333 'ad' 'ao' 'au' 'az' 'ci' 'dj' 'dv' 'e1' 'ei' 'ej' 'em' 'eq' 'fa' 'fi' 'fj' 'fn' 'fw' 'he' 'i8' 'in' 'io' 'jd' 'jt' 'jv' 'lr' 'mt' 'mx' 'ns' 'nt' 'oc' 'oe' 'oi' 'pz' 'qe' 'ql' 'qt' 'qy' 'rq' 'sc' 'sp' 'te' 'um' 'wo' 'wv' 'x6' 'xu' 'yf' 'yi' 'yo' 'yv' 'yw' 'yz' 'zg' 2016-05-15 16:21:22.326724 +334 '13' '6k' 'ae' 'ah' 'an' 'at' 'aw' 'cc' 'cn' 'du' 'dx' 'ek' 'el' 'em' 'en' 'es' 'ey' 'f2' 'fe' 'fh' 'fk' 'fv' 'g6' 'g9' 'gw' 'hj' 'hn' 'hs' 'hu' 'hv' 'ia' 'im' 'iq' 'ix' 'jh' 'jm' 'ki' 'lj' 'lo' 'lt' 'lw' 'm1' 'nv' 'nw' 'ny' 'oj' 'oo' 'ot' 'ov' 'ox' 'pd' 'pv' 'q1' 'q7' 'qc' 'qd' 'qj' 'ql' 'qp' 'qq' 'qv' 'qw' 'qx' 'r7' 'ry' 'so' 'st' 'su' 'sx' 't9' 'tf' 'th' 'ty' 'uh' 'uo' 'ut' 'uv' 'uy' 'vj' 'vp' 'vu' 'vw' 'w3' 'w5' 'w8' 'wc' 'we' 'wg' 'wi' 'wl' 'wv' 'xh' 'xx' 'yb' 'yd' 'yj' 'yn' 'yo' 'yr' 'yy' 'zk' 'zx' 2016-05-15 17:21:22.326724 +335 'ad' 'av' 'be' 'bi' 'bk' 'bu' 'ce' 'cv' 'd8' 'df' 'dh' 'du' 'e1' 'em' 'ep' 'ex' 'ey' 'f1' 'fd' 'fh' 'fp' 'gn' 'gu' 'h8' 'i8' 'ii' 'il' 'iz' 'j0' 'ji' 'jv' 'jz' 'k2' 'k9' 'ky' 'l9' 'lo' 'm6' 'mc' 'md' 'ng' 'nz' 'ot' 'p2' 'pa' 'pj' 'ps' 'pv' 'q5' 'qd' 'qf' 'qm' 'qq' 'qr' 'qt' 'qv' 'qx' 'rc' 'rh' 'rp' 'rq' 'rs' 'sb' 'sc' 'te' 'to' 'ts' 'tw' 'up' 'vn' 'vw' 'wa' 'wg' 'wh' 'wn' 'wp' 'wz' 'xc' 'xf' 'xt' 'y5' 'yh' 'yn' 'yy' 'yz' 'za' 'zh' 'zy' 2016-05-15 18:21:22.326724 +336 'a1' 'b0' 'd0' 'db' 'ef' 'er' 'ev' 'ew' 'fe' 'fm' 'g8' 'la' 'n5' 'oh' 'os' 'pk' 'pn' 'qq' 'r7' 'sq' 'tw' 'ua' 'uu' 'wa' 'wk' 'wr' 'wu' 'xc' 'yi' 2016-05-15 19:21:22.326724 +337 '2k' '4s' 'd2' 'dk' 'dm' 'ea' 'ej' 'ep' 'et' 'eu' 'fr' 'gk' 'gs' 'hm' 'iu' 'jq' 'kj' 'km' 'lm' 'nb' 'nr' 'o1' 'oc' 'od' 'of' 'oj' 'ox' 'pf' 'pj' 'px' 'q0' 'q2' 'qa' 'qe' 'qh' 'qv' 
'r5' 'r9' 'rb' 'rh' 'ti' 'tl' 'tw' 'u2' 'vb' 'vc' 'vs' 'xe' 'xn' 'y7' 'yb' 'yg' 'yy' 'zb' 'zl' 2016-05-15 20:21:22.326724 +338 'a5' 'ae' 'ah' 'as' 'b9' 'bh' 'c5' 'd0' 'dc' 'dd' 'do' 'dp' 'dz' 'eg' 'eh' 'fg' 'fu' 'g3' 'gh' 'gu' 'ha' 'hb' 'hd' 'hg' 'hw' 'i3' 'ib' 'jb' 'je' 'jm' 'js' 'm8' 'ma' 'n4' 'nu' 'nz' 'ob' 'oy' 'qu' 'qz' 'r5' 'rd' 're' 'rr' 'ru' 's5' 'sf' 'sp' 'st' 'tn' 'uz' 'vi' 'vo' 'w9' 'wj' 'wm' 'ws' 'wu' 'wv' 'wy' 'xa' 'xc' 'yl' 'zd' 'zp' 2016-05-15 21:21:22.326724 +339 'gi' 'n3' 'n7' 'om' 'qa' 'r7' 're' 'ug' 'w7' 'wi' 'x4' 'yn' 2016-05-15 22:21:22.326724 +340 'as' 'bb' 'bn' 'ce' 'd4' 'dp' 'ez' 'gu' 'gw' 'hd' 'hp' 'ia' 'ih' 'ij' 'j6' 'jm' 'jz' 'ku' 'kw' 'lh' 'mu' 'nl' 'nu' 'oc' 'pa' 'pm' 'q8' 'qd' 'qg' 'qi' 'qm' 'qn' 'qr' 'qs' 'qw' 'r9' 're' 'ro' 'ru' 'rv' 'rw' 't1' 't3' 'tm' 'u4' 'uf' 'uw' 'wa' 'wf' 'wk' 'xa' 'xs' 'yg' 'yp' 'ys' 'yu' 'yz' 'z4' 2016-05-15 23:21:22.326724 +341 'a3' 'b3' 'bv' 'ck' 'cl' 'cs' 'cv' 'cy' 'dc' 'dl' 'dq' 'dr' 'du' 'eg' 'el' 'en' 'ex' 'fv' 'fx' 'gd' 'gk' 'gu' 'h7' 'ic' 'jf' 'jz' 'lf' 'lq' 'nl' 'nm' 'o6' 'ob' 'ol' 'pe' 'po' 'q2' 'qa' 'qk' 'qo' 'qp' 'qr' 'qv' 'qz' 'r5' 're' 'rk' 'ss' 'sv' 't2' 'tf' 'ti' 'tj' 'ua' 'ud' 'v8' 'vb' 'vg' 'vh' 'vi' 'w1' 'w5' 'wb' 'wd' 'we' 'wf' 'wj' 'wr' 'ws' 'wv' 'xq' 'xs' 'y1' 'ye' 'yi' 'yp' 'yr' 'ys' 'yu' 'yw' 'z7' 'z8' 'zf' 'zq' 2016-05-16 00:21:22.326724 +342 '1l' '2u' 'ao' 'av' 'bb' 'c7' 'c8' 'ca' 'cm' 'cp' 'dc' 'di' 'e2' 'e4' 'eo' 'er' 'et' 'ex' 'g1' 'gb' 'hi' 'hr' 'ht' 'i9' 'id' 'ie' 'jk' 'jq' 'ju' 'k3' 'k5' 'lb' 'lm' 'lv' 'mg' 'no' 'o8' 'oo' 'ot' 'p2' 'pn' 'pw' 'py' 'q6' 'qg' 'ql' 'qo' 'r2' 'rf' 'rk' 'rq' 'ry' 't2' 'tb' 'tn' 'tr' 'tw' 'ub' 'w1' 'wa' 'wb' 'we' 'wi' 'ww' 'xo' 'yk' 'ys' 'yx' 'zg' 2016-05-16 01:21:22.326724 +343 '4u' 'a2' 'aq' 'as' 'b8' 'bu' 'cc' 'cp' 'cv' 'dg' 'e3' 'e9' 'eh' 'fg' 'fh' 'g9' 'gf' 'gk' 'gx' 'hp' 'il' 'jq' 'kc' 'l4' 'lm' 'mt' 'my' 'nw' 'oq' 'pe' 'qe' 'qu' 'qz' 'r1' 'rb' 'sh' 'sx' 't5' 'td' 'tl' 'tr' 'vb' 'vd' 'w6' 'wh' 'wk' 'wp' 'wv' 'y0' 'ye' 'yq' 'z2' 'zj' 'zz' 2016-05-16 02:21:22.326724 +344 '1l' '2t' '2z' 'af' 'az' 'be' 'br' 'cm' 'cu' 'cy' 'd2' 'd4' 'dd' 'dx' 'ec' 'ed' 'g2' 'gs' 'gu' 'id' 'ig' 'in' 'it' 'jh' 'jl' 'l6' 'ld' 'lg' 'lr' 'ly' 'nf' 'nj' 'ob' 'ot' 'p4' 'p8' 'pe' 'ph' 'q5' 'qa' 'qd' 'qe' 'qi' 'qo' 'qv' 'qz' 'ra' 'ri' 'rl' 'rr' 's7' 'sj' 'sx' 't0' 'tj' 'to' 'u2' 'ui' 'uq' 'ut' 'uw' 'vg' 'vs' 'w2' 'wa' 'wi' 'wl' 'wr' 'wu' 'ww' 'x7' 'xm' 'ym' 'yn' 'yp' 'zh' 2016-05-16 03:21:22.326724 +345 '1x' '2h' '6b' 'ah' 'av' 'ay' 'b9' 'bu' 'bv' 'c6' 'dv' 'e2' 'ea' 'ec' 'fh' 'gk' 'ia' 'j2' 'ji' 'k2' 'kc' 'kg' 'kq' 'kz' 'la' 'mq' 'nb' 'od' 'oe' 'ot' 'p8' 'pm' 'pp' 'qc' 'qi' 'qr' 'rg' 'rp' 'rw' 'tl' 'u5' 'ub' 'un' 'uv' 'vc' 'vi' 'wc' 'wi' 'wp' 'wv' 'xg' 'xq' 'yq' 'z3' 'zd' 'zv' 'zx' 'zy' 2016-05-16 04:21:22.326724 +346 'a2' 'al' 'av' 'b9' 'bd' 'bo' 'c8' 'cq' 'dz' 'ej' 'el' 'ep' 'ew' 'f3' 'f5' 'fr' 'ft' 'h2' 'i3' 'ic' 'id' 'iy' 'jd' 'ke' 'kf' 'kr' 'l3' 'lf' 'mg' 'mu' 'nj' 'o3' 'ob' 'ol' 'ov' 'pb' 'pj' 'pl' 'pp' 'qn' 'qz' 're' 'rg' 'rk' 'ru' 's3' 'sa' 'sh' 'sj' 'su' 'sw' 'tb' 'tk' 'tv' 'ue' 'uh' 'ul' 'un' 'vk' 'vu' 'w0' 'wa' 'wm' 'wo' 'wq' 'xa' 'xl' 'ya' 'yn' 'yo' 2016-05-16 05:21:22.326724 +347 'a5' 'av' 'dc' 'dm' 'dt' 'dw' 'e2' 'ea' 'en' 'et' 'ez' 'f2' 'fg' 'fn' 'fv' 'gn' 'gu' 'h5' 'ht' 'i2' 'ie' 'ik' 'il' 'in' 'it' 'iv' 'iy' 'ji' 'jj' 'ju' 'kf' 'kn' 'ko' 'ku' 'l6' 'ls' 'nz' 'o2' 'o4' 'of' 'on' 'op' 'p7' 'q0' 'q2' 'qa' 'qt' 'r1' 'r9' 'ri' 'rq' 'rv' 'se' 'sp' 'sx' 't0' 't2' 't9' 'te' 'tq' 'tu' 'u8' 'uc' 'ue' 'ug' 'ui' 'ut' 'uu' 'v0' 'v7' 'vs' 'w0' 'wc' 'wh' 'wn' 'wo' 'ws' 'y1' 'y3' 'y5' 'ya' 'yc' 'yi' 
'ys' 'yt' 'zf' 'zq' 2016-05-16 06:21:22.326724 +348 'eq' 'g7' 'jc' 'jf' 'ji' 'lw' 'ma' 'oe' 'p7' 'qb' 'qj' 'qo' 'u7' 'v7' 'w3' 'wd' 'wz' 'xg' 'yr' 2016-05-16 07:21:22.326724 +349 'b8' 'eh' 'g5' 'gn' 'jo' 'mx' 'og' 'ol' 'on' 'px' 'rr' 'sl' 'un' 'uz' 'wv' 'yf' 2016-05-16 08:21:22.326724 +350 'at' 'bm' 'd0' 'd5' 'eg' 'ei' 'ek' 'eq' 'er' 'eu' 'ez' 'fs' 'gh' 'hc' 'ho' 'ix' 'ko' 'kq' 'l1' 'll' 'mz' 'ob' 'ou' 'p2' 'pi' 'pl' 'ps' 'pt' 'q0' 'q5' 'qa' 'qg' 'qj' 'qm' 'qq' 'qt' 'rf' 'ri' 'rm' 'ru' 'rv' 'rx' 'ry' 's4' 't4' 'td' 'tn' 'tq' 'tx' 'u3' 'up' 'uq' 'vn' 'wb' 'wf' 'wk' 'wr' 'wv' 'wy' 'xh' 'xx' 'y3' 'yc' 'ye' 'ys' 'za' 'zd' 'zo' 'zs' 'zv' 'zy' 2016-05-16 09:21:22.326724 +351 'a0' 'a7' 'ad' 'ae' 'ah' 'ay' 'b8' 'bh' 'dc' 'di' 'do' 'e5' 'ea' 'eb' 'ep' 'ev' 'fn' 'gq' 'gr' 'gs' 'h8' 'hc' 'hj' 'i3' 'ih' 'it' 'iw' 'ja' 'jf' 'kc' 'l8' 'lp' 'lx' 'ly' 'm1' 'og' 'oy' 'p7' 'pp' 'q7' 'qa' 'qm' 'qq' 'qr' 'qu' 'r3' 'rd' 'rg' 'rj' 'rw' 'si' 'sq' 'ss' 't0' 'tb' 'tg' 'tl' 'to' 'tp' 'tx' 'u9' 'ub' 'um' 'up' 'ux' 'vc' 'w0' 'w2' 'w5' 'wl' 'wy' 'xg' 'yb' 'yg' 'yh' 'za' 2016-05-16 10:21:22.326724 +352 '1z' 'aq' 'cf' 'cl' 'e1' 'e5' 'ee' 'eg' 'eo' 'er' 'ff' 'fh' 'fx' 'g5' 'ga' 'gd' 'gm' 'gn' 'hc' 'hf' 'hi' 'hk' 'ho' 'ib' 'ik' 'in' 'iw' 'ix' 'jd' 'kl' 'ky' 'lp' 'm3' 'm5' 'n4' 'ng' 'o0' 'oc' 'oj' 'ot' 'ou' 'p2' 'pa' 'pd' 'pg' 'ps' 'pw' 'q1' 'q8' 'qb' 'qc' 'qd' 'qi' 'qr' 'qs' 'qu' 'qv' 'ra' 'rg' 'rt' 'rz' 's7' 'sm' 'sn' 't0' 'tb' 'th' 'tx' 'tz' 'ub' 'ud' 'ue' 'un' 'ur' 'ut' 'vb' 'vj' 'wg' 'wh' 'wj' 'wl' 'wn' 'ww' 'wz' 'x0' 'xc' 'xq' 'xr' 'xw' 'y8' 'y9' 'yr' 'z7' 'zt' 2016-05-16 11:21:22.326724 +353 '2u' '3q' '7p' 'b4' 'b8' 'bb' 'bq' 'bu' 'ce' 'dk' 'dn' 'e0' 'ea' 'ed' 'ef' 'eg' 'eh' 'ej' 'ek' 'em' 'en' 'eo' 'et' 'fh' 'fr' 'fw' 'gj' 'gx' 'ho' 'i7' 'i9' 'if' 'ii' 'il' 'in' 'iq' 'iw' 'jy' 'k0' 'k1' 'km' 'kr' 'lh' 'lq' 'lw' 'ma' 'mb' 'md' 'mj' 'nh' 'ni' 'nj' 'ob' 'p5' 'p7' 'pc' 'pk' 'pl' 'pu' 'q2' 'qa' 'qc' 'qe' 'qi' 'qm' 'qn' 'qt' 'rb' 'rp' 'rx' 'sd' 't6' 'tb' 'tl' 'tw' 'u5' 'u7' 'ug' 'uh' 'ut' 'vs' 'wi' 'wj' 'wp' 'ws' 'wu' 'wv' 'xm' 'xs' 'y2' 'y6' 'yd' 'yh' 'yl' 'yr' 'yx' 'yz' 2016-05-16 12:21:22.326724 +354 '4c' '4z' 'ag' 'ak' 'an' 'as' 'aw' 'bl' 'd7' 'dy' 'e7' 'eb' 'ec' 'eq' 'et' 'fx' 'gg' 'gm' 'ht' 'hv' 'ik' 'in' 'ip' 'j6' 'k6' 'ka' 'lc' 'md' 'mx' 'n7' 'pw' 'py' 'q7' 'qa' 'qg' 'qh' 'qn' 'qo' 'qt' 'ri' 'rl' 'rs' 'sj' 'su' 'tj' 'u0' 'ua' 'w0' 'wh' 'wi' 'wp' 'wr' 'wx' 'wz' 'ys' 'yx' 2016-05-16 13:21:22.326724 +355 '3s' '6q' 'ad' 'am' 'as' 'ay' 'cd' 'ci' 'cl' 'dw' 'eb' 'en' 'ep' 'ew' 'ey' 'fh' 'g6' 'gb' 'gk' 'h5' 'hk' 'hp' 'hy' 'i8' 'ia' 'ic' 'ih' 'in' 'is' 'iw' 'jd' 'je' 'kh' 'kp' 'ks' 'lg' 'lz' 'mx' 'my' 'nf' 'nh' 'no' 'nv' 'o4' 'oa' 'oc' 'op' 'p2' 'p5' 'pa' 'pb' 'pt' 'pw' 'q2' 'q4' 'qb' 'qg' 'qh' 'qq' 'qs' 'qt' 'qx' 'qy' 'qz' 'r7' 'rc' 'rk' 'rt' 'sr' 'sx' 'sy' 'tc' 'tn' 'tr' 'tv' 'u1' 'u7' 'ua' 'uh' 'uz' 'vi' 'wd' 'wr' 'x3' 'x5' 'xa' 'xe' 'y5' 'y7' 'yb' 'yg' 'yk' 'ym' 'yr' 'yt' 'z7' 'zc' 'zf' 'zj' 'zp' 2016-05-16 14:21:22.326724 +356 'aq' 'bo' 'cx' 'dl' 'dm' 'ed' 'ee' 'ei' 'er' 'eu' 'f4' 'fe' 'fw' 'g2' 'gj' 'h7' 'hv' 'im' 'j8' 'jf' 'kl' 'la' 'lj' 'mo' 'oc' 'oh' 'oi' 'ol' 'pa' 'pb' 'pf' 'pk' 'po' 'q9' 'qf' 'qj' 'qq' 'qv' 'r8' 're' 'rg' 'rh' 'rj' 'rr' 'ru' 'ry' 's8' 'sd' 'sg' 'sj' 'sk' 'so' 'sv' 't3' 'tz' 'u5' 'ua' 'uk' 'um' 'vi' 'vt' 'wb' 'we' 'wk' 'wn' 'wt' 'wu' 'wz' 'xk' 'xz' 'yi' 'yl' 'yz' 'z9' 'zd' 'zu' 'zw' 2016-05-16 15:21:22.326724 +357 'a7' 'ah' 'cj' 'co' 'cv' 'dt' 'ex' 'fs' 'gx' 'hn' 'jv' 'kp' 'l5' 'od' 'on' 'pr' 'q6' 'q8' 'qi' 'qq' 'qr' 'rl' 'u2' 'wb' 'wx' 'yh' 'zq' 2016-05-16 16:21:22.326724 +358 '18' 'bf' 'bj' 
'cs' 'dw' 'el' 'ep' 'ez' 'fr' 'g1' 'gv' 'hu' 'hz' 'ii' 'jc' 'je' 'jj' 'jo' 'jv' 'n5' 'ni' 'ns' 'oh' 'oi' 'ov' 'pe' 'q8' 'qg' 'qi' 'qr' 'r6' 'rh' 's3' 'sj' 'sr' 't0' 'ta' 'u2' 'uj' 'ur' 'vu' 'wb' 'wh' 'wx' 'yn' 'yq' 'zg' 'zr' 2016-05-16 17:21:22.326724 +359 'au' 'bl' 'c4' 'd7' 'dc' 'dm' 'ea' 'eo' 'ft' 'gb' 'h3' 'ia' 'ix' 'jr' 'kc' 'ke' 'lq' 'lr' 'mn' 'op' 'pj' 'qc' 'qj' 'qn' 'qq' 'qx' 'r8' 'rb' 'ri' 'rm' 'rn' 'rx' 't6' 'tf' 'tv' 'um' 'vg' 'vi' 'we' 'wp' 'wz' 'x6' 'xb' 'xd' 'xg' 'xv' 'yx' 'yz' 2016-05-16 18:21:22.326724 +360 '1o' '7o' 'a1' 'aq' 'b4' 'bi' 'bp' 'c9' 'cb' 'cd' 'cf' 'co' 'cp' 'd3' 'd8' 'ds' 'dv' 'dy' 'eb' 'ee' 'ef' 'eh' 'em' 'eo' 'es' 'ez' 'fi' 'fk' 'fl' 'gj' 'gn' 'gt' 'h1' 'hn' 'ho' 'hp' 'hu' 'ic' 'ik' 'il' 'im' 'ir' 'iw' 'ji' 'jp' 'k4' 'ks' 'm0' 'ml' 'n0' 'ns' 'oj' 'on' 'or' 'os' 'ov' 'pr' 'pv' 'px' 'py' 'q6' 'qa' 'qc' 'qg' 'qj' 'qm' 'qn' 'qp' 'qq' 'qs' 'qv' 'qz' 'r0' 'ra' 's0' 'te' 'tg' 'tm' 'to' 'tw' 'u1' 'u7' 'ug' 'um' 'uw' 'ux' 'uz' 'vm' 'vu' 'w4' 'wg' 'wh' 'wj' 'wk' 'wm' 'wo' 'wq' 'ws' 'wt' 'wz' 'x0' 'x5' 'yg' 'yn' 'z6' 2016-05-16 19:21:22.326724 +361 '1a' 'a0' 'ab' 'ah' 'ay' 'cd' 'db' 'di' 'eh' 'eo' 'eq' 'f4' 'f8' 'fs' 'gv' 'h7' 'he' 'hf' 'i0' 'id' 'il' 'io' 'ip' 'kc' 'l1' 'l7' 'lw' 'mp' 'na' 'nb' 'nj' 'oj' 'ou' 'pa' 'pn' 'ps' 'pu' 'qb' 'qf' 'ql' 'qn' 'qr' 'rn' 'rw' 'so' 'tf' 'tg' 'th' 'tr' 'us' 'w3' 'w6' 'wb' 'wc' 'wf' 'wg' 'wp' 'wr' 'yv' 'yw' 'zh' 2016-05-16 20:21:22.326724 +362 '5o' 'ar' 'at' 'az' 'bf' 'bv' 'e0' 'ew' 'fh' 'hd' 'ij' 'in' 'is' 'iu' 'ji' 'l5' 'ld' 'mz' 'nk' 'oc' 'p2' 'ph' 'px' 'q9' 'qi' 'qn' 'qo' 'qq' 'qv' 'rw' 'sf' 'sp' 'te' 'up' 'y8' 2016-05-16 21:21:22.326724 +363 '2s' '3k' '4b' '6s' 'a1' 'b2' 'b9' 'bi' 'bz' 'c0' 'c3' 'ck' 'cl' 'cu' 'de' 'dl' 'do' 'dp' 'e8' 'ec' 'ee' 'ei' 'ek' 'eu' 'ez' 'fi' 'fo' 'fr' 'fu' 'fx' 'gh' 'gk' 'go' 'gq' 'h4' 'hd' 'hm' 'hz' 'ia' 'id' 'iv' 'jb' 'jh' 'km' 'kp' 'l4' 'l7' 'l8' 'ld' 'lj' 'nb' 'o7' 'oc' 'oh' 'os' 'p4' 'p9' 'pd' 'q6' 'q7' 'q8' 'qc' 'qh' 'qj' 'qk' 'qw' 'rg' 'rj' 'rm' 'rn' 'rp' 'ru' 'rz' 's5' 'si' 'sv' 'sy' 'tc' 'tg' 'tk' 'tv' 'ue' 'uf' 'ux' 'uz' 'vl' 'w2' 'w6' 'wf' 'wm' 'wu' 'wx' 'xh' 'ya' 'yc' 'ye' 'z7' 'zi' 'zo' 'zq' 2016-05-16 22:21:22.326724 +364 'ap' 'aw' 'ba' 'cz' 'dw' 'eh' 'g5' 'g9' 'gc' 'gi' 'go' 'ir' 'j8' 'kl' 'mg' 'p6' 'ql' 'qu' 'qz' 'sa' 't7' 'tw' 'v5' 'v7' 'wj' 'xt' 'y6' 'yg' 'yh' 'yq' 'z5' 'zj' 2016-05-16 23:21:22.326724 +365 'a9' 'ad' 'az' 'bo' 'bq' 'by' 'c8' 'cb' 'cp' 'd9' 'de' 'dj' 'dp' 'e4' 'ef' 'ey' 'fn' 'gc' 'ge' 'gp' 'gs' 'gu' 'gx' 'hf' 'ic' 'ie' 'ir' 'kc' 'kp' 'ku' 'l2' 'lv' 'mx' 'n7' 'o3' 'o7' 'oe' 'pb' 'ps' 'q3' 'q4' 'qb' 'qn' 'qo' 'qq' 'qu' 'qw' 'r3' 'ra' 'rk' 'sg' 'si' 'sv' 'sw' 'tb' 'tg' 'tw' 'u2' 'uc' 'uf' 'uj' 'us' 'ut' 'uu' 'v3' 'va' 'wa' 'wd' 'ww' 'wy' 'wz' 'xk' 'y6' 'y8' 'ya' 'yc' 'ye' 'yo' 'yx' 'z3' 2016-05-17 00:21:22.326724 +366 'a8' 'am' 'as' 'bb' 'cn' 'co' 'dd' 'di' 'dx' 'dz' 'e3' 'e5' 'ea' 'ef' 'eh' 'el' 'en' 'eo' 'ew' 'fe' 'fg' 'gc' 'gi' 'gj' 'gp' 'ha' 'hl' 'hy' 'i3' 'id' 'ij' 'ir' 'is' 'k9' 'kc' 'kf' 'l9' 'ln' 'ls' 'lw' 'm0' 'm8' 'my' 'nn' 'on' 'p9' 'ph' 'pm' 'pw' 'q1' 'q3' 'qc' 'qm' 'qq' 'qv' 'rg' 'ro' 'se' 'sw' 'sx' 'ta' 'tl' 'tz' 'u4' 'uo' 'vg' 'vl' 'vm' 'vn' 'w3' 'wd' 'wm' 'wo' 'wp' 'wt' 'yg' 'yj' 'yv' 'yx' 'yz' 'zu' 'zx' 'zy' 2016-05-17 01:21:22.326724 +367 '1u' 'ad' 'al' 'c4' 'cz' 'db' 'dk' 'dr' 'e5' 'e7' 'ea' 'f6' 'fk' 'fw' 'g3' 'gl' 'gt' 'gu' 'ha' 'hk' 'hv' 'ie' 'ka' 'kq' 'ks' 'lr' 'nf' 'pk' 'q3' 'q8' 'qf' 'qj' 'qo' 'rp' 'rr' 'rz' 'ss' 'sv' 'sx' 'tb' 'th' 'tt' 'ur' 'vp' 'wj' 'ws' 'wt' 'xk' 'xo' 'ym' 'yz' 2016-05-17 02:21:22.326724 +368 '3t' '5u' 'aa' 'ab' 'b9' 'd8' 'di' 
'dj' 'dq' 'dx' 'dy' 'ea' 'ek' 'el' 'es' 'ev' 'ey' 'ez' 'fk' 'fp' 'g3' 'gb' 'gd' 'gg' 'gi' 'go' 'hj' 'hz' 'i3' 'i5' 'in' 'io' 'is' 'jv' 'kb' 'kr' 'lu' 'md' 'nd' 'ny' 'o6' 'oe' 'ok' 'ow' 'ox' 'p4' 'p7' 'p8' 'pb' 'pu' 'q1' 'q3' 'q4' 'qd' 'qj' 'qk' 'qq' 'qx' 'qy' 'r6' 'ro' 'rw' 'rx' 'ry' 'se' 'sp' 't0' 't1' 'th' 'tl' 'tn' 'tp' 'tr' 'tx' 'u5' 'uf' 'uo' 'ux' 'vc' 'vz' 'w2' 'w5' 'wa' 'wb' 'we' 'wi' 'wj' 'ws' 'wt' 'xh' 'y1' 'yb' 'yg' 'yi' 'zd' 'zm' 'zt' 2016-05-17 03:21:22.326724 +369 '2s' '4q' '8x' 'aa' 'ac' 'ah' 'av' 'aw' 'c4' 'c5' 'ce' 'd1' 'dj' 'dq' 'e7' 'e9' 'ea' 'ef' 'fc' 'fs' 'fz' 'gb' 'ge' 'gx' 'hf' 'hl' 'hm' 'i5' 'i7' 'ie' 'ii' 'iw' 'kf' 'ko' 'kx' 'lb' 'm6' 'mh' 'ob' 'od' 'oe' 'og' 'oq' 'or' 'pn' 'ps' 'pu' 'q4' 'qf' 'qg' 'qj' 'ql' 'qn' 'qt' 'qy' 'r3' 'ra' 'ri' 'rj' 'rv' 'sr' 'tg' 'th' 'tl' 'tx' 'u7' 'ub' 'uh' 'uq' 'uu' 'uy' 'vn' 'vt' 'wa' 'we' 'wh' 'wn' 'wp' 'wr' 'ww' 'xg' 'xk' 'xs' 'ya' 'yc' 'yd' 'yf' 'yl' 'ym' 'ys' 'z6' 'zj' 'zm' 'zv' 'zw' 2016-05-17 04:21:22.326724 +370 '2y' '8i' 'by' 'dl' 'h6' 'hj' 'm3' 'ml' 'qa' 'qx' 'r1' 'rm' 'rn' 'u4' 'uj' 'wo' 'xv' 2016-05-17 05:21:22.326724 +371 '2h' '9y' 'ac' 'at' 'cf' 'e3' 'e6' 'ec' 'ek' 'en' 'f1' 'fa' 'fn' 'gm' 'gx' 'hk' 'i7' 'id' 'iw' 'ke' 'kl' 'ks' 'l6' 'l8' 'ls' 'lx' 'me' 'o4' 'oi' 'oj' 'ox' 'pn' 'qa' 'qh' 'qk' 'ql' 'rp' 'tb' 'te' 'tn' 'ts' 'u4' 'u9' 'ud' 'vj' 'vz' 'wc' 'wr' 'ye' 'yk' 'yq' 'zq' 2016-05-17 06:21:22.326724 +372 '5t' 'al' 'av' 'ax' 'by' 'cr' 'cx' 'df' 'dh' 'dp' 'dq' 'e4' 'e7' 'en' 'ff' 'fg' 'fo' 'g4' 'ge' 'gg' 'gm' 'gr' 'gt' 'hc' 'hn' 'hq' 'hs' 'i0' 'i1' 'id' 'ig' 'im' 'jb' 'jv' 'k9' 'kl' 'km' 'lh' 'lp' 'lq' 'mm' 'ne' 'nk' 'nu' 'od' 'os' 'pj' 'pw' 'pz' 'q0' 'q1' 'qa' 'qd' 'qe' 'qf' 'qk' 'ql' 'qn' 'qp' 'qt' 'r0' 'rx' 'sx' 'sy' 't0' 't1' 't8' 'th' 'ti' 'tp' 'tv' 'tz' 'ub' 'uc' 'ul' 'v5' 'vp' 'vu' 'w8' 'wd' 'we' 'wf' 'wh' 'wm' 'wq' 'wr' 'ws' 'wy' 'wz' 'xg' 'y9' 'yg' 'yh' 'yq' 'yz' 'z0' 2016-05-17 07:21:22.326724 +373 'bf' 'cf' 'cg' 'cj' 'ef' 'ej' 'ek' 'ep' 'fk' 'gi' 'ik' 'ir' 'j0' 'km' 'ko' 'lu' 'mr' 'n6' 'oc' 'op' 'pt' 'q4' 'qe' 'qk' 'qv' 'rc' 'rt' 's8' 't3' 'tk' 'ut' 'wc' 'we' 'ws' 'wt' 'ww' 'y8' 'yd' 'yr' 2016-05-17 08:21:22.326724 +374 'a6' 'ad' 'ak' 'ap' 'd1' 'de' 'dj' 'eb' 'eg' 'fi' 'fq' 'ft' 'gx' 'gy' 'hn' 'hs' 'i4' 'ia' 'im' 'is' 'kf' 'ko' 'kt' 'l0' 'lr' 'm8' 'mv' 'oi' 'on' 'pc' 'pl' 'pn' 'pp' 'q2' 'q4' 'qj' 'ql' 'qv' 'qy' 'r2' 'rc' 'rd' 're' 'rm' 'rt' 'sj' 'tb' 'tc' 'tf' 'tk' 'uf' 'uk' 'ul' 'um' 'uq' 'uy' 'vj' 'wl' 'wv' 'ww' 'x7' 'xj' 'y7' 'yx' 'zb' 2016-05-17 09:21:22.326724 +375 '5f' 'ac' 'ad' 'aj' 'av' 'bg' 'ch' 'ci' 'dg' 'di' 'dl' 'dx' 'ep' 'ew' 'f8' 'fp' 'fr' 'fs' 'g4' 'g6' 'gb' 'h5' 'he' 'hk' 'hx' 'i1' 'ic' 'im' 'ji' 'jm' 'js' 'k9' 'kq' 'lf' 'ln' 'mj' 'n2' 'ni' 'no' 'o5' 'oi' 'ot' 'oy' 'pa' 'pj' 'qk' 'qq' 'qt' 'qu' 'r4' 'r8' 'rb' 'rh' 'rj' 'rm' 'ro' 'rp' 'rq' 'rt' 'ru' 'sb' 'th' 'tu' 'tz' 'u9' 'un' 'uo' 'us' 've' 'vi' 'vq' 'vx' 'wf' 'wg' 'wi' 'wq' 'wr' 'wt' 'x5' 'xz' 'y6' 'yd' 'yo' 'yq' 'yx' 'z3' 'zl' 2016-05-17 10:21:22.326724 +376 'a3' 'do' 'ef' 'hp' 'ih' 'im' 'jp' 'km' 'lv' 'mt' 'nh' 'oc' 'od' 'ph' 'pi' 'pj' 'qe' 'qh' 'qr' 'rq' 'rr' 'rs' 'sc' 'st' 'ts' 'tt' 'tw' 'ut' 'vv' 'w7' 'wv' 'x3' 'xa' 'xc' 'xl' 'zc' 2016-05-17 11:21:22.326724 +377 '2o' '6t' 'a7' 'bv' 'di' 'dm' 'ea' 'eo' 'fd' 'fj' 'ft' 'fw' 'gs' 'hu' 'hz' 'ik' 'it' 'jc' 'js' 'jx' 'jz' 'k4' 'lp' 'lx' 'ol' 'p3' 'pf' 'q9' 'qu' 'qv' 'qx' 're' 'rl' 'ro' 'sb' 'ur' 'vf' 'wd' 'wk' 'wo' 'wx' 'yp' 'zo' 2016-05-17 12:21:22.326724 +378 '7u' '7z' 'a0' 'a5' 'c6' 'cj' 'cx' 'do' 'e4' 'eg' 'eh' 'eq' 'ey' 'ez' 'fe' 'fu' 'gk' 'gy' 'h9' 'ih' 'ik' 'il' 'j0' 'jt' 'jy' 'k1' 'k5' 
'ke' 'lp' 'lx' 'ly' 'ma' 'mj' 'mm' 'mt' 'nb' 'nm' 'o3' 'o5' 'of' 'oi' 'ov' 'p2' 'pf' 'ph' 'pt' 'q8' 'qj' 'qk' 'qp' 'qs' 'r1' 'r8' 'rl' 'rp' 'rt' 's5' 'sb' 'sh' 'sk' 'so' 'sr' 'su' 'tc' 'tn' 'tr' 'tx' 'u4' 'uh' 'ul' 'up' 'ur' 'uw' 'uy' 'vm' 'w5' 'wb' 'wj' 'wm' 'wq' 'wy' 'x6' 'xq' 'xs' 'y8' 'y9' 'yi' 'yj' 'yv' 'yx' 'zi' 'zy' 2016-05-17 13:21:22.326724 +379 '2n' 'ar' 'b3' 'bc' 'bd' 'c0' 'c9' 'cb' 'cp' 'do' 'ee' 'eh' 'ep' 'f3' 'fb' 'fj' 'gg' 'gk' 'gm' 'h9' 'hk' 'hp' 'ht' 'i7' 'ij' 'iw' 'kx' 'm2' 'mt' 'mv' 'nj' 'ns' 'o7' 'oa' 'od' 'of' 'om' 'or' 'pg' 'pm' 'pu' 'qq' 'qw' 'qx' 'r0' 'rb' 'rg' 'rk' 'rv' 'se' 'sj' 'sp' 'su' 'tf' 'uu' 'v9' 've' 'vg' 'vm' 'w5' 'w6' 'w8' 'wa' 'wp' 'y5' 'y9' 'yk' 'yy' 'z2' 'zd' 'zw' 2016-05-17 14:21:22.326724 +380 '1i' '6c' 'ah' 'aq' 'ax' 'b4' 'cg' 'd5' 'da' 'do' 'eu' 'ex' 'ez' 'fa' 'go' 'gt' 'h5' 'h8' 'hi' 'i2' 'ib' 'il' 'in' 'is' 'iz' 'j0' 'j7' 'jn' 'jq' 'ku' 'kw' 'li' 'ls' 'n3' 'nc' 'nt' 'o6' 'oc' 'p4' 'p7' 'pc' 'qc' 'qi' 'qp' 'qs' 'qw' 'r3' 'rl' 'rq' 'ry' 's5' 'si' 'sm' 'ta' 'tr' 'ud' 'ut' 'uw' 'v0' 'w7' 'wg' 'wm' 'xi' 'yi' 'yl' 'yn' 'z1' 'z5' 'zk' 'zp' 2016-05-17 15:21:22.326724 +381 'a2' 'a4' 'ac' 'ag' 'ao' 'ay' 'ce' 'di' 'dm' 'dr' 'dz' 'e9' 'eg' 'ej' 'el' 'em' 'eq' 'et' 'ey' 'fa' 'fw' 'fz' 'gg' 'gh' 'gp' 'gr' 'h6' 'hj' 'hk' 'i7' 'id' 'ik' 'iu' 'j2' 'kc' 'l1' 'lh' 'm5' 'm9' 'mj' 'mr' 'mx' 'ok' 'ot' 'ov' 'oz' 'p5' 'q0' 'q1' 'qe' 'qg' 'r2' 'rj' 'rk' 'rw' 'sa' 't3' 't6' 't9' 'te' 'tf' 'tj' 'tk' 'u4' 'wa' 'wf' 'wu' 'wy' 'yg' 'yj' 'ys' 'zk' 'zy' 2016-05-17 16:21:22.326724 +382 'a0' 'a8' 'b4' 'b9' 'bb' 'cb' 'cn' 'cq' 'ds' 'e2' 'eb' 'ex' 'ez' 'fa' 'fu' 'fz' 'ge' 'gy' 'h0' 'hj' 'ht' 'hu' 'ig' 'in' 'iq' 'is' 'iu' 'jg' 'le' 'ln' 'lr' 'nj' 'nu' 'oe' 'oo' 'ow' 'oz' 'pf' 'ph' 'pj' 'ps' 'pu' 'pv' 'q0' 'q4' 'qf' 'qm' 'qn' 'qo' 'qq' 'qs' 'qw' 'r2' 'r3' 'ra' 'rk' 's7' 'sa' 'sp' 'tl' 'ue' 'ui' 'vo' 'vs' 'wj' 'wk' 'wn' 'wv' 'ww' 'x0' 'xg' 'xh' 'xx' 'y0' 'yd' 'yr' 'zg' 'zn' 2016-05-17 17:21:22.326724 +383 '1u' '5k' 'ax' 'az' 'c4' 'eo' 'fi' 'gq' 'hg' 'ho' 'is' 'jq' 'ki' 'oq' 'ov' 'p4' 'pl' 'po' 'qe' 'qf' 'qn' 'qy' 'r3' 'rm' 'sd' 'se' 'sz' 'tb' 'td' 'u4' 'u5' 'vl' 'vv' 'wb' 'we' 'wh' 'wz' 'y5' 'ye' 'yv' 'zh' 'zj' 2016-05-17 18:21:22.326724 +384 '5y' 'av' 'ax' 'cl' 'cw' 'dh' 'di' 'g7' 'ga' 'gi' 'gx' 'gy' 'i0' 'iu' 'iv' 'jk' 'jv' 'k3' 'l2' 'lo' 'lq' 'mj' 'oj' 'pd' 'pn' 'pv' 'q6' 'qq' 'qv' 'sc' 'tt' 'tv' 'ug' 'ut' 've' 'vy' 'wi' 'xg' 'xo' 'yi' 'yl' 'yv' 'zm' 2016-05-17 19:21:22.326724 +385 'bp' 'cy' 'd6' 'd7' 'em' 'et' 'gf' 'gt' 'hl' 'ib' 'io' 'kz' 'mx' 'nm' 'oe' 'pd' 'qf' 'qk' 'rk' 'rz' 'se' 'si' 'tq' 'tu' 'uu' 'v3' 'wa' 'wf' 'wg' 'wx' 'yd' 'ym' 2016-05-17 20:21:22.326724 +386 'ah' 'ar' 'bk' 'db' 'dd' 'dq' 'eo' 'fe' 'fj' 'fv' 'g2' 'gc' 'hp' 'i7' 'j2' 'k9' 'kt' 'l9' 'ld' 'lu' 'lz' 'me' 'mr' 'mu' 'mx' 'nr' 'o6' 'ol' 'or' 'pa' 'ph' 'pi' 'q4' 'qn' 'qp' 'r5' 'rb' 'rn' 's8' 'sf' 't6' 'tl' 'tq' 'tx' 'u6' 'uc' 'uv' 'wf' 'x1' 'x7' 'xa' 'xl' 'xm' 'yb' 'yh' 'yw' 'zw' 2016-05-17 21:21:22.326724 +387 'cp' 'i0' 'ia' 'jh' 'kh' 'kl' 'ql' 'sb' 'u3' 'wy' 'ys' 2016-05-17 22:21:22.326724 +388 '2o' 'af' 'ah' 'aj' 'al' 'ap' 'as' 'bc' 'd2' 'di' 'dm' 'do' 'ds' 'dt' 'dy' 'eb' 'ed' 'ee' 'ej' 'es' 'ev' 'ex' 'ey' 'gs' 'hk' 'hr' 'hw' 'hz' 'ig' 'is' 'jp' 'jv' 'jx' 'k4' 'kh' 'kv' 'lb' 'll' 'lw' 'na' 'no' 'o1' 'o8' 'oh' 'oo' 'op' 'pb' 'po' 'q2' 'q4' 'q7' 'qa' 'qc' 'qe' 'qg' 'qm' 'qo' 'qt' 'qw' 'qy' 'qz' 'r4' 'r7' 'r8' 'rp' 'rs' 's2' 'sb' 'sg' 'sl' 't1' 'tj' 'to' 'ts' 'tx' 'u5' 'uz' 'v6' 'w4' 'w5' 'we' 'wh' 'wn' 'wr' 'wu' 'wv' 'x1' 'y1' 'y8' 'ya' 'yk' 'ym' 'yw' 'z5' 'zx' 2016-05-17 23:21:22.326724 +389 'bw' 'ci' 'cp' 
'cu' 'cw' 'd7' 'e0' 'e3' 'em' 'ev' 'fi' 'fk' 'fm' 'fu' 'ht' 'ix' 'jn' 'kc' 'kw' 'lm' 'mo' 'n8' 'od' 'oo' 'q1' 'qd' 'qw' 'rs' 'sd' 'su' 'sz' 'th' 'ti' 'ts' 'tw' 'ty' 'u3' 'ua' 'wa' 'wt' 'wy' 'xg' 'xh' 'xp' 'xr' 'yd' 'yj' 'yq' 'yx' 'zu' 2016-05-18 00:21:22.326724 +390 'ak' 'dx' 'eb' 'es' 'eu' 'fd' 'ga' 'gf' 'hl' 'i2' 'kb' 'kd' 'mz' 'n1' 'o8' 'qe' 'qs' 'qw' 'rp' 'sf' 'sk' 'sp' 't0' 'tu' 'uh' 'v9' 'vj' 'vs' 'vw' 'wv' 'xs' 'yo' 'z2' 2016-05-18 01:21:22.326724 +391 '3q' 'am' 'ar' 'az' 'bo' 'cb' 'cv' 'dd' 'ds' 'e1' 'e7' 'en' 'eq' 'er' 'eu' 'ew' 'fq' 'fx' 'g8' 'gm' 'h5' 'ic' 'if' 'jp' 'kw' 'kz' 'l4' 'lf' 'li' 'lm' 'lq' 'lu' 'lz' 'ml' 'nz' 'o4' 'o6' 'op' 'ow' 'po' 'py' 'qj' 'qk' 'qn' 'qp' 'qw' 'qz' 'r6' 'r8' 'rc' 'rl' 'rt' 's6' 'sh' 'sx' 'sz' 'tc' 'ti' 'tt' 'ty' 'uf' 'uh' 'uj' 'um' 'ut' 'uu' 'uv' 'vn' 'vt' 'we' 'wh' 'wi' 'wq' 'wu' 'wz' 'xn' 'y4' 'yu' 'yz' 'z9' 'ze' 2016-05-18 02:21:22.326724 +392 'ad' 'aj' 'ar' 'b4' 'dj' 'eo' 'eu' 'ex' 'gt' 'hj' 'ie' 'ij' 'iz' 'kg' 'ks' 'l1' 'ld' 'me' 'my' 'mz' 'oi' 'pr' 'qb' 'ry' 'sd' 'to' 'u5' 'wc' 'wf' 'wl' 'xs' 'y2' 'yg' 'z1' 'zj' 'zt' 2016-05-18 03:21:22.326724 +393 'ae' 'cn' 'ct' 'dz' 'eb' 'ee' 'ff' 'fi' 'fk' 'fo' 'ft' 'gj' 'gr' 'ie' 'il' 'iv' 'iw' 'iy' 'jb' 'jf' 'ji' 'ke' 'ku' 'kx' 'l3' 'la' 'of' 'ol' 'ox' 'pb' 'pi' 'pq' 'q8' 'qi' 'qj' 'qp' 'qq' 'qw' 'r0' 'r5' 'rk' 'rq' 'ru' 'rz' 'su' 't8' 'tb' 'u0' 'ue' 'um' 'w2' 'wc' 'wm' 'ws' 'wt' 'x4' 'xc' 'xd' 'xm' 'xn' 'y0' 'yb' 'zi' 'zp' 2016-05-18 04:21:22.326724 +394 'a3' 'ai' 'at' 'd7' 'eu' 'fu' 'gd' 'ii' 'ik' 'j0' 'je' 'lw' 'ly' 'mx' 'n7' 'pm' 'qf' 'qk' 'qt' 'ss' 'to' 'tq' 'un' 'vn' 'vq' 'wo' 'wz' 'yk' 'yy' 'zf' 'zg' 2016-05-18 05:21:22.326724 +395 'dz' 'ea' 'h8' 'j9' 'mr' 'pc' 'qa' 'qt' 'rm' 'rv' 'ub' 'wg' 2016-05-18 06:21:22.326724 +396 'a4' 'an' 'go' 'ki' 'px' 'qq' 'td' 'tm' 'ur' 'wc' 'wp' 'yx' 2016-05-18 07:21:22.326724 +397 '0e' 'a0' 'a1' 'a5' 'ad' 'ao' 'bp' 'bw' 'c0' 'ca' 'cw' 'cx' 'cy' 'dg' 'e8' 'eb' 'ef' 'ek' 'fd' 'fk' 'fv' 'g3' 'gj' 'gn' 'gw' 'hr' 'if' 'ij' 'is' 'ix' 'j7' 'km' 'kp' 'kq' 'ks' 'kt' 'kv' 'lk' 'mj' 'mn' 'mv' 'mz' 'nn' 'o5' 'oa' 'ol' 'on' 'os' 'pa' 'pb' 'pf' 'po' 'pq' 'pr' 'pv' 'py' 'q1' 'q6' 'qf' 'qh' 'qj' 'qn' 'qt' 'qu' 'qw' 'rf' 'rk' 'rt' 's5' 'sq' 'ss' 't5' 'ta' 'th' 'tj' 'to' 'ts' 'tx' 'tz' 'u0' 'ua' 'ug' 'ui' 'uw' 'ux' 'vh' 'vp' 'we' 'wg' 'wi' 'wj' 'wp' 'wq' 'xm' 'y6' 'yr' 'yt' 'yu' 'z8' 'zs' 2016-05-18 08:21:22.326724 +398 'a6' 'ad' 'aj' 'b1' 'bz' 'cd' 'df' 'ed' 'eg' 'eh' 'ev' 'fi' 'fo' 'gj' 'h3' 'he' 'ib' 'j0' 'jc' 'jz' 'kz' 'lc' 'mh' 'pk' 'pu' 'q4' 'qc' 'qk' 'qr' 'qw' 'rc' 'rl' 'ro' 'rr' 'so' 'sw' 'td' 'tw' 'tx' 'ue' 'us' 'ut' 'vd' 'wi' 'wu' 'xm' 'y2' 'y8' 'ym' 'z6' 'zm' 2016-05-18 09:21:22.326724 +399 '4z' 'a2' 'eg' 'f4' 'g9' 'gi' 'hd' 'hn' 'ij' 'io' 'ix' 'iz' 'jb' 'jd' 'kk' 'l4' 'lh' 'lq' 'p9' 'pa' 'pb' 'q7' 'qh' 'qi' 'qk' 'qs' 'r9' 'rc' 'ro' 'rr' 's4' 'tj' 'tz' 'vi' 'we' 'wp' 'xe' 'yd' 'yg' 'zj' 2016-05-18 10:21:22.326724 +400 '5y' 'aw' 'e2' 'gd' 'gn' 'hn' 'ig' 'k9' 'ki' 'oj' 'pk' 'ql' 'qz' 'sl' 't3' 'u4' 'v8' 'wg' 'wu' 'xk' 'ya' 'yf' 'zr' 2016-05-18 11:21:22.326724 +401 '5h' 'ak' 'al' 'aq' 'cw' 'd7' 'dm' 'ec' 'ee' 'ef' 'el' 'f4' 'f5' 'f8' 'fh' 'fr' 'fu' 'gh' 'hk' 'ir' 'ix' 'k0' 'k2' 'ka' 'ku' 'lg' 'lr' 'm8' 'mf' 'nq' 'of' 'ph' 'pr' 'px' 'q2' 'qf' 'qh' 'qk' 'qn' 'qq' 'qs' 'qv' 'qx' 'qy' 'qz' 're' 'rh' 'ro' 'rx' 'sa' 'sh' 'sk' 'tb' 'u1' 'ud' 'uo' 'us' 'uw' 'vg' 'vp' 'w3' 'wb' 'wc' 'wf' 'wg' 'wi' 'wk' 'yr' 'ys' 'yw' 'zw' 'zz' 2016-05-18 12:21:22.326724 +402 'af' 'dt' 'e4' 'e8' 'eq' 'et' 'gr' 'kr' 'kv' 'lu' 'oy' 'pb' 'qh' 'ql' 'qw' 'r4' 't8' 'tb' 'td' 'tn' 'uc' 'uj' 'wh' 'xn' 'xs' 
'yi' 2016-05-18 13:21:22.326724 +403 'am' 'bv' 'dt' 'dy' 'ed' 'gx' 'm7' 'mt' 'q5' 'qv' 'rr' 'sh' 'th' 'ut' 'wd' 'wm' 'y1' 'ym' 'yq' 'yr' 'yt' 2016-05-18 14:21:22.326724 +404 'aa' 'al' 'ay' 'c1' 'ca' 'de' 'ea' 'ee' 'eq' 'er' 'ez' 'fc' 'gy' 'i1' 'jb' 'jn' 'jz' 'lp' 'o5' 'om' 'os' 'ou' 'ox' 'px' 'q3' 'qc' 'qs' 'rm' 'rq' 'rt' 'rx' 'ry' 'rz' 'ss' 'ta' 'tg' 'ti' 'tr' 'u6' 'uv' 'vp' 'wn' 'wu' 'zt' 2016-05-18 15:21:22.326724 +405 'ad' 'as' 'dn' 'dq' 'e7' 'ei' 'ey' 'fj' 'h2' 'hu' 'hw' 'i7' 'i9' 'iu' 'jr' 'ma' 'na' 'nh' 'nk' 'pa' 'q5' 'qd' 'qn' 's0' 'so' 'sx' 'u7' 'uc' 'um' 'vb' 'w9' 'wf' 'wh' 'wl' 'wn' 'wr' 'xl' 'y8' 'yl' 'zw' 2016-05-18 16:21:22.326724 +406 '1a' 'a0' 'ae' 'av' 'be' 'bj' 'bv' 'bx' 'bz' 'ck' 'd4' 'do' 'ds' 'du' 'ee' 'eu' 'fg' 'fm' 'fy' 'g1' 'gh' 'h8' 'h9' 'ha' 'he' 'hl' 'hy' 'i6' 'ic' 'io' 'jb' 'jk' 'jq' 'k1' 'kf' 'km' 'kv' 'l8' 'n0' 'n3' 'oh' 'oz' 'po' 'q9' 'qg' 'qh' 'qi' 'ql' 'qp' 'qr' 'qu' 'qw' 'qx' 'rb' 're' 'rf' 'ri' 'rj' 'rv' 'sj' 'sm' 'sz' 't1' 'tl' 'tm' 'tw' 'tz' 'ua' 'uc' 'ug' 'ut' 'uv' 'ux' 'v7' 'wa' 'wc' 'wf' 'wi' 'wl' 'wp' 'wq' 'wr' 'x3' 'x6' 'xk' 'xx' 'y4' 'yp' 'yr' 'yw' 'yy' 'ze' 'zk' 'zy' 2016-05-18 17:21:22.326724 +407 '2e' 'al' 'am' 'bt' 'eo' 'ff' 'fl' 'ge' 'gz' 'h0' 'hb' 'i0' 'j2' 'lh' 'lm' 'ls' 'mg' 'mw' 'on' 'ox' 'pa' 'pb' 'pg' 'q3' 'q6' 'qe' 'ra' 're' 'rm' 'rr' 'rt' 'ry' 'sc' 'sm' 't9' 'tu' 'tx' 'ul' 'ux' 'vh' 'wn' 'wt' 'wv' 'ys' 'yw' 2016-05-18 18:21:22.326724 +408 'ag' 'al' 'ay' 'fw' 'hi' 'ou' 'p6' 'qy' 'rg' 'rz' 'sx' 'uy' 'zb' 2016-05-18 19:21:22.326724 +409 '1o' '3w' 'aw' 'be' 'bx' 'e7' 'eo' 'ey' 'ff' 'fx' 'ht' 'jb' 'km' 'kv' 'l6' 'la' 'nu' 'ny' 'pk' 'qc' 'qi' 'rk' 'ro' 'rw' 's3' 'sh' 't5' 'u1' 'uy' 'vi' 'vm' 'wi' 'x3' 'xo' 'yn' 2016-05-18 20:21:22.326724 +410 'aa' 'as' 'cg' 'dh' 'dn' 'dr' 'e0' 'h2' 'hr' 'j2' 'jf' 'js' 'kc' 'kw' 'ld' 'lh' 'mk' 'n3' 'q3' 'qe' 'ql' 'rv' 's3' 'w0' 'xg' 'ym' 'yt' 'zv' 2016-05-18 21:21:22.326724 +411 '3y' 'a1' 'ay' 'cn' 'd3' 'dc' 'dh' 'e9' 'ef' 'ew' 'f9' 'fd' 'fg' 'ft' 'h6' 'hs' 'hu' 'hz' 'i0' 'ib' 'ip' 'iu' 'jw' 'kf' 'kp' 'kw' 'li' 'lp' 'mg' 'mj' 'mz' 'ng' 'oc' 'od' 'om' 'ou' 'ov' 'p8' 'pi' 'q8' 'qe' 'qf' 'qh' 'qj' 'qo' 'qt' 'qv' 'qz' 'r2' 'r3' 'rc' 'rm' 't4' 'ty' 'tz' 'u6' 'ua' 'ut' 'v3' 'vj' 'w0' 'w6' 'wi' 'wj' 'wq' 'wt' 'wu' 'xe' 'xv' 'xy' 'y3' 'yv' 'zw' 2016-05-18 22:21:22.326724 +412 '5q' 'av' 'd2' 'd4' 'e5' 'jc' 'km' 'mm' 'pa' 'rs' 's4' 'si' 'tc' 'ut' 2016-05-18 23:21:22.326724 +413 'ae' 'av' 'bj' 'bk' 'c3' 'd3' 'dj' 'eb' 'ed' 'ef' 'ei' 'ej' 'en' 'ep' 'fl' 'fp' 'fr' 'fu' 'fz' 'g8' 'gd' 'h6' 'ht' 'hu' 'i7' 'if' 'im' 'j0' 'j1' 'je' 'jx' 'ku' 'l6' 'l7' 'll' 'lp' 'mc' 'ns' 'o4' 'oi' 'op' 'oz' 'q4' 'qa' 'qf' 'qh' 'ql' 'qn' 'qv' 'r0' 'r1' 'r4' 'rh' 'rj' 'rw' 'sd' 'sn' 'so' 't6' 'ti' 'tw' 'uc' 'uf' 'uk' 'ul' 'uw' 'v3' 'v8' 'vd' 'vr' 'w7' 'w9' 'wa' 'wb' 'wg' 'wh' 'wk' 'wl' 'wn' 'wo' 'wz' 'xf' 'xg' 'xo' 'xv' 'y2' 'yl' 'zr' 'zs' 'zw' 2016-05-19 00:21:22.326724 +414 '1y' '5p' '6o' 'au' 'b2' 'bg' 'do' 'dw' 'e1' 'e9' 'ea' 'ei' 'em' 'ev' 'f0' 'gc' 'hj' 'hx' 'k7' 'km' 'kp' 'kx' 'mj' 'mu' 'o3' 'oz' 'pf' 'q5' 'qe' 'qg' 'qh' 'qk' 'ql' 'qn' 'qo' 'rh' 'rk' 'ru' 'rx' 't1' 't3' 'th' 'v2' 'vx' 'w2' 'wc' 'wl' 'wm' 'wn' 'ws' 'x5' 'ye' 'yh' 'yk' 'yu' 'zw' 2016-05-19 01:21:22.326724 +415 '1x' 'a2' 'ah' 'az' 'bb' 'be' 'bz' 'd1' 'dd' 'du' 'ee' 'eh' 'en' 'eu' 'ey' 'ez' 'fl' 'fs' 'g4' 'gm' 'gr' 'ha' 'hf' 'hg' 'hr' 'hw' 'i6' 'id' 'ij' 'j2' 'jv' 'k9' 'lg' 'm9' 'md' 'me' 'mg' 'mp' 'nd' 'nf' 'ng' 'nj' 'ny' 'nz' 'oj' 'os' 'pi' 'pj' 'pt' 'q9' 'qa' 'qf' 'qh' 'qk' 'qn' 'qq' 'qs' 'rz' 'sb' 't7' 't8' 'tw' 'ty' 'u2' 'u3' 'ua' 'ub' 'uy' 'uz' 'w1' 'w3' 'w5' 'wa' 
'wc' 'we' 'wh' 'wp' 'wr' 'xd' 'xs' 'y3' 'yd' 'yk' 'yl' 'yo' 'ze' 'zh' 'zo' 2016-05-19 02:21:22.326724 +416 '2b' '3f' '5e' '7o' 'a4' 'ai' 'as' 'bc' 'bn' 'cz' 'd8' 'dg' 'dj' 'du' 'e5' 'el' 'et' 'ex' 'gc' 'gj' 'hi' 'hn' 'ig' 'ij' 'j3' 'js' 'l3' 'l6' 'lh' 'ls' 'mb' 'n1' 'o0' 'ot' 'q5' 'qd' 'qh' 'qv' 'qz' 'ra' 'rj' 's8' 'tb' 'tf' 'th' 'tx' 'ty' 'vm' 'wf' 'wg' 'wh' 'wn' 'y1' 'y5' 'yw' 'yz' 2016-05-19 03:21:22.326724 +417 '2o' 'a5' 'cg' 'ch' 'cp' 'eb' 'eg' 'eh' 'ew' 'fu' 'g4' 'hc' 'hx' 'il' 'li' 'rd' 'sf' 'sw' 'tg' 'uc' 'zj' 2016-05-19 04:21:22.326724 +418 'c7' 'eb' 'er' 'gb' 'if' 'ko' 'ml' 'oq' 'ot' 'pa' 'qk' 'qs' 'rl' 'rp' 'sc' 'tf' 'tv' 'tw' 'uc' 'ud' 'uz' 'vk' 'vm' 'w0' 'wm' 'wu' 'yd' 'yq' 'yy' 'zu' 2016-05-19 05:21:22.326724 +419 '5e' 'a1' 'ak' 'at' 'av' 'bi' 'bj' 'c7' 'dl' 'do' 'e0' 'ee' 'el' 'em' 'es' 'fl' 'fy' 'g2' 'g6' 'g7' 'gr' 'gv' 'h3' 'hg' 'hr' 'ht' 'ii' 'il' 'is' 'j8' 'jm' 'jq' 'jv' 'k0' 'ka' 'kk' 'l3' 'la' 'lh' 'ms' 'mz' 'no' 'oa' 'os' 'ox' 'pa' 'pc' 'pl' 'pq' 'px' 'q1' 'q3' 'q5' 'q8' 'qb' 'qi' 'ql' 'qw' 'qy' 'qz' 're' 'ri' 'rp' 'rx' 's1' 's3' 'sh' 'sq' 't1' 't5' 'ta' 'td' 'tj' 'to' 'tt' 'ub' 'uc' 'ur' 'uv' 'v1' 'vs' 'w7' 'wb' 'wd' 'wj' 'wl' 'wm' 'wq' 'xr' 'xt' 'ym' 'yu' 'yx' 'yz' 'z8' 'zp' 'zr' 'zs' 2016-05-19 06:21:22.326724 +420 '2v' '7g' 'a1' 'ar' 'bw' 'cs' 'dt' 'ee' 'eg' 'f4' 'fd' 'fl' 'g0' 'ge' 'gx' 'hf' 'hk' 'ht' 'i7' 'il' 'jy' 'ka' 'kl' 'lb' 'lm' 'lv' 'mu' 'nk' 'ns' 'nw' 'oe' 'og' 'oj' 'ol' 'ou' 'pk' 'q2' 'q3' 'qi' 'qn' 'qo' 'qs' 'qy' 'r5' 'rh' 'rj' 'rm' 'rq' 'rr' 'rs' 'rt' 'ru' 'rv' 'ss' 'th' 'tj' 'uj' 'ul' 'um' 'un' 'uo' 'uv' 'w2' 'w6' 'wf' 'wh' 'wk' 'wo' 'wx' 'y4' 'y5' 'yb' 'yn' 'yo' 'ys' 'yz' 'zb' 'zt' 'zw' 2016-05-19 07:21:22.326724 +421 'ai' 'ak' 'as' 'ax' 'b8' 'bd' 'bq' 'bv' 'd1' 'd8' 'dl' 'e9' 'ew' 'ez' 'fz' 'gj' 'gu' 'hk' 'hq' 'i2' 'ie' 'ip' 'iq' 'is' 'iz' 'jf' 'k0' 'kk' 'l6' 'md' 'mx' 'na' 'nf' 'o5' 'ol' 'p3' 'pe' 'q2' 'qf' 'qj' 'qm' 'qn' 'qs' 'qv' 'r2' 'ra' 'rv' 'ry' 's1' 's5' 'si' 't0' 'tc' 'te' 'tn' 'tq' 'tz' 'u1' 'uj' 'ut' 'w3' 'wa' 'wd' 'wh' 'wj' 'wo' 'wu' 'wv' 'ww' 'xk' 'xo' 'yi' 'yk' 'yl' 'zb' 'zw' 2016-05-19 08:21:22.326724 +422 '6b' 'af' 'd3' 'df' 'dg' 'ds' 'e1' 'eb' 'er' 'f3' 'ft' 'ho' 'ik' 'k1' 'k2' 'li' 'mj' 'ni' 'py' 'qx' 'rb' 'rp' 'rv' 'sd' 'sh' 'sl' 'u5' 'uf' 'vk' 'vs' 'vx' 'wg' 'wm' 'wr' 'ws' 'wz' 'xn' 'zh' 2016-05-19 09:21:22.326724 +423 'ab' 'at' 'cr' 'e9' 'g0' 'gv' 'ib' 'iv' 'iz' 'jb' 'jm' 'k0' 'kh' 'o2' 'o7' 'oq' 'ot' 'pj' 'ps' 'qj' 'qz' 'r9' 'rn' 'sa' 't1' 'ti' 'tq' 'ue' 'us' 'wc' 'wo' 'ww' 'yi' 'ys' 'za' 2016-05-19 10:21:22.326724 +424 '1t' 'ai' 'cu' 'cw' 'cx' 'dd' 'de' 'ds' 'e0' 'e2' 'e6' 'eb' 'ei' 'eq' 'eu' 'ev' 'ez' 'f5' 'f8' 'fc' 'g2' 'gd' 'gs' 'gv' 'hu' 'hy' 'id' 'ig' 'ij' 'ir' 'iv' 'ju' 'ka' 'kj' 'kl' 'ks' 'kv' 'kw' 'kx' 'la' 'lh' 'lm' 'ls' 'lv' 'lz' 'mg' 'mh' 'mp' 'ns' 'nt' 'nu' 'nx' 'o6' 'o9' 'oc' 'oj' 'pa' 'pj' 'pl' 'pv' 'q3' 'q5' 'qb' 'qh' 'ql' 'qn' 'qr' 'qs' 'qz' 'rb' 'rf' 'rh' 'rm' 'sm' 'sn' 'sw' 't5' 't9' 'tq' 'ty' 'uj' 'v2' 'vz' 'w6' 'wc' 'wg' 'wi' 'wj' 'wk' 'wm' 'wn' 'wt' 'wv' 'ww' 'wz' 'x0' 'xb' 'xc' 'yb' 'yt' 'yv' 'zp' 'zy' 2016-05-19 11:21:22.326724 +425 'ax' 'ch' 'de' 'dl' 'e1' 'el' 'fl' 'fo' 'fp' 'fx' 'gb' 'gj' 'gx' 'hg' 'ho' 'hs' 'ip' 'jr' 'kg' 'l6' 'li' 'ln' 'mg' 'mm' 'mo' 'o2' 'os' 'pm' 'q2' 'q6' 'qr' 're' 'ro' 'rr' 'rt' 'rw' 'rx' 'sk' 'tl' 'to' 'tr' 'tt' 'u3' 'u6' 'vo' 'w4' 'w5' 'wb' 'wo' 'wt' 'yi' 'zh' 2016-05-19 12:21:22.326724 +426 '4t' 'a3' 'ac' 'au' 'bp' 'br' 'e5' 'ei' 'ek' 'ez' 'fz' 'g5' 'gn' 'ik' 'k9' 'kb' 'kc' 'lm' 'ls' 'ly' 'mj' 'mx' 'nk' 'nr' 'ny' 'o2' 'o8' 'pb' 'pu' 'q2' 'qa' 'qc' 'qh' 'qi' 'qs' 'qx' 'qz' 
'rv' 'rx' 'tc' 'tk' 'tv' 'u8' 'ui' 'uz' 'vo' 'w2' 'wm' 'wp' 'xt' 'yf' 'yv' 'zl' 2016-05-19 13:21:22.326724 +427 'aj' 'ar' 'ay' 'eb' 'g5' 'ic' 'la' 'u2' 'up' 'wb' 'x8' 'yr' 2016-05-19 14:21:22.326724 +428 'a2' 'af' 'an' 'aq' 'ar' 'bj' 'bz' 'cx' 'dy' 'eh' 'ek' 'en' 'ex' 'fy' 'gg' 'gr' 'hh' 'if' 'ip' 'it' 'ix' 'j5' 'jf' 'jn' 'k0' 'kk' 'lq' 'nc' 'oc' 'od' 'og' 'oh' 'oo' 'or' 'pm' 'po' 'qa' 'qd' 'qe' 'qh' 'ql' 'qo' 'qt' 'qx' 'qz' 'rq' 'rr' 'ru' 'sj' 'sk' 'tl' 'tr' 'ug' 'wa' 'wb' 'wd' 'we' 'wk' 'wn' 'wq' 'wr' 'xl' 'xo' 'xw' 'yf' 'yq' 'ys' 'yw' 2016-05-19 15:21:22.326724 +429 'ad' 'ai' 'ak' 'df' 'ee' 'fr' 'gg' 'i2' 'i5' 'ig' 'ij' 'ir' 'j1' 'kk' 'km' 'l9' 'qb' 'ql' 'qt' 'qw' 'rb' 's2' 'te' 'ue' 'w0' 'wk' 'wu' 'y4' 'yb' 'yf' 'yp' 'zn' 2016-05-19 16:21:22.326724 +430 '3y' 'a9' 'au' 'bp' 'ca' 'cf' 'cn' 'de' 'dh' 'dk' 'dw' 'ec' 'er' 'ey' 'fn' 'g1' 'hb' 'hd' 'hg' 'hi' 'hj' 'ib' 'if' 'iu' 'j7' 'jd' 'jg' 'jj' 'jx' 'k2' 'km' 'kv' 'ld' 'lo' 'mz' 'o3' 'ok' 'ot' 'pl' 'pn' 'pr' 'q0' 'q2' 'q7' 'qa' 'qc' 'qg' 'qm' 'qp' 'qx' 'rc' 'rq' 'rt' 'si' 'so' 'tp' 'tu' 'uj' 'vi' 'vx' 'w7' 'wb' 'wd' 'wm' 'ws' 'wt' 'wv' 'y9' 'yf' 'yq' 2016-05-19 17:21:22.326724 +431 '10' 'ac' 'as' 'av' 'b6' 'bb' 'cj' 'd3' 'd8' 'dt' 'eb' 'ey' 'ez' 'fm' 'g8' 'gf' 'gs' 'hn' 'hq' 'ib' 'ii' 'j0' 'jb' 'k4' 'l5' 'ld' 'mg' 'mu' 'nz' 'oa' 'ou' 'pn' 'q1' 'qa' 'qg' 'qj' 'qm' 'qn' 'qs' 'qx' 'qy' 'rj' 'rm' 'rr' 'ry' 's0' 's7' 't4' 't6' 'tb' 'td' 'u0' 'ua' 'ut' 'ux' 'uz' 'v1' 'v9' 'vd' 'vz' 'wc' 'wj' 'wo' 'ws' 'ww' 'x9' 'xu' 'y7' 'yq' 'yv' 'zw' 'zy' 2016-05-19 18:21:22.326724 +432 'ac' 'bv' 'd2' 'de' 'e8' 'ea' 'go' 'gw' 'h4' 'ht' 'iy' 'jm' 'ot' 'pp' 'pw' 'pz' 'qd' 'qn' 'qu' 'qy' 'r1' 'ra' 'rg' 'rh' 'rp' 's0' 'sb' 'sn' 'st' 'td' 'uz' 'vg' 'w1' 'w6' 'wa' 'wc' 'we' 'wi' 'wj' 'wu' 'ww' 'y9' 'zr' 2016-05-19 19:21:22.326724 +433 '5t' 'ad' 'am' 'ed' 'ei' 'en' 'eo' 'ey' 'f0' 'fp' 'fr' 'gc' 'hp' 'hz' 'ic' 'ix' 'jt' 'kn' 'kr' 'lk' 'ls' 'm1' 'mt' 'nk' 'od' 'p3' 'pa' 'pe' 'pi' 'q4' 'qa' 'qi' 'qk' 'qq' 'qt' 'qv' 'rb' 'rr' 'rv' 's3' 'se' 'sr' 't0' 'tj' 'tk' 'tp' 'tu' 'u1' 'ud' 'uf' 'uv' 'ux' 'vd' 'vu' 'wh' 'wi' 'wp' 'wu' 'x9' 'xa' 'ye' 'yn' 'yw' 'zj' 'zs' 2016-05-19 20:21:22.326724 +434 'a1' 'ag' 'ai' 'ap' 'bp' 'by' 'dc' 'dy' 'e5' 'eg' 'ei' 'el' 'em' 'fc' 'gl' 'h8' 'hr' 'i1' 'ib' 'ie' 'if' 'ij' 'j5' 'jj' 'kf' 'kx' 'lc' 'm3' 'mg' 'nb' 'ol' 'ph' 'py' 'q1' 'q5' 'qi' 'qk' 'ql' 'qp' 'qs' 'sv' 'td' 'te' 'tn' 'vd' 'vm' 'vq' 'wv' 'xp' 'xu' 'yb' 'yo' 'yq' 'yx' 'yy' 'zp' 'zr' 'zx' 'zz' 2016-05-19 21:21:22.326724 +435 '2l' '9f' 'a2' 'ak' 'as' 'av' 'bi' 'c1' 'cf' 'ct' 'cu' 'db' 'dp' 'du' 'dv' 'e3' 'e9' 'ee' 'eg' 'el' 'fm' 'gu' 'hc' 'he' 'i3' 'i9' 'ib' 'ik' 'im' 'ir' 'j7' 'jp' 'jv' 'ki' 'kl' 'l6' 'lj' 'lo' 'lw' 'md' 'mj' 'mk' 'ms' 'mw' 'na' 'nq' 'o2' 'od' 'ox' 'p3' 'pj' 'pp' 'q6' 'q7' 'q9' 'qb' 'qg' 'qr' 'qs' 'r7' 'r8' 'rd' 're' 'rf' 'rg' 'rp' 'rv' 'rw' 's6' 'sc' 'sq' 't6' 'tb' 'tc' 'te' 'tj' 'tn' 'tx' 'tz' 'uh' 'uq' 'uu' 'v4' 'vw' 'w3' 'w8' 'wa' 'wj' 'wk' 'wp' 'x3' 'xg' 'xy' 'y1' 'y6' 'yc' 'yi' 'yn' 'yo' 'yw' 'yz' 'z6' 'z8' 'zk' 'zz' 2016-05-19 22:21:22.326724 +436 '5t' 'al' 'db' 'dt' 'dx' 'ea' 'en' 'g6' 'gc' 'gm' 'gy' 'if' 'ii' 'ik' 'jb' 'jv' 'k5' 'po' 'pv' 'py' 'qj' 'qp' 'rz' 'ux' 'v1' 'w4' 'w8' 'wi' 'yv' 2016-05-19 23:21:22.326724 +437 'fb' 'in' 'iy' 'lu' 'p4' 'pd' 'qa' 'qq' 's6' 'ta' 'y1' 'yg' 'yy' 'zk' 2016-05-20 00:21:22.326724 +438 'av' 'dp' 'ff' 'fx' 'jk' 'ke' 'lb' 'lm' 'n1' 'ql' 'rv' 's4' 'uv' 'wl' 'ws' 'yj' 2016-05-20 01:21:22.326724 +439 '7y' 'aq' 'cu' 'd3' 'h2' 'ih' 'oc' 'qh' 'rc' 'rs' 't3' 'ud' 'we' 'zt' 2016-05-20 02:21:22.326724 +440 '7u' 'a4' 'ah' 'aj' 'al' 'cu' 
'e1' 'fi' 'fq' 'fr' 'he' 'hi' 'hk' 'hn' 'hp' 'hs' 'ih' 'iw' 'je' 'k4' 'k6' 'kx' 'l1' 'lf' 'li' 'lj' 'lm' 'lp' 'ls' 'lu' 'ma' 'nr' 'o2' 'o4' 'oe' 'oh' 'os' 'ow' 'p7' 'pl' 'q3' 'qj' 'qt' 'qy' 'qz' 'r3' 'rc' 'rm' 'ro' 's0' 's8' 'so' 't7' 'th' 'ti' 'tm' 'tt' 'ut' 'v5' 'va' 've' 'w7' 'wc' 'wk' 'wl' 'wm' 'wq' 'wu' 'x0' 'zh' 2016-05-20 03:21:22.326724 +441 'ah' 'bg' 'bw' 'ca' 'cn' 'cy' 'da' 'dq' 'dt' 'eq' 'ex' 'fe' 'ff' 'ga' 'gr' 'kv' 'ld' 'my' 'od' 'oq' 'p3' 'p5' 'pc' 'pf' 'q5' 'qa' 'qc' 'qi' 'ql' 'qr' 'qx' 'qz' 'rd' 'rk' 'rq' 'ru' 'rw' 's7' 'sa' 'sh' 'tk' 'um' 'un' 'v4' 'vw' 'wg' 'wr' 'wt' 'xt' 'ya' 'yo' 'z6' 'zm' 2016-05-20 04:21:22.326724 +442 'd8' 'dm' 'eg' 'et' 'ge' 'gz' 'i8' 'ig' 'il' 'iy' 'jo' 'k6' 'lm' 'oj' 'p3' 'pw' 'qd' 'qp' 'qq' 'qz' 'r7' 's1' 'sr' 'sx' 't4' 'uz' 'vm' 'vr' 'w0' 'wj' 'wp' 'xc' 'y1' 'yj' 'zd' 2016-05-20 05:21:22.326724 +443 'a0' 'ck' 'fp' 'g4' 'ib' 'ih' 'im' 'iq' 'kz' 'll' 'lv' 'nc' 'oq' 'qf' 'qv' 'rg' 'rk' 'tc' 'tn' 'u1' 'u8' 'uj' 'un' 'vv' 2016-05-20 06:21:22.326724 +444 '6x' 'a4' 'ae' 'as' 'd5' 'df' 'e9' 'ek' 'ew' 'ex' 'ez' 'fa' 'fe' 'fr' 'gk' 'hb' 'hg' 'hl' 'hp' 'id' 'it' 'ix' 'jc' 'jg' 'jk' 'jm' 'js' 'ju' 'ln' 'lq' 'mo' 'ms' 'o7' 'od' 'oi' 'oo' 'pn' 'qd' 'qt' 'qv' 'r0' 'rc' 'rj' 'rk' 'rn' 'rp' 'rx' 'rz' 'sc' 'sg' 'sk' 'so' 'ti' 'tn' 'to' 'tr' 'tz' 'uk' 'um' 'ut' 'v1' 'vr' 'wo' 'wr' 'wu' 'wz' 'xn' 'xu' 'y4' 'yg' 'z2' 2016-05-20 07:21:22.326724 +445 '1l' '2l' 'ag' 'aj' 'al' 'an' 'ay' 'bo' 'cg' 'cs' 'cw' 'cx' 'db' 'dd' 'e1' 'e8' 'ee' 'ef' 'ej' 'eu' 'ew' 'fc' 'ff' 'fh' 'fo' 'fv' 'fz' 'g1' 'gj' 'gk' 'gw' 'ha' 'i3' 'ib' 'iu' 'jv' 'k9' 'kq' 'ku' 'kw' 'lv' 'lw' 'ly' 'mp' 'my' 'nf' 'ng' 'nw' 'o6' 'oe' 'or' 'ou' 'oz' 'p4' 'pc' 'ph' 'pn' 'py' 'q0' 'qd' 'qe' 'qp' 'qs' 'qu' 'qv' 'qz' 'r6' 'rg' 'rj' 'rt' 'ru' 'rv' 'rw' 'sx' 't2' 'tl' 'tn' 'tq' 'tx' 'tz' 'ue' 'ui' 'ul' 'uo' 'uu' 'w0' 'w1' 'w4' 'w6' 'w8' 'w9' 'wh' 'wi' 'wp' 'ws' 'wt' 'wx' 'yd' 'zd' 'zf' 'zx' 2016-05-20 08:21:22.326724 +446 'aw' 'bm' 'dw' 'e5' 'ht' 'j4' 'kv' 'm5' 'oi' 'qa' 'qe' 'qf' 'ri' 'rj' 't6' 't8' 'un' 'wc' 'yb' 'yj' 2016-05-20 09:21:22.326724 +447 '4a' 'a9' 'au' 'bn' 'bs' 'cg' 'd4' 'dx' 'fb' 'fk' 'gc' 'hh' 'ht' 'hy' 'iy' 'je' 'jg' 'jj' 'jp' 'ju' 'kz' 'la' 'm8' 'na' 'oa' 'oj' 'ol' 'or' 'ov' 'ox' 'p6' 'po' 'q0' 'q9' 'qc' 'qe' 'qi' 'qq' 'qt' 'qx' 'si' 'sn' 'su' 'sw' 't8' 'ta' 'tv' 'tz' 'uq' 'ut' 'w6' 'w7' 'we' 'wk' 'wl' 'wq' 'wy' 'y4' 'ya' 'ze' 'zq' 'zy' 2016-05-20 10:21:22.326724 +448 '3h' '4h' '93' 'ak' 'ao' 'bq' 'cw' 'db' 'dx' 'eb' 'ef' 'el' 'eu' 'ex' 'fc' 'fg' 'fo' 'fr' 'fs' 'fx' 'g5' 'gl' 'go' 'hs' 'i0' 'ii' 'ix' 'j3' 'jc' 'ke' 'l2' 'lf' 'lo' 'm6' 'ms' 'ne' 'oi' 'ox' 'p6' 'pb' 'pr' 'q9' 'qa' 'qi' 'qj' 'qq' 'qs' 'qu' 'qw' 're' 'rq' 's0' 'sa' 'se' 'sk' 'sx' 'tj' 'tk' 'u2' 'ua' 'uh' 'un' 'vy' 'wf' 'wh' 'wj' 'wq' 'wv' 'xl' 'xq' 'yf' 'yi' 'yw' 'z3' 2016-05-20 11:21:22.326724 +449 'a2' 'ag' 'cz' 'd0' 'd3' 'da' 'di' 'ds' 'e5' 'e6' 'e7' 'ej' 'em' 'ex' 'ff' 'fq' 'gm' 'go' 'gt' 'gv' 'gx' 'ho' 'hv' 'i8' 'ic' 'ii' 'il' 'im' 'it' 'ix' 'j1' 'ja' 'jd' 'js' 'kw' 'l6' 'le' 'lk' 'ln' 'mt' 'my' 'ns' 'ol' 'op' 'os' 'p0' 'pu' 'pv' 'pz' 'q4' 'q5' 'qe' 'qh' 'ql' 'qx' 'qy' 'r1' 'rk' 'rr' 'sb' 'sn' 'te' 'tt' 'ui' 'v6' 'w6' 'we' 'ws' 'wy' 'xt' 'xu' 'yf' 'yq' 'yz' 'zy' 2016-05-20 12:21:22.326724 +450 'ab' 'ct' 'cv' 'cw' 'dg' 'ds' 'e0' 'e5' 'f3' 'fj' 'fl' 'fn' 'gy' 'ha' 'ie' 'if' 'jf' 'ju' 'jx' 'kg' 'ki' 'kw' 'l0' 'ls' 'm9' 'mj' 'nj' 'of' 'om' 'oq' 'os' 'ov' 'pv' 'pz' 'q0' 'q5' 'q8' 'qf' 'qg' 'qi' 'qj' 'qm' 'qn' 'qw' 'rk' 'ru' 'sc' 'sn' 'so' 't1' 'tn' 'tz' 'un' 'uv' 'uw' 'vn' 'wh' 'wn' 'ww' 'xy' 'yd' 'yr' 'yv' 'zh' 'zy' 2016-05-20 
13:21:22.326724 +451 '45' '85' 'an' 'bi' 'ca' 'cb' 'co' 'cq' 'cu' 'dt' 'e4' 'eb' 'ed' 'ef' 'em' 'eq' 'er' 'et' 'ex' 'fe' 'fk' 'fv' 'fw' 'gm' 'gq' 'hu' 'it' 'iw' 'j5' 'jj' 'jm' 'jr' 'lq' 'lz' 'mc' 'n0' 'nq' 'o7' 'ok' 'pd' 'ph' 'q6' 'qe' 'qh' 'qm' 'qy' 'qz' 'r2' 'r5' 'ra' 're' 's6' 'sb' 'sl' 'tb' 'tg' 'ur' 'vl' 'wa' 'wf' 'wt' 'wv' 'ww' 'xr' 'xz' 'yb' 'yf' 'zx' 2016-05-20 14:21:22.326724 +452 'ap' 'ca' 'dt' 'dx' 'ep' 'f5' 'fg' 'gq' 'hi' 'hj' 'i4' 'ic' 'it' 'iy' 'jl' 'lz' 'nd' 'o9' 'og' 'oq' 'pk' 'q6' 'qo' 'ra' 'sf' 'wd' 'wt' 'x9' 2016-05-20 15:21:22.326724 +453 '5g' 'a5' 'dj' 'dl' 'dr' 'e9' 'ed' 'ep' 'er' 'gb' 'hh' 'hl' 'i0' 'if' 'ig' 'io' 'it' 'k2' 'ki' 'll' 'o9' 'om' 'ot' 'p5' 'q1' 'q6' 'qa' 'qp' 'qw' 'qy' 'r9' 'rb' 'rm' 'ry' 's7' 'tl' 'ts' 'ut' 'w0' 'wd' 'wu' 'wv' 'xk' 'ya' 'z5' 'zq' 'zs' 2016-05-20 16:21:22.326724 +454 '6o' 'av' 'ed' 'ee' 'gf' 'ii' 'o8' 'og' 'om' 'qs' 'ta' 'th' 'tk' 'w0' 2016-05-20 17:21:22.326724 +455 '17' 'bv' 'c5' 'cn' 'd0' 'di' 'dm' 'dz' 'ec' 'ef' 'et' 'ev' 'ew' 'fa' 'fb' 'fh' 'fw' 'fy' 'g8' 'he' 'hf' 'hp' 'hu' 'hz' 'i2' 'ig' 'ix' 'jy' 'lt' 'mi' 'nm' 'nq' 'o5' 'oa' 'od' 'oi' 'oo' 'ox' 'pb' 'po' 'qb' 'qc' 'qf' 'qr' 'qs' 'qt' 'qw' 'qx' 'r0' 'rb' 'rf' 'rh' 'rk' 'ry' 'sb' 'sf' 'sh' 'st' 'tb' 'tj' 'tn' 'tq' 'tu' 'ty' 'u9' 'ud' 'ut' 'uw' 'ux' 'w4' 'wi' 'xm' 'ya' 'yl' 'ys' 'yt' 'yv' 'yy' 'zb' 'zv' 2016-05-20 18:21:22.326724 +456 '3h' 'aw' 'az' 'b2' 'bl' 'bq' 'bt' 'c0' 'cd' 'ck' 'cm' 'cw' 'cy' 'd3' 'e0' 'ef' 'eo' 'ew' 'fj' 'fk' 'gb' 'gc' 'gm' 'gt' 'gz' 'hv' 'in' 'ix' 'jb' 'jg' 'jl' 'jn' 'js' 'k7' 'kg' 'ku' 'kx' 'kz' 'ld' 'nb' 'ni' 'o3' 'o6' 'od' 'om' 'p3' 'p4' 'pn' 'pu' 'q7' 'qd' 'qh' 'qp' 'qq' 'qr' 'qv' 'qy' 'qz' 'rq' 'sb' 'se' 'sf' 'sh' 'sm' 't0' 'tg' 'tj' 'tl' 'tq' 'ua' 'uk' 'us' 'wd' 'we' 'wj' 'wn' 'wq' 'wv' 'wz' 'yd' 'yg' 'ym' 'yn' 'yr' 'zc' 'zz' 2016-05-20 19:21:22.326724 +457 '1t' 'am' 'aw' 'b4' 'bi' 'bm' 'bx' 'cd' 'cj' 'cv' 'e5' 'ek' 'eo' 'er' 'ex' 'f5' 'f8' 'fp' 'fr' 'fw' 'ga' 'gy' 'hm' 'hs' 'ip' 'ir' 'is' 'it' 'iv' 'iz' 'jf' 'jm' 'jp' 'ju' 'k6' 'kk' 'kr' 'lb' 'le' 'lt' 'mj' 'nk' 'np' 'nr' 'oa' 'oj' 'pm' 'pn' 'q1' 'q3' 'qb' 'qc' 'qf' 'qh' 'qi' 'qj' 'qq' 'qt' 'qw' 'qy' 'r2' 'r8' 'rf' 'rh' 'rw' 's9' 't3' 'te' 'ti' 'tn' 'tq' 'uu' 'uv' 'uw' 'w0' 'w1' 'w3' 'w7' 'wc' 'wi' 'wk' 'wn' 'wr' 'wt' 'xm' 'y5' 'ya' 'z7' 'zu' 2016-05-20 20:21:22.326724 +458 '2l' 'a5' 'ah' 'ak' 'ar' 'be' 'e6' 'eh' 'ge' 'gj' 'gn' 'gt' 'gy' 'ia' 'ii' 'iw' 'ix' 'jb' 'k9' 'kd' 'ke' 'kh' 'kv' 'lm' 'ly' 'me' 'mt' 'nb' 'np' 'o5' 'oe' 'of' 'og' 'pd' 'pf' 'pm' 'pt' 'pw' 'q3' 'qc' 'qe' 'qg' 'qh' 'qk' 'ql' 'qs' 'qt' 'qx' 'qz' 'rr' 'rw' 'rx' 'ry' 'sk' 'sr' 'su' 'sv' 'sz' 'ug' 'uk' 'uq' 'vg' 'w7' 'wb' 'wn' 'wr' 'ws' 'wz' 'x6' 'yg' 'yj' 'yo' 'zb' 'zx' 2016-05-20 21:21:22.326724 +459 'ab' 'c1' 'cl' 'd3' 'do' 'e5' 'e8' 'eg' 'ek' 'ex' 'gy' 'ia' 'iq' 'iw' 'jf' 'kv' 'm9' 'n4' 'nh' 'nj' 'q3' 'qa' 'qe' 'qg' 'qm' 'sy' 'ta' 'w5' 'w6' 2016-05-20 22:21:22.326724 +460 '1g' '6b' 'az' 'be' 'c5' 'dp' 'dt' 'e6' 'eg' 'en' 'es' 'et' 'f1' 'fi' 'fn' 'ft' 'fz' 'g0' 'gj' 'gv' 'h8' 'hp' 'hs' 'hu' 'hw' 'hz' 'ia' 'im' 'is' 'iv' 'iw' 'iy' 'jd' 'kw' 'ky' 'l4' 'l7' 'mn' 'nn' 'nr' 'ny' 'ot' 'p2' 'p8' 'pt' 'q8' 'qa' 'qb' 'qf' 'qh' 'qi' 'qm' 'qt' 'qu' 'qv' 'qy' 'ra' 'rl' 'ro' 'rw' 'rx' 's8' 's9' 'sk' 'sn' 'st' 'sv' 'th' 'tl' 'to' 'tp' 'tu' 'ua' 'uk' 'un' 'uv' 'v8' 've' 'vt' 'vu' 'vv' 'w4' 'wc' 'wh' 'wo' 'wq' 'wy' 'xe' 'xm' 'xp' 'xu' 'yc' 'yn' 'yq' 'zf' 'zj' 'zs' 'zt' 2016-05-20 23:21:22.326724 +461 '2a' '3o' '6w' '9h' 'ag' 'ak' 'cu' 'db' 'e3' 'ei' 'ep' 'eq' 'f6' 'fm' 'fq' 'fz' 'g5' 'gi' 'gk' 'gn' 'gp' 'gx' 'hb' 'hw' 'ia' 'ic' 'id' 'ig' 'ih' 
'im' 'jh' 'jy' 'k3' 'kh' 'kv' 'l4' 'lf' 'lh' 'lu' 'ni' 'o7' 'oe' 'oi' 'op' 'os' 'ow' 'p0' 'p1' 'pj' 'pp' 'qa' 'qe' 'qi' 'qm' 'qz' 'rb' 're' 'rh' 'sa' 'sn' 'te' 'th' 'u3' 'ui' 'uj' 'us' 'vh' 'w1' 'w7' 'wc' 'wg' 'wh' 'wo' 'wx' 'xr' 'xw' 'xz' 'y5' 'y6' 'y9' 'ye' 'yo' 'ze' 'zg' 'zp' 2016-05-21 00:21:22.326724 +462 'b7' 'eu' 'g4' 'hw' 'in' 'pi' 'qt' 'r0' 'rg' 'sn' 'sz' 'tc' 'wd' 'zs' 2016-05-21 01:21:22.326724 +463 'a3' 'ad' 'ar' 'at' 'bb' 'bf' 'bt' 'cg' 'cx' 'd6' 'de' 'df' 'e4' 'e6' 'eg' 'et' 'ex' 'f0' 'fe' 'fg' 'fj' 'fo' 'gh' 'hb' 'hj' 'hq' 'hr' 'hv' 'ia' 'id' 'is' 'it' 'iy' 'ja' 'jj' 'jq' 'jw' 'l2' 'l7' 'lc' 'lu' 'nc' 'no' 'np' 'nt' 'ob' 'od' 'og' 'oo' 'os' 'pe' 'pj' 'pl' 'po' 'pq' 'pu' 'q2' 'qa' 'qc' 'qf' 'qr' 'qt' 'qx' 're' 'rk' 'rn' 'ro' 'ru' 'rw' 'rx' 's7' 's8' 'sy' 'tc' 'tf' 'tg' 'th' 'tm' 'to' 'tv' 'tx' 'tz' 'va' 'vp' 'w0' 'w1' 'wh' 'wl' 'wq' 'wr' 'wt' 'ww' 'wy' 'x7' 'xl' 'xy' 'y0' 'ye' 'yr' 'yy' 'z3' 'zf' 'zo' 2016-05-21 02:21:22.326724 +464 '15' '4o' '7h' 'aa' 'av' 'b0' 'cb' 'da' 'dh' 'di' 'dr' 'e0' 'ee' 'eo' 'ep' 'ey' 'fi' 'fo' 'fq' 'fx' 'fz' 'g9' 'ge' 'hd' 'hh' 'hs' 'i4' 'i9' 'iq' 'it' 'iw' 'ix' 'iy' 'j8' 'jr' 'ld' 'm1' 'mo' 'nx' 'ob' 'ol' 'ot' 'pj' 'qf' 'qj' 'qk' 'qp' 'qv' 'qy' 'r4' 'r5' 'rl' 'rm' 'rq' 'rz' 's8' 'sc' 'sd' 'sf' 'sh' 'sn' 'ss' 't5' 't7' 'tj' 'to' 'tw' 'u6' 'uc' 'ud' 'ug' 'ui' 'uk' 'ut' 'uy' 'uz' 'vi' 'w0' 'w9' 'wb' 'wg' 'wl' 'ww' 'wx' 'wy' 'xa' 'xb' 'xo' 'ya' 'yi' 'yu' 'yv' 'yz' 'zm' 'zv' 'zx' 'zy' 'zz' 2016-05-21 03:21:22.326724 +465 '2b' '2e' 'ao' 'ap' 'au' 'bo' 'by' 'cr' 'do' 'du' 'dy' 'e7' 'eb' 'eh' 'em' 'ey' 'f6' 'ff' 'fu' 'gc' 'gi' 'gk' 'h1' 'hp' 'hs' 'hy' 'ia' 'ic' 'ig' 'ik' 'im' 'je' 'jf' 'ji' 'jr' 'l5' 'lf' 'lp' 'mv' 'ne' 'nt' 'oa' 'os' 'pj' 'po' 'q9' 'qd' 'qe' 'qf' 'qw' 'qy' 'ra' 'rg' 'rk' 'rp' 'rq' 'rx' 'sb' 'si' 'sn' 'ta' 'ux' 'v1' 'vd' 'wa' 'wk' 'wr' 'wu' 'xo' 'yg' 'yl' 'yz' 'zi' 'zt' 2016-05-21 04:21:22.326724 +466 '3k' 'f5' 'if' 'kg' 'lj' 'ol' 'qr' 'so' 'ta' 'u1' 'vu' 'wr' 'y2' 2016-05-21 05:21:22.326724 +467 'cv' 'ds' 'dx' 'dy' 'ex' 'hh' 'lf' 'mq' 'qe' 'qu' 'rb' 'tb' 'tv' 'tz' 'ue' 'ui' 'wi' 'yb' 'zz' 2016-05-21 06:21:22.326724 +468 '1t' 'a0' 'ah' 'ar' 'at' 'be' 'bs' 'bt' 'co' 'd9' 'e9' 'ea' 'ec' 'ei' 'eo' 'er' 'ez' 'fa' 'fz' 'gl' 'gt' 'h8' 'h9' 'hb' 'hv' 'ia' 'ic' 'if' 'im' 'io' 'iq' 'ix' 'k9' 'kq' 'lo' 'm4' 'md' 'mo' 'mz' 'ni' 'nr' 'nz' 'o8' 'ox' 'pk' 'pr' 'q1' 'q3' 'q8' 'qa' 'qb' 'qf' 'qg' 'qi' 'ql' 'qr' 'r3' 'rc' 'rf' 'rg' 'rr' 's0' 'sf' 'tg' 'tw' 'u2' 'uh' 'un' 'ur' 'ux' 'vb' 'vr' 'w7' 'w9' 'wd' 'wh' 'wm' 'wo' 'wr' 'ws' 'wv' 'x4' 'xj' 'xx' 'y3' 'y8' 'yd' 'yl' 'yo' 'yq' 'yr' 'yw' 'z8' 'za' 'zb' 'zg' 'zo' 'zs' 2016-05-21 07:21:22.326724 +469 'a1' 'cd' 'cl' 'd8' 'ek' 'ig' 'ih' 'in' 'lq' 'o3' 'ow' 'px' 'qg' 'qm' 'qq' 'qr' 'qs' 'qy' 'rd' 'rh' 'to' 'tq' 'ul' 'wc' 'x9' 'ya' 'yf' 'yw' 2016-05-21 08:21:22.326724 +470 '3q' '5p' 'a2' 'be' 'bv' 'bw' 'cv' 'd8' 'df' 'dh' 'dk' 'e2' 'ec' 'eo' 'fc' 'g8' 'gq' 'h8' 'ig' 'io' 'ir' 'iv' 'je' 'kq' 'kx' 'lc' 'lr' 'lt' 'ok' 'pk' 'q1' 'q4' 'qe' 'qh' 'qw' 'qz' 'rj' 'rv' 't8' 'tf' 'tl' 'tq' 'uh' 'uz' 'wk' 'wp' 'ws' 'wt' 'wv' 'x3' 'yb' 'ye' 'yo' 'yp' 'yy' 'z6' 'z8' 'zb' 'zm' 2016-05-21 09:21:22.326724 +471 'b0' 'bg' 'bh' 'cu' 'd8' 'dv' 'er' 'fd' 'fm' 'fo' 'gg' 'ij' 'ir' 'iu' 'jc' 'jl' 'jn' 'jo' 'k4' 'kb' 'ku' 'lq' 'ly' 'mw' 'of' 'op' 'ph' 'pk' 'ps' 'px' 'q0' 'qd' 'qg' 'qz' 'rb' 'rp' 'rs' 'rv' 'su' 't9' 'tm' 'tp' 'tx' 'ty' 'ug' 'ul' 'uo' 'up' 'uv' 'v5' 'wh' 'wr' 'ww' 'xw' 'y1' 'yd' 'yf' 'yn' 'z4' 'z8' 'zf' 'zn' 2016-05-21 10:21:22.326724 +472 'cm' 'dm' 'eh' 'em' 'ik' 'j0' 'kk' 'lp' 'ng' 'or' 'pu' 'qd' 'tw' 2016-05-21 
11:21:22.326724 +473 '4p' 'am' 'bj' 'bq' 'cg' 'cm' 'cz' 'dm' 'ds' 'du' 'e6' 'eg' 'en' 'eo' 'eq' 'et' 'ev' 'ex' 'fw' 'go' 'hb' 'hq' 'i0' 'ij' 'k6' 'kg' 'l6' 'lm' 'lq' 'mr' 'ms' 'oa' 'p6' 'q7' 'qa' 'qi' 'qk' 'r8' 'ra' 're' 'rr' 's8' 'sj' 't6' 't9' 'tc' 'v1' 'vd' 'w1' 'wf' 'wj' 'xy' 'yd' 'yn' 'yo' 'yt' 'yu' 2016-05-21 12:21:22.326724 +474 '43' '9w' 'ah' 'av' 'bl' 'dr' 'ef' 'es' 'fm' 'ft' 'gy' 'hg' 'hq' 'hy' 'iu' 'ix' 'j1' 'jn' 'lg' 'np' 'o9' 'op' 'ou' 'ox' 'qq' 'st' 'sw' 't8' 't9' 'vs' 'vw' 'wd' 'wx' 'wz' 'xr' 'xt' 'xv' 'y5' 'y8' 'yi' 'ze' 2016-05-21 13:21:22.326724 +475 'a3' 'bh' 'c2' 'ca' 'e1' 'fb' 'fe' 'hd' 'hx' 'jc' 'md' 'nl' 'q9' 'qi' 'qq' 'qs' 'qt' 'rx' 'te' 'tv' 'u2' 'w8' 'wi' 'wr' 'xq' 'y9' 2016-05-21 14:21:22.326724 +476 '1o' 'a0' 'aa' 'bd' 'bj' 'ch' 'cm' 'dj' 'e2' 'eq' 'eu' 'fj' 'fo' 'g9' 'go' 'hi' 'ia' 'ix' 'jh' 'jl' 'jy' 'lh' 'nd' 'nw' 'ox' 'p3' 'pi' 'pm' 'pt' 'q3' 'qh' 'qn' 'ra' 'ri' 'rr' 'ru' 'rv' 't3' 'tm' 'u3' 'ue' 'us' 'uz' 'vn' 'w1' 'wl' 'wo' 'wx' 'xj' 'xn' 'yc' 'yp' 'zr' 2016-05-21 15:21:22.326724 +477 '7a' 'ap' 'bl' 'fl' 'g7' 'ko' 'ma' 'qi' 'ri' 'rp' 'y5' 'yo' 2016-05-21 16:21:22.326724 +478 'an' 'dh' 'do' 'hs' 'hv' 'ia' 'ic' 'ne' 'of' 'oi' 'oq' 'pe' 'pg' 'q9' 'r5' 'rk' 'sc' 'sf' 'sh' 'ta' 'tb' 'tq' 'um' 'wb' 'wj' 'wm' 'wq' 'wt' 'yi' 'ym' 2016-05-21 17:21:22.326724 +479 'am' 'aw' 'ay' 'bj' 'df' 'eb' 'eo' 'eq' 'et' 'ey' 'f9' 'fo' 'g0' 'gg' 'h4' 'hq' 'in' 'k4' 'k7' 'kb' 'kl' 'my' 'nt' 'o3' 'og' 'pc' 'q4' 'qg' 'qi' 'qm' 'qy' 'ri' 'rv' 's9' 'sv' 'tl' 'ue' 'uo' 'v6' 'wb' 'wt' 'wx' 'xi' 'xu' 2016-05-21 18:21:22.326724 +480 '1l' 'e2' 'e4' 'eo' 'ep' 'fd' 'ha' 'hp' 'hx' 'io' 'iu' 'jr' 'jx' 'k4' 'l8' 'nb' 'oa' 'om' 'on' 'ow' 'p2' 'qh' 'qp' 'qz' 'ra' 'rz' 'sy' 'tk' 'tt' 'ul' 'uq' 'vm' 'wt' 'xg' 'xn' 'ya' 'yf' 'yw' 'yx' 'zp' 2016-05-21 19:21:22.326724 +481 '3k' 'ag' 'ak' 'bi' 'bl' 'bw' 'by' 'ch' 'cm' 'dw' 'e1' 'e2' 'ed' 'ej' 'ek' 'er' 'eu' 'ez' 'f8' 'fd' 'fi' 'fl' 'gi' 'gm' 'gx' 'gy' 'h2' 'h8' 'hl' 'hn' 'ij' 'ip' 'iq' 'it' 'jb' 'jl' 'jn' 'k7' 'kh' 'kl' 'kn' 'kt' 'nh' 'nk' 'pa' 'pe' 'pg' 'pp' 'q5' 'q7' 'qi' 'qk' 'ql' 'qs' 'r8' 'ri' 'rj' 'rl' 'rw' 'rx' 'so' 'tb' 'tj' 'tm' 'to' 'tu' 'tv' 'tz' 'u4' 'ue' 'ul' 'uv' 'v1' 'vj' 'vy' 'wc' 'wr' 'wt' 'wx' 'wz' 'xh' 'xj' 'xp' 'xt' 'y4' 'yb' 'yf' 'ym' 'yo' 'yq' 'yy' 'zp' 'zq' 'zt' 2016-05-21 20:21:22.326724 +482 '7r' 'a8' 'aa' 'ag' 'bo' 'bp' 'c7' 'd0' 'd6' 'e5' 'er' 'fc' 'ff' 'go' 'ha' 'hs' 'jj' 'jq' 'ki' 'lc' 'le' 'no' 'pf' 'qc' 'qh' 'qq' 'qs' 'qv' 'rf' 'rj' 'rx' 'su' 'ta' 'ti' 'tu' 'tv' 'ty' 'ug' 'un' 'up' 'vb' 'vi' 'wp' 'wu' 'wz' 'xt' 'y0' 'yd' 'yu' 2016-05-21 21:21:22.326724 +483 'ad' 'af' 'aj' 'am' 'aq' 'ba' 'bo' 'c0' 'c5' 'cx' 'da' 'dc' 'dk' 'ed' 'ek' 'en' 'eo' 'ep' 'ew' 'fb' 'g4' 'gd' 'h2' 'hm' 'ho' 'hy' 'ib' 'if' 'io' 'ir' 'iv' 'iz' 'jc' 'jp' 'jt' 'kr' 'lo' 'me' 'na' 'nc' 'nh' 'o6' 'ok' 'oq' 'or' 'ow' 'pn' 'ps' 'q6' 'qa' 'qj' 'qo' 'qx' 'r2' 'r6' 'rd' 're' 'rg' 'rw' 'ta' 'tk' 'uj' 'uo' 'vn' 'wc' 'wh' 'wj' 'wl' 'wu' 'wv' 'ww' 'wx' 'xe' 'xl' 'xn' 'xo' 'xs' 'yb' 'yl' 'yp' 'yw' 'zg' 'zq' 'zr' 'zu' 'zx' 'zy' 2016-05-21 22:21:22.326724 +484 '39' 'a9' 'ag' 'ap' 'ax' 'b9' 'c3' 'dn' 'e4' 'ea' 'f7' 'f9' 'g2' 'gi' 'h4' 'ht' 'ie' 'ij' 'ir' 'is' 'it' 'jd' 'jj' 'jv' 'kp' 'kt' 'kv' 'lh' 'ln' 'ls' 'mg' 'mh' 'nc' 'nh' 'o0' 'o2' 'od' 'ox' 'pb' 'pn' 'po' 'pz' 'q1' 'q8' 'qc' 'qd' 'qf' 'qh' 'qi' 'qk' 'qz' 'rd' 'rg' 'ro' 'rz' 's7' 'sa' 'sf' 'sn' 'sw' 'tk' 'tu' 'tw' 'u7' 'uu' 'vh' 'wa' 'wd' 'wk' 'wr' 'wu' 'wy' 'wz' 'xh' 'xm' 'xt' 'yb' 'yi' 'yj' 'yn' 'yr' 'z1' 'z8' 'zn' 2016-05-21 23:21:22.326724 +485 '4g' '8w' 'ab' 'aq' 'at' 'bc' 'bi' 'c7' 'cb' 'cj' 'cs' 'd2' 'd3' 
'di' 'dm' 'dx' 'dz' 'ed' 'ex' 'fj' 'gs' 'h1' 'h6' 'he' 'hj' 'hr' 'i1' 'ia' 'ie' 'il' 'ix' 'iy' 'j2' 'jd' 'jo' 'jy' 'kx' 'la' 'lv' 'ma' 'mh' 'mp' 'mt' 'n9' 'na' 'nf' 'ng' 'np' 'o7' 'ob' 'on' 'ou' 'ov' 'p9' 'pg' 'po' 'pq' 'q0' 'q4' 'q5' 'qc' 'qj' 'qp' 'qq' 'qt' 'ra' 'rb' 'rq' 'ru' 'sl' 'sp' 't8' 'ta' 'te' 'tl' 'tz' 'u1' 'ud' 'ui' 'uv' 'uw' 'vf' 'vt' 'vu' 'vz' 'w0' 'w7' 'wc' 'wg' 'wh' 'wq' 'wr' 'wz' 'y6' 'y7' 'ye' 'yh' 2016-05-22 00:21:22.326724 +486 '1t' 'a7' 'aj' 'au' 'ca' 'cn' 'cw' 'dg' 'dp' 'ec' 'ei' 'en' 'ew' 'ez' 'f3' 'f8' 'hp' 'ht' 'hw' 'is' 'iv' 'jd' 'ji' 'kn' 'l4' 'lq' 'lz' 'm7' 'o6' 'oj' 'oz' 'p4' 'p7' 'po' 'pp' 'q2' 'qa' 'qj' 'qo' 'qt' 'qv' 'qw' 'qy' 'ra' 'rd' 'ri' 'rn' 'rr' 'ry' 's7' 'st' 'sz' 'tg' 'to' 'tu' 'tx' 'w2' 'w4' 'w5' 'wd' 'we' 'wg' 'wo' 'ws' 'xm' 'y5' 'yf' 'yl' 'ym' 'yr' 'yz' 2016-05-22 01:21:22.326724 +487 '1y' '4f' 'ao' 'bv' 'co' 'cq' 'dd' 'df' 'dy' 'eq' 'eu' 'ex' 'gg' 'gm' 'gr' 'hm' 'iw' 'j9' 'jb' 'jg' 'jo' 'ju' 'k0' 'km' 'lf' 'ng' 'np' 'nw' 'nz' 'od' 'oj' 'or' 'pp' 'pr' 'pu' 'q5' 'q7' 'q9' 'qc' 'qd' 'qn' 'qx' 'r9' 'rd' 'rk' 'ro' 'sb' 'ta' 'th' 'tv' 'ty' 'tz' 'uq' 'vb' 'wb' 'wj' 'xo' 2016-05-22 02:21:22.326724 +488 '2t' '4d' 'bb' 'bd' 'cg' 'co' 'd6' 'db' 'dg' 'dn' 'do' 'dy' 'e1' 'e3' 'e6' 'ec' 'em' 'en' 'ep' 'es' 'ev' 'ew' 'fu' 'fz' 'gc' 'gt' 'hg' 'hl' 'hy' 'id' 'in' 'io' 'ir' 'iu' 'iw' 'jl' 'jo' 'jp' 'jw' 'k4' 'ke' 'ku' 'ld' 'lg' 'li' 'lj' 'lr' 'ls' 'm7' 'n8' 'o6' 'oi' 'op' 'oy' 'pi' 'pq' 'qk' 'qm' 'qn' 'qp' 'qr' 'qu' 'qv' 'qz' 'r3' 're' 'rj' 'rk' 'rq' 'ru' 'rw' 'rz' 'sh' 'si' 'ta' 'to' 'uq' 'ux' 'vc' 'vr' 'wb' 'wh' 'wn' 'wo' 'wy' 'xg' 'y7' 'yb' 'yd' 'yo' 2016-05-22 03:21:22.326724 +489 '5e' 'a2' 'ag' 'ay' 'dc' 'ef' 'jd' 'mn' 'pl' 'qp' 'wu' 'xd' 2016-05-22 04:21:22.326724 +490 '1x' '3m' 'a1' 'a5' 'a7' 'al' 'aq' 'ar' 'c8' 'cc' 'cd' 'd0' 'dt' 'e6' 'ei' 'em' 'ez' 'f6' 'fc' 'ff' 'fg' 'fp' 'gm' 'h3' 'ha' 'hg' 'ho' 'hs' 'ib' 'ie' 'il' 'it' 'ix' 'j3' 'jl' 'jt' 'jv' 'jw' 'kd' 'ki' 'l1' 'lk' 'lr' 'lv' 'm0' 'mx' 'my' 'n6' 'ni' 'o2' 'o4' 'o9' 'om' 'p0' 'p6' 'pi' 'pj' 'q5' 'qa' 'qc' 'qg' 'qp' 'qs' 'qu' 'qw' 'rg' 'rt' 'rz' 'se' 'sj' 'sp' 'su' 'sw' 'te' 'tp' 'tq' 'tt' 'tu' 'tx' 'u3' 'un' 'uo' 'up' 'uq' 'vc' 'vj' 'vu' 'w2' 'wh' 'wj' 'ws' 'wv' 'ww' 'xg' 'yc' 'yf' 'yv' 'z2' 'zl' 'zw' 'zy' 2016-05-22 05:21:22.326724 +491 '68' 'af' 'as' 'at' 'cf' 'ci' 'cx' 'dl' 'dt' 'dv' 'e4' 'ea' 'eg' 'es' 'fd' 'fi' 'fo' 'g1' 'gd' 'gy' 'he' 'hu' 'hw' 'hz' 'i0' 'ia' 'if' 'io' 'it' 'iv' 'iz' 'j0' 'jc' 'jj' 'jm' 'kb' 'ki' 'kk' 'ku' 'ky' 'lr' 'nv' 'oi' 'om' 'p3' 'pa' 'pt' 'qe' 'qh' 'qj' 'qm' 'qq' 'qs' 'qt' 'qu' 'r3' 'rp' 'rr' 'rw' 'rx' 's2' 'sc' 'sp' 'ti' 'tn' 'tv' 'ty' 'ue' 'uk' 'ul' 'up' 'ut' 'uu' 'uw' 'vf' 'vp' 'w2' 'w7' 'wb' 'wl' 'wq' 'xg' 'yc' 'yh' 'z3' 2016-05-22 06:21:22.326724 +492 '9y' 'a0' 'a6' 'ad' 'aj' 'az' 'bw' 'by' 'cg' 'ci' 'dc' 'dk' 'dm' 'dw' 'e0' 'e4' 'ee' 'ef' 'eg' 'en' 'eu' 'fc' 'fg' 'fm' 'fx' 'g7' 'gm' 'go' 'hw' 'hy' 'i4' 'i7' 'ip' 'iq' 'ir' 'jr' 'ju' 'jx' 'kr' 'ky' 'la' 'lk' 'lq' 'm9' 'mg' 'mp' 'my' 'n6' 'nv' 'nz' 'o1' 'o3' 'oe' 'oy' 'pj' 'pv' 'pz' 'qg' 'ql' 'qp' 'qt' 'qy' 'r4' 'rf' 'rg' 'rk' 'ro' 'rw' 'ry' 's3' 'sd' 'sf' 'sm' 'tf' 'tg' 'tq' 'tu' 'ty' 'tz' 'ub' 'uc' 'uf' 'um' 'vi' 'vn' 'wa' 'wc' 'we' 'xo' 'xr' 'xs' 'yb' 'yi' 'yw' 'zn' 'zo' 2016-05-22 07:21:22.326724 +493 '4n' '6a' 'a3' 'a5' 'aa' 'ae' 'ag' 'b9' 'ca' 'cf' 'd1' 'da' 'dr' 'dz' 'ee' 'el' 'et' 'ey' 'fj' 'fs' 'gl' 'hk' 'hl' 'hn' 'ie' 'ih' 'im' 'ix' 'j1' 'jr' 'kf' 'kk' 'lc' 'lk' 'lp' 'lx' 'mh' 'mt' 'mx' 'my' 'nr' 'nu' 'o6' 'og' 'oo' 'p4' 'p7' 'pj' 'pr' 'q3' 'qc' 'qd' 'qj' 'qk' 'ql' 'qp' 'qt' 'qv' 'qx' 'r8' 're' 'rm' 'rs' 
'ru' 'rv' 'sp' 'sw' 'td' 'tk' 'to' 'tw' 'tz' 'u8' 'uf' 'vf' 'vw' 'w7' 'wq' 'wr' 'xe' 'ym' 'yo' 'yr' 'ys' 'yz' 'zc' 'zn' 'zs' 2016-05-22 08:21:22.326724 +494 'av' 'es' 'fl' 'gt' 'he' 'it' 'kp' 'mu' 'nc' 'ol' 'om' 'ph' 'qr' 'ra' 'rk' 'ui' 'vh' 'w6' 'wm' 'ws' 'yu' 'z0' 'zl' 'zm' 2016-05-22 09:21:22.326724 +495 '3r' '6o' 'ab' 'ay' 'b3' 'bc' 'bh' 'd8' 'dd' 'df' 'eb' 'ee' 'eh' 'el' 'eu' 'ex' 'fn' 'g3' 'ge' 'gr' 'gz' 'hd' 'ib' 'ie' 'ih' 'il' 'it' 'iu' 'jd' 'jq' 'jt' 'jv' 'li' 'pc' 'pp' 'qc' 'ql' 'qp' 'qu' 'qx' 'qz' 'ro' 'rq' 'sj' 'sz' 'te' 'tt' 'tu' 'uh' 'uo' 'up' 'us' 'uu' 'ux' 'v7' 'w3' 'wl' 'wn' 'xf' 'xu' 'ya' 'yh' 'yk' 'za' 'zt' 2016-05-22 10:21:22.326724 +496 'a0' 'ai' 'bx' 'ca' 'e2' 'eb' 'ed' 'eg' 'eh' 'eo' 'et' 'hn' 'ix' 'jh' 'ki' 'lm' 'lw' 'm8' 'mb' 'mh' 'mk' 'nc' 'o3' 'o9' 'of' 'qc' 'qe' 'qf' 'qh' 'qi' 'qq' 'qr' 'qz' 'r2' 'r3' 'rc' 'rh' 'rs' 'rv' 'sr' 'uk' 'up' 'ur' 'uv' 'wm' 'wr' 'wz' 'xd' 'y3' 'ya' 'yv' 'zr' 2016-05-22 11:21:22.326724 +497 '7h' '7k' 'bi' 'c8' 'cc' 'cj' 'cs' 'd7' 'dh' 'dl' 'dp' 'dt' 'e9' 'ea' 'eh' 'ei' 'ej' 'el' 'ew' 'fo' 'fp' 'ge' 'gg' 'gi' 'gk' 'h2' 'h7' 'hk' 'hs' 'hy' 'ii' 'j4' 'kd' 'kh' 'kp' 'ks' 'm2' 'n1' 'n3' 'nk' 'nr' 'od' 'ok' 'om' 'oy' 'pb' 'ph' 'pm' 'pp' 'pt' 'q2' 'q8' 'q9' 'qc' 'qf' 'qg' 'qh' 'qj' 'qk' 'qn' 'qo' 'qq' 'qv' 'qx' 'r4' 'rc' 'rg' 'rj' 's9' 'sb' 'sg' 'sj' 'ss' 't6' 'ta' 'tc' 'tm' 'tv' 'us' 'uu' 'uy' 'w7' 'wf' 'wh' 'wk' 'wz' 'x2' 'xr' 'ya' 'yc' 'yk' 'yp' 'ys' 'yz' 'z4' 'zg' 'zn' 2016-05-22 12:21:22.326724 +498 '6p' 'an' 'aq' 'au' 'br' 'bz' 'c3' 'ca' 'cg' 'cn' 'db' 'dk' 'dq' 'e0' 'e8' 'ek' 'eo' 'er' 'ez' 'fd' 'ft' 'g0' 'gd' 'gh' 'gk' 'gn' 'gr' 'gv' 'gy' 'hb' 'hc' 'he' 'ht' 'ii' 'ip' 'iu' 'j9' 'jn' 'jo' 'jq' 'jz' 'kh' 'kn' 'ko' 'l3' 'ls' 'lz' 'nh' 'nk' 'ok' 'oy' 'p7' 'pd' 'ph' 'pu' 'pw' 'py' 'q7' 'qa' 'qi' 'qk' 'ql' 'qn' 'qq' 'qr' 'qs' 'qv' 'qx' 'qy' 'rg' 'rs' 'ru' 'sg' 'sl' 'sq' 'sr' 'su' 'ts' 'tt' 'ui' 'um' 'ut' 'uu' 'v0' 'v5' 'w7' 'wb' 'wc' 'wf' 'wi' 'wm' 'xd' 'xs' 'xz' 'y3' 'y4' 'y9' 'yi' 'yp' 'yx' 'z0' 'zf' 2016-05-22 13:21:22.326724 +499 'au' 'cm' 'dj' 'e1' 'eh' 'ey' 'f3' 'fd' 'fg' 'fv' 'hn' 'i0' 'ia' 'jt' 'jy' 'k2' 'll' 'ne' 'o0' 'o2' 'op' 'pa' 'pf' 'qq' 'qx' 'rr' 'rs' 's4' 'sn' 'so' 'sq' 'tc' 'tn' 'ts' 'ty' 'tz' 'un' 'va' 'vg' 'vj' 'w8' 'wa' 'wb' 'wk' 'wo' 'wp' 'wr' 'x3' 'xx' 'yj' 'yz' 'z3' 'z7' 'zn' 2016-05-22 14:21:22.326724 +500 'a4' 'ae' 'ax' 'bb' 'bg' 'ca' 'ch' 'cq' 'cv' 'dm' 'dn' 'en' 'ep' 'eu' 'ev' 'f0' 'g3' 'gk' 'gm' 'hd' 'ho' 'hp' 'hy' 'ij' 'im' 'iy' 'jl' 'jr' 'jy' 'kj' 'kt' 'ku' 'lp' 'mo' 'mr' 'mz' 'n4' 'nk' 'oc' 'ol' 'oo' 'os' 'oy' 'oz' 'p8' 'p9' 'ps' 'qb' 'qd' 'qg' 'qi' 'qv' 'qx' 'r1' 'ra' 'rf' 'rg' 'rm' 'ro' 'rr' 'rv' 'rz' 's7' 'sm' 'ss' 'tl' 'tr' 'tu' 'ty' 'u5' 'ui' 'un' 'uq' 'uv' 'vn' 'w1' 'w2' 'w6' 'wd' 'we' 'wg' 'wn' 'wp' 'wy' 'y2' 'y6' 'yc' 'yd' 'yt' 'yw' 'z8' 'ze' 'zs' 2016-05-22 15:21:22.326724 +501 'ca' 'fu' 'hv' 'la' 'mt' 'ov' 'pl' 'q8' 'r3' 'sp' 'sy' 'tg' 'to' 'tv' 'wn' 'x4' 'yh' 'yp' 'ze' 2016-05-22 16:21:22.326724 +502 'ag' 'ai' 'az' 'br' 'c2' 'cd' 'ck' 'db' 'du' 'e0' 'e7' 'eb' 'ee' 'em' 'ep' 'eq' 'es' 'eu' 'ex' 'fh' 'fi' 'ga' 'gm' 'gn' 'hj' 'hq' 'if' 'ig' 'ii' 'ix' 'jk' 'kd' 'kg' 'kk' 'kr' 'kt' 'ku' 'lx' 'mp' 'mq' 'nx' 'o9' 'oa' 'om' 'oq' 'p5' 'pd' 'pr' 'pu' 'pw' 'q3' 'qa' 'qd' 'qe' 'qf' 'qi' 'qj' 'ql' 'qn' 'qo' 'qr' 'qt' 'qu' 'qv' 'qx' 'rc' 'rf' 'rl' 'rm' 'rn' 'rp' 'rr' 's7' 'se' 'st' 'sx' 't8' 'to' 'u1' 'ua' 'uq' 'ux' 'w6' 'w8' 'wf' 'wt' 'xb' 'xf' 'xk' 'xn' 'y3' 'ym' 'yp' 'yz' 'zi' 'zk' 2016-05-22 17:21:22.326724 +503 '40' '6a' '8j' 'am' 'bf' 'dp' 'dz' 'ew' 'gh' 'he' 'hh' 'ib' 'ii' 'iq' 'is' 'iu' 'kt' 'mf' 'oh' 'or' 
'p8' 'pa' 'pb' 'pi' 'qc' 'qo' 'qp' 'qq' 'qs' 'qt' 'qu' 'r4' 'rf' 'rl' 'sd' 'sl' 't4' 'tb' 'ua' 'ue' 'ut' 'wh' 'wm' 'y0' 'y6' 2016-05-22 18:21:22.326724 +504 '1b' '42' 'a7' 'ab' 'ak' 'ap' 'at' 'av' 'ay' 'b0' 'b9' 'bb' 'bp' 'bu' 'bz' 'cq' 'da' 'de' 'dn' 'e0' 'eb' 'ef' 'eg' 'ek' 'eq' 'er' 'eu' 'ey' 'fn' 'ft' 'gg' 'h4' 'hk' 'hl' 'i7' 'ig' 'ik' 'ip' 'ir' 'iu' 'iw' 'jr' 'jw' 'jx' 'kg' 'lc' 'lg' 'm0' 'na' 'np' 'om' 'on' 'oz' 'pg' 'pn' 'ps' 'pt' 'pz' 'q3' 'q6' 'qa' 'qb' 'ql' 'qq' 'qt' 'qv' 'qw' 'qy' 'r8' 'rf' 'ri' 'rk' 'rl' 'rw' 'sg' 'si' 'sp' 'sw' 'ta' 'th' 'ua' 'uj' 'uu' 'uv' 'uz' 'vj' 'vk' 'vm' 'wc' 'wf' 'wh' 'wn' 'wo' 'ww' 'xb' 'xk' 'xt' 'xw' 'y7' 'ye' 'yl' 'yt' 'yw' 'z4' 'z7' 'zc' 'zw' 2016-05-22 19:21:22.326724 +505 '27' '4g' 'a2' 'al' 'bp' 'ca' 'cp' 'da' 'dt' 'e8' 'ee' 'ef' 'eg' 'ej' 'eq' 'eu' 'ev' 'fe' 'gn' 'gq' 'gy' 'i5' 'ic' 'il' 'io' 'ir' 'iw' 'iz' 'j2' 'kz' 'l2' 'l8' 'lh' 'ln' 'lt' 'np' 'ns' 'oi' 'oj' 'or' 'ph' 'pr' 'pt' 'qa' 'qh' 'qi' 'qs' 'qt' 'qu' 'r1' 'rd' 'ry' 's8' 'sj' 'sk' 'sl' 'sq' 'td' 'te' 'tg' 'tj' 'tq' 'tx' 'u0' 'ub' 'ul' 'uu' 'ux' 'uy' 'v0' 've' 'vg' 'vk' 'wa' 'wj' 'ws' 'wu' 'yb' 'yd' 'yi' 'yk' 'yo' 'yr' 'ys' 'zp' 2016-05-22 20:21:22.326724 +506 'am' 'as' 'b1' 'd3' 'db' 'dl' 'ea' 'ed' 'ee' 'el' 'em' 'ep' 'er' 'ew' 'ez' 'f4' 'fm' 'go' 'h9' 'he' 'hl' 'i0' 'ie' 'ii' 'iz' 'ji' 'kl' 'kn' 'lc' 'lr' 'm7' 'mb' 'mt' 'my' 'no' 'nu' 'oo' 'or' 'ov' 'ox' 'pe' 'pq' 'q7' 'qc' 'qd' 'qi' 'qj' 'qo' 'qr' 'qs' 'qv' 'qw' 'r0' 'r8' 're' 'rm' 'rn' 'rp' 's0' 'so' 'sp' 'sw' 'sy' 'te' 'tg' 'tl' 'tr' 'uq' 'uz' 'vk' 'wk' 'wl' 'wp' 'wq' 'wr' 'wu' 'ww' 'xs' 'y0' 'yb' 'yn' 'yr' 'ys' 'yt' 'yy' 'zh' 'zr' 2016-05-22 21:21:22.326724 +507 '18' '5j' '6y' 'ax' 'bw' 'c5' 'de' 'dh' 'dl' 'do' 'dx' 'ek' 'el' 'en' 'et' 'ez' 'f1' 'fe' 'fw' 'ge' 'h1' 'h9' 'ha' 'hb' 'hl' 'it' 'jy' 'kl' 'ky' 'lo' 'nr' 'o3' 'o9' 'p0' 'pe' 'pg' 'ph' 'q1' 'q5' 'qd' 'qg' 'qh' 'qi' 'qj' 'qp' 'qq' 'qr' 'qv' 'qx' 'r0' 'rd' 're' 'rj' 'rm' 'rn' 'rq' 'rv' 'se' 'sv' 'to' 'tz' 'uo' 'v1' 'vm' 'vr' 'wa' 'we' 'wf' 'wh' 'wp' 'wt' 'wy' 'xe' 'xh' 'xz' 'y6' 'yn' 'zm' 'zy' 2016-05-22 22:21:22.326724 +508 'bb' 'bq' 'c2' 'cw' 'cy' 'db' 'dd' 'f3' 'fl' 'fn' 'id' 'ig' 'jb' 'kc' 'kl' 'lp' 'lx' 'mh' 'o0' 'pk' 'qi' 'qx' 'rq' 've' 'w8' 'wd' 'x1' 'y4' 'ye' 'zm' 2016-05-22 23:21:22.326724 diff --git a/expected/orderby.out b/expected/orderby.out new file mode 100644 index 0000000000..60ecee8ef3 --- /dev/null +++ b/expected/orderby.out @@ -0,0 +1,190 @@ +CREATE TABLE tsts (id int, t tsvector, d timestamp); +\copy tsts from 'data/tsts.data' +CREATE INDEX tsts_idx ON tsts USING rum (t rum_tsvector_timestamp_ops, d) + WITH (orderby = 'd', addto = 't'); +INSERT INTO tsts VALUES (-1, 't1 t2', '2016-05-02 02:24:22.326724'); +INSERT INTO tsts VALUES (-2, 't1 t2 t3', '2016-05-02 02:26:22.326724'); +SELECT count(*) FROM tsts WHERE t @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SET enable_indexscan=OFF; +SET enable_indexonlyscan=OFF; +SET enable_bitmapscan=OFF; +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? 
+-----+---------------------------------+--------------- + 355 | Mon May 16 14:21:22.326724 2016 | 2.673276 + 354 | Mon May 16 13:21:22.326724 2016 | 3602.673276 + 371 | Tue May 17 06:21:22.326724 2016 | 57597.326724 + 406 | Wed May 18 17:21:22.326724 2016 | 183597.326724 + 415 | Thu May 19 02:21:22.326724 2016 | 215997.326724 +(5 rows) + +SELECT id, d, d <-| '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-| '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? +-----+---------------------------------+--------------- + 355 | Mon May 16 14:21:22.326724 2016 | 2.673276 + 354 | Mon May 16 13:21:22.326724 2016 | 3602.673276 + 252 | Thu May 12 07:21:22.326724 2016 | 370802.673276 + 232 | Wed May 11 11:21:22.326724 2016 | 442802.673276 + 168 | Sun May 08 19:21:22.326724 2016 | 673202.673276 +(5 rows) + +SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? +-----+---------------------------------+--------------- + 371 | Tue May 17 06:21:22.326724 2016 | 57597.326724 + 406 | Wed May 18 17:21:22.326724 2016 | 183597.326724 + 415 | Thu May 19 02:21:22.326724 2016 | 215997.326724 + 428 | Thu May 19 15:21:22.326724 2016 | 262797.326724 + 457 | Fri May 20 20:21:22.326724 2016 | 367197.326724 +(5 rows) + +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; +SET enable_seqscan = off; +EXPLAIN (costs off) +SELECT count(*) FROM tsts WHERE t @@ 'wr|qh'; + QUERY PLAN +------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on tsts + Recheck Cond: (t @@ '''wr'' | ''qh'''::tsquery) + -> Bitmap Index Scan on tsts_idx + Index Cond: (t @@ '''wr'' | ''qh'''::tsquery) +(5 rows) + +SELECT count(*) FROM tsts WHERE t @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM tsts WHERE t @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +EXPLAIN (costs off) +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + -> Index Scan using tsts_idx on tsts + Index Cond: (t @@ '''wr'' & ''qh'''::tsquery) + Order By: (d <-> 'Mon May 16 14:21:25 2016'::timestamp without time zone) +(4 rows) + +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? 
+-----+---------------------------------+--------------- + 355 | Mon May 16 14:21:22.326724 2016 | 2.673276 + 354 | Mon May 16 13:21:22.326724 2016 | 3602.673276 + 371 | Tue May 17 06:21:22.326724 2016 | 57597.326724 + 406 | Wed May 18 17:21:22.326724 2016 | 183597.326724 + 415 | Thu May 19 02:21:22.326724 2016 | 215997.326724 +(5 rows) + +EXPLAIN (costs off) +SELECT id, d, d <-| '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-| '2016-05-16 14:21:25' LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + -> Index Scan using tsts_idx on tsts + Index Cond: (t @@ '''wr'' & ''qh'''::tsquery) + Order By: (d <-| 'Mon May 16 14:21:25 2016'::timestamp without time zone) +(4 rows) + +SELECT id, d, d <-| '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-| '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? +-----+---------------------------------+--------------- + 355 | Mon May 16 14:21:22.326724 2016 | 2.673276 + 354 | Mon May 16 13:21:22.326724 2016 | 3602.673276 + 252 | Thu May 12 07:21:22.326724 2016 | 370802.673276 + 232 | Wed May 11 11:21:22.326724 2016 | 442802.673276 + 168 | Sun May 08 19:21:22.326724 2016 | 673202.673276 +(5 rows) + +EXPLAIN (costs off) +SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + -> Index Scan using tsts_idx on tsts + Index Cond: (t @@ '''wr'' & ''qh'''::tsquery) + Order By: (d |-> 'Mon May 16 14:21:25 2016'::timestamp without time zone) +(4 rows) + +SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? 
+-----+---------------------------------+--------------- + 371 | Tue May 17 06:21:22.326724 2016 | 57597.326724 + 406 | Wed May 18 17:21:22.326724 2016 | 183597.326724 + 415 | Thu May 19 02:21:22.326724 2016 | 215997.326724 + 428 | Thu May 19 15:21:22.326724 2016 | 262797.326724 + 457 | Fri May 20 20:21:22.326724 2016 | 367197.326724 +(5 rows) + +--to be fixed +--EXPLAIN (costs off) +--SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +--SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; diff --git a/expected/rum.out b/expected/rum.out index 88d330d37b..74027e64be 100644 --- a/expected/rum.out +++ b/expected/rum.out @@ -19,11 +19,11 @@ SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever| explain (costs off) SELECT * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote') -ORDER BY a >< to_tsquery('pg_catalog.english', 'ever|wrote'); +ORDER BY a <-> to_tsquery('pg_catalog.english', 'ever|wrote'); QUERY PLAN ------------------------------------------------------------------ Sort - Sort Key: ((a >< '''ever'' | ''wrote'''::tsquery)) + Sort Key: ((a <-> '''ever'' | ''wrote'''::tsquery)) -> Bitmap Heap Scan on test_rum Recheck Cond: (a @@ '''ever'' | ''wrote'''::tsquery) -> Bitmap Index Scan on rumidx @@ -93,7 +93,7 @@ SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way')), * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'way') - ORDER BY a >< to_tsquery('pg_catalog.english', 'way'); + ORDER BY a <-> to_tsquery('pg_catalog.english', 'way'); rum_ts_distance | t | a -----------------+--------------------------------------------------------------------------+--------------------------------------------------------------- 0.0607927 | my appreciation of you in a more complimentary way than by sending this | 'appreci':2 'complimentari':8 'send':12 'way':9 @@ -105,7 +105,7 @@ SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way')), * SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'way & (go | half)') - ORDER BY a >< to_tsquery('pg_catalog.english', 'way & (go | half)'); + ORDER BY a <-> to_tsquery('pg_catalog.english', 'way & (go | half)'); rum_ts_distance | t | a -----------------+---------------------------------------------------------------------+--------------------------------------------------------- 0.103556 | thinking--“to go or not to go?” We are this far on the way. 
Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 diff --git a/expected/ruminv.out b/expected/ruminv.out new file mode 100644 index 0000000000..08955cceeb --- /dev/null +++ b/expected/ruminv.out @@ -0,0 +1,273 @@ +CREATE TABLE test_invrum(q tsquery); +INSERT INTO test_invrum VALUES ('a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&b'::tsquery); +INSERT INTO test_invrum VALUES ('!(a|b)'::tsquery); +INSERT INTO test_invrum VALUES ('!(a&b)'::tsquery); +INSERT INTO test_invrum VALUES ('!a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&!b'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&c'::tsquery); +INSERT INTO test_invrum VALUES ('(!(a|b))&c'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&(c|d)'::tsquery); +INSERT INTO test_invrum VALUES ('!a'::tsquery); +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; + q +--- +(0 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; + q +------------ + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(6 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; + q +---------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +CREATE INDEX test_invrum_idx ON test_invrum USING rum(q); +SET enable_seqscan = OFF; +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; + q +--- +(0 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; + q +---------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; + q 
+------------ + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' +(3 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(6 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; + q +---------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !'a' +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; + q +------------------------------- + 'a' | 'b' + 'a' & 'b' + !'a' | 'b' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(4 rows) + +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; + q +---------------------- + !( 'a' | 'b' ) + !( 'a' & 'b' ) + !'a' | 'b' + !( 'a' | 'b' ) & 'c' + !'a' +(5 rows) + +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + q +------------------------------- + 'a' | 'b' + !( 'a' & 'b' ) + 'a' & !'b' + ( 'a' | 'b' ) & 'c' + ( 'a' | 'b' ) & ( 'c' | 'd' ) +(5 rows) + +INSERT INTO test_invrum VALUES ('a:*'::tsquery); +ERROR: Indexing of prefix tsqueries isn't supported yet +INSERT INTO test_invrum VALUES ('a <-> b'::tsquery); +ERROR: Indexing of phrase tsqueries isn't supported yet diff --git a/expected/timestamp.out b/expected/timestamp.out new file mode 100644 index 0000000000..b6abda8056 --- /dev/null +++ b/expected/timestamp.out @@ -0,0 +1,132 @@ +CREATE TABLE test_timestamp ( + i timestamp +); +INSERT INTO test_timestamp VALUES + ( '2004-10-26 03:55:08' ), + ( '2004-10-26 04:55:08' ), + ( '2004-10-26 05:55:08' ), + ( '2004-10-26 08:55:08' ), + ( '2004-10-26 09:55:08' ), + ( '2004-10-26 10:55:08' ) +; +SELECT i <-> '2004-10-26 06:24:08', i FROM test_timestamp ORDER BY 1, 2 ASC; + ?column? | i +----------+-------------------------- + 1740 | Tue Oct 26 05:55:08 2004 + 5340 | Tue Oct 26 04:55:08 2004 + 8940 | Tue Oct 26 03:55:08 2004 + 9060 | Tue Oct 26 08:55:08 2004 + 12660 | Tue Oct 26 09:55:08 2004 + 16260 | Tue Oct 26 10:55:08 2004 +(6 rows) + +SELECT i <-| '2004-10-26 06:24:08', i FROM test_timestamp ORDER BY 1, 2 ASC; + ?column? | i +----------+-------------------------- + 1740 | Tue Oct 26 05:55:08 2004 + 5340 | Tue Oct 26 04:55:08 2004 + 8940 | Tue Oct 26 03:55:08 2004 + Infinity | Tue Oct 26 08:55:08 2004 + Infinity | Tue Oct 26 09:55:08 2004 + Infinity | Tue Oct 26 10:55:08 2004 +(6 rows) + +SELECT i |-> '2004-10-26 06:24:08', i FROM test_timestamp ORDER BY 1, 2 ASC; + ?column? 
| i +----------+-------------------------- + 9060 | Tue Oct 26 08:55:08 2004 + 12660 | Tue Oct 26 09:55:08 2004 + 16260 | Tue Oct 26 10:55:08 2004 + Infinity | Tue Oct 26 03:55:08 2004 + Infinity | Tue Oct 26 04:55:08 2004 + Infinity | Tue Oct 26 05:55:08 2004 +(6 rows) + +CREATE INDEX idx_timestamp ON test_timestamp USING rum (i); +set enable_seqscan=off; +explain (costs off) +SELECT * FROM test_timestamp WHERE i<'2004-10-26 08:55:08'::timestamp ORDER BY i; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Sort Key: i + -> Index Scan using idx_timestamp on test_timestamp + Index Cond: (i < 'Tue Oct 26 08:55:08 2004'::timestamp without time zone) +(4 rows) + +SELECT * FROM test_timestamp WHERE i<'2004-10-26 08:55:08'::timestamp ORDER BY i; + i +-------------------------- + Tue Oct 26 03:55:08 2004 + Tue Oct 26 04:55:08 2004 + Tue Oct 26 05:55:08 2004 +(3 rows) + +explain (costs off) +SELECT * FROM test_timestamp WHERE i<='2004-10-26 08:55:08'::timestamp ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: i + -> Index Scan using idx_timestamp on test_timestamp + Index Cond: (i <= 'Tue Oct 26 08:55:08 2004'::timestamp without time zone) +(4 rows) + +SELECT * FROM test_timestamp WHERE i<='2004-10-26 08:55:08'::timestamp ORDER BY i; + i +-------------------------- + Tue Oct 26 03:55:08 2004 + Tue Oct 26 04:55:08 2004 + Tue Oct 26 05:55:08 2004 + Tue Oct 26 08:55:08 2004 +(4 rows) + +explain (costs off) +SELECT * FROM test_timestamp WHERE i='2004-10-26 08:55:08'::timestamp ORDER BY i; + QUERY PLAN +----------------------------------------------------------------------------- + Index Scan using idx_timestamp on test_timestamp + Index Cond: (i = 'Tue Oct 26 08:55:08 2004'::timestamp without time zone) +(2 rows) + +SELECT * FROM test_timestamp WHERE i='2004-10-26 08:55:08'::timestamp ORDER BY i; + i +-------------------------- + Tue Oct 26 08:55:08 2004 +(1 row) + +explain (costs off) +SELECT * FROM test_timestamp WHERE i>='2004-10-26 08:55:08'::timestamp ORDER BY i; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: i + -> Index Scan using idx_timestamp on test_timestamp + Index Cond: (i >= 'Tue Oct 26 08:55:08 2004'::timestamp without time zone) +(4 rows) + +SELECT * FROM test_timestamp WHERE i>='2004-10-26 08:55:08'::timestamp ORDER BY i; + i +-------------------------- + Tue Oct 26 08:55:08 2004 + Tue Oct 26 09:55:08 2004 + Tue Oct 26 10:55:08 2004 +(3 rows) + +explain (costs off) +SELECT * FROM test_timestamp WHERE i>'2004-10-26 08:55:08'::timestamp ORDER BY i; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Sort Key: i + -> Index Scan using idx_timestamp on test_timestamp + Index Cond: (i > 'Tue Oct 26 08:55:08 2004'::timestamp without time zone) +(4 rows) + +SELECT * FROM test_timestamp WHERE i>'2004-10-26 08:55:08'::timestamp ORDER BY i; + i +-------------------------- + Tue Oct 26 09:55:08 2004 + Tue Oct 26 10:55:08 2004 +(2 rows) + diff --git a/rum--1.0.sql b/rum--1.0.sql index 2556d17f7b..a996b8fc94 100644 --- a/rum--1.0.sql +++ b/rum--1.0.sql @@ -12,11 +12,10 @@ RETURNS float4 AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; -CREATE OPERATOR >< ( +CREATE OPERATOR <-> ( LEFTARG = tsvector, RIGHTARG = tsquery, - PROCEDURE = rum_ts_distance, - COMMUTATOR = '><' + PROCEDURE = rum_ts_distance ); CREATE FUNCTION 
rum_extract_tsvector(tsvector,internal,internal,internal,internal) @@ -24,7 +23,7 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; -CREATE FUNCTION rum_extract_tsquery(tsvector,internal,smallint,internal,internal,internal,internal) +CREATE FUNCTION rum_extract_tsquery(tsquery,internal,smallint,internal,internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; @@ -53,14 +52,152 @@ CREATE OPERATOR CLASS rum_tsvector_ops FOR TYPE tsvector USING rum AS OPERATOR 1 @@ (tsvector, tsquery), - OPERATOR 2 >< (tsvector, tsquery) FOR ORDER BY pg_catalog.float_ops, + OPERATOR 2 <-> (tsvector, tsquery) FOR ORDER BY pg_catalog.float_ops, FUNCTION 1 gin_cmp_tslexeme(text, text), FUNCTION 2 rum_extract_tsvector(tsvector,internal,internal,internal,internal), - FUNCTION 3 rum_extract_tsquery(tsvector,internal,smallint,internal,internal,internal,internal), + FUNCTION 3 rum_extract_tsquery(tsquery,internal,smallint,internal,internal,internal,internal), FUNCTION 4 rum_tsquery_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), FUNCTION 5 gin_cmp_prefix(text,text,smallint,internal), - FUNCTION 6 gin_tsquery_triconsistent(internal,smallint,tsvector,int,internal,internal,internal), - FUNCTION 7 rum_tsvector_config(internal), - FUNCTION 8 rum_tsquery_pre_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), - FUNCTION 9 rum_tsquery_distance(internal,smallint,tsvector,int,internal,internal,internal,internal,internal), + FUNCTION 6 rum_tsvector_config(internal), + FUNCTION 7 rum_tsquery_pre_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + FUNCTION 8 rum_tsquery_distance(internal,smallint,tsvector,int,internal,internal,internal,internal,internal), + STORAGE text; + +-- timestamp ops + +CREATE FUNCTION timestamp_distance(timestamp, timestamp) +RETURNS float8 +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR <-> ( + PROCEDURE = timestamp_distance, + LEFTARG = timestamp, + RIGHTARG = timestamp, + COMMUTATOR = <-> +); + +CREATE FUNCTION timestamp_left_distance(timestamp, timestamp) +RETURNS float8 +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR <-| ( + PROCEDURE = timestamp_left_distance, + LEFTARG = timestamp, + RIGHTARG = timestamp, + COMMUTATOR = |-> +); + +CREATE FUNCTION timestamp_right_distance(timestamp, timestamp) +RETURNS float8 +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR |-> ( + PROCEDURE = timestamp_right_distance, + LEFTARG = timestamp, + RIGHTARG = timestamp, + COMMUTATOR = <-| +); + + +-- timestamp operator class + +CREATE FUNCTION rum_timestamp_extract_value(timestamp,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION rum_timestamp_compare_prefix(timestamp,timestamp,smallint,internal) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION rum_timestamp_extract_query(timestamp,internal,smallint,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION rum_timestamp_consistent(internal,smallint,timestamp,int,internal,internal,internal,internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION rum_timestamp_outer_distance(timestamp, timestamp, smallint) +RETURNS float8 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE OPERATOR CLASS timestamp_ops +DEFAULT FOR TYPE timestamp USING 
rum +AS + OPERATOR 1 <, + OPERATOR 2 <=, + OPERATOR 3 =, + OPERATOR 4 >=, + OPERATOR 5 >, + --support + FUNCTION 1 timestamp_cmp(timestamp,timestamp), + FUNCTION 2 rum_timestamp_extract_value(timestamp,internal,internal,internal,internal), + FUNCTION 3 rum_timestamp_extract_query(timestamp,internal,smallint,internal,internal,internal,internal), + FUNCTION 4 rum_timestamp_consistent(internal,smallint,timestamp,int,internal,internal,internal,internal), + FUNCTION 5 rum_timestamp_compare_prefix(timestamp,timestamp,smallint,internal), + -- support to timestamp disttance in rum_tsvector_timestamp_ops + FUNCTION 9 rum_timestamp_outer_distance(timestamp, timestamp, smallint), + OPERATOR 20 <-> (timestamp,timestamp) FOR ORDER BY pg_catalog.float_ops, + OPERATOR 21 <-| (timestamp,timestamp) FOR ORDER BY pg_catalog.float_ops, + OPERATOR 22 |-> (timestamp,timestamp) FOR ORDER BY pg_catalog.float_ops, +STORAGE timestamp; + +--together + +CREATE FUNCTION rum_tsquery_timestamp_consistent(internal, smallint, tsvector, integer, internal, internal, internal, internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR CLASS rum_tsvector_timestamp_ops +FOR TYPE tsvector USING rum +AS + OPERATOR 1 @@ (tsvector, tsquery), + --support function + FUNCTION 1 gin_cmp_tslexeme(text, text), + FUNCTION 2 rum_extract_tsvector(tsvector,internal,internal,internal,internal), + FUNCTION 3 rum_extract_tsquery(tsquery,internal,smallint,internal,internal,internal,internal), + FUNCTION 4 rum_tsquery_timestamp_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + FUNCTION 5 gin_cmp_prefix(text,text,smallint,internal), + FUNCTION 7 rum_tsquery_pre_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + STORAGE text; + + +CREATE FUNCTION ruminv_extract_tsquery(tsquery,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION ruminv_extract_tsvector(tsvector,internal,smallint,internal,internal,internal,internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION ruminv_tsvector_consistent(internal, smallint, tsvector, integer, internal, internal, internal, internal) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION ruminv_tsquery_config(internal) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE OPERATOR CLASS rum_tsquery_ops +DEFAULT FOR TYPE tsquery USING rum +AS + OPERATOR 1 @@ (tsquery, tsvector), + FUNCTION 1 gin_cmp_tslexeme(text, text), + FUNCTION 2 ruminv_extract_tsquery(tsquery,internal,internal,internal,internal), + FUNCTION 3 ruminv_extract_tsvector(tsvector,internal,smallint,internal,internal,internal,internal), + FUNCTION 4 ruminv_tsvector_consistent(internal,smallint,tsvector,int,internal,internal,internal,internal), + FUNCTION 6 ruminv_tsquery_config(internal), STORAGE text; diff --git a/rum.h b/rum.h index b922f36148..d3abd0dd27 100644 --- a/rum.h +++ b/rum.h @@ -59,6 +59,11 @@ typedef RumPageOpaqueData *RumPageOpaque; typedef struct RumMetaPageData { + /* + * RUM version number + */ + uint32 rumVersion; + /* * Pointers to head and tail of pending list, which consists of RUM_LIST * pages. These store fast-inserted entries that haven't yet been moved @@ -86,21 +91,9 @@ typedef struct RumMetaPageData BlockNumber nEntryPages; BlockNumber nDataPages; int64 nEntries; - - /* - * RUM version number (ideally this should have been at the front, but too - * late now. 
Don't move it!) - * - * Currently 1 (for indexes initialized in 9.1 or later) - * - * Version 0 (indexes initialized in 9.0 or before) is compatible but may - * be missing null entries, including both null keys and placeholders. - * Reject full-index-scan attempts on such indexes. - */ - int32 rumVersion; } RumMetaPageData; -#define RUM_CURRENT_VERSION 1 +#define RUM_CURRENT_VERSION (0xC0DE0001) #define RumPageGetMeta(p) \ ((RumMetaPageData *) PageGetContents(p)) @@ -294,6 +287,9 @@ typedef struct RumOptions { int32 vl_len_; /* varlena header (do not touch directly!) */ bool useFastUpdate; /* use fast updates? */ + bool useAlternativeOrder; + int orderByColumn; + int addToColumn; } RumOptions; #define RUM_DEFAULT_USE_FASTUPDATE false @@ -307,6 +303,11 @@ typedef struct RumOptions #define RUM_SHARE BUFFER_LOCK_SHARE #define RUM_EXCLUSIVE BUFFER_LOCK_EXCLUSIVE +typedef struct RumKey { + ItemPointerData ipd; + bool isNull; + Datum addToCompare; +} RumKey; /* * RumState: working data structure describing the index being worked on @@ -315,6 +316,9 @@ typedef struct RumState { Relation index; bool oneCol; /* true if single-column index */ + bool useAlternativeOrder; + AttrNumber attrnOrderByColumn; + AttrNumber attrnAddToColumn; /* * origTupDesc is the nominal tuple descriptor of the index, ie, the i'th @@ -343,12 +347,14 @@ typedef struct RumState FmgrInfo configFn[INDEX_MAX_KEYS]; /* optional method */ FmgrInfo preConsistentFn[INDEX_MAX_KEYS]; /* optional method */ FmgrInfo orderingFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo outerOrderingFn[INDEX_MAX_KEYS]; /* optional method */ /* canPartialMatch[i] is true if comparePartialFn[i] is valid */ bool canPartialMatch[INDEX_MAX_KEYS]; /* canPreConsistent[i] is true if preConsistentFn[i] is valid */ bool canPreConsistent[INDEX_MAX_KEYS]; /* canOrdering[i] is true if orderingFn[i] is valid */ bool canOrdering[INDEX_MAX_KEYS]; + bool canOuterOrdering[INDEX_MAX_KEYS]; /* Collations to pass to the support functions */ Oid supportCollation[INDEX_MAX_KEYS]; } RumState; @@ -483,9 +489,11 @@ extern IndexTuple rumPageGetLinkItup(Buffer buf, Page page); extern void rumReadTuple(RumState *rumstate, OffsetNumber attnum, IndexTuple itup, ItemPointerData *ipd, Datum *addInfo, bool *addInfoIsNull); extern ItemPointerData updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate); +extern void checkLeafDataPage(RumState *rumstate, AttrNumber attrnum, Page page); /* rumdatapage.c */ extern int rumCompareItemPointers(ItemPointer a, ItemPointer b); +extern int compareRumKey(RumState *state, RumKey *a, RumKey *b); extern char *rumDataPageLeafWriteItemPointer(char *ptr, ItemPointer iptr, ItemPointer prev, bool addInfoIsNull); extern Pointer rumPlaceToDataPageLeaf(Pointer ptr, OffsetNumber attnum, ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, @@ -555,13 +563,16 @@ typedef struct RumScanKeyData bool *entryRes; Datum *addInfo; bool *addInfoIsNull; + bool useAddToColumn; + Datum outerAddInfo; + bool outerAddInfoIsNull; /* other data needed for calling consistentFn */ Datum query; /* NB: these three arrays have only nuserentries elements! 
*/ Datum *queryValues; RumNullCategory *queryCategories; - Pointer *extra_data; + Pointer *extra_data; StrategyNumber strategy; int32 searchMode; OffsetNumber attnum; @@ -629,6 +640,7 @@ typedef struct typedef struct RumScanOpaqueData { MemoryContext tempCtx; + MemoryContext keyCtx; /* used to hold key and entry data */ RumState rumstate; RumScanKey keys; /* one per scan qualifier expr */ @@ -674,8 +686,8 @@ extern IndexBulkDeleteResult *rumvacuumcleanup(IndexVacuumInfo *info, typedef struct { ItemPointerData iptr; - Datum addInfo; bool addInfoIsNull; + Datum addInfo; } RumEntryAccumulatorItem; /* rumvalidate.c */ @@ -733,9 +745,10 @@ extern void rumInsertCleanup(RumState *rumstate, bool vac_delay, IndexBulkDeleteResult *stats); /* rum_ts_utils.c */ -#define RUM_CONFIG_PROC 7 -#define RUM_PRE_CONSISTENT_PROC 8 -#define RUM_ORDERING_PROC 9 +#define RUM_CONFIG_PROC 6 +#define RUM_PRE_CONSISTENT_PROC 7 +#define RUM_ORDERING_PROC 8 +#define RUM_OUTER_ORDERING_PROC 9 #define RUMNProcs 9 extern Datum rum_extract_tsvector(PG_FUNCTION_ARGS); @@ -839,9 +852,47 @@ rumDataPageLeafRead(Pointer ptr, OffsetNumber attnum, ItemPointer iptr, if (!isNull) { attr = rumstate->addAttrs[attnum - 1]; - ptr = (Pointer) att_align_pointer(ptr, attr->attalign, attr->attlen, ptr); - if (addInfo) - *addInfo = fetch_att(ptr, attr->attbyval, attr->attlen); + + if (attr->attbyval) + { + /* do not use aligment for pass-by-value types */ + if (addInfo) + { + union { + int16 i16; + int32 i32; + } u; + + switch(attr->attlen) + { + case sizeof(char): + *addInfo = Int8GetDatum(*ptr); + break; + case sizeof(int16): + memcpy(&u.i16, ptr, sizeof(int16)); + *addInfo = Int16GetDatum(u.i16); + break; + case sizeof(int32): + memcpy(&u.i32, ptr, sizeof(int32)); + *addInfo = Int32GetDatum(u.i32); + break; +#if SIZEOF_DATUM == 8 + case sizeof(Datum): + memcpy(addInfo, ptr, sizeof(Datum)); + break; +#endif + default: + elog(ERROR, "unsupported byval length: %d", (int) (attr->attlen)); + } + } + } + else + { + ptr = (Pointer) att_align_pointer(ptr, attr->attalign, attr->attlen, ptr); + if (addInfo) + *addInfo = fetch_att(ptr, attr->attbyval, attr->attlen); + } + ptr = (Pointer) att_addlength_pointer(ptr, attr->attlen, ptr); } return ptr; diff --git a/rum_timestamp.c b/rum_timestamp.c new file mode 100644 index 0000000000..8217328d3f --- /dev/null +++ b/rum_timestamp.c @@ -0,0 +1,256 @@ +#include "postgres.h" + +#include + +#include "access/stratnum.h" +#include "utils/builtins.h" +#include "utils/timestamp.h" + +#define RUM_TMST_DISTANCE 20 +#define RUM_TMST_LEFT_DISTANCE 21 +#define RUM_TMST_RIGHT_DISTANCE 22 + +typedef struct QueryInfo +{ + StrategyNumber strategy; + Datum datum; +} QueryInfo; + + +PG_FUNCTION_INFO_V1(rum_timestamp_extract_value); +Datum +rum_timestamp_extract_value(PG_FUNCTION_ARGS) +{ + Datum datum = PG_GETARG_DATUM(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + Datum *entries = (Datum *) palloc(sizeof(Datum)); + + entries[0] = datum; + *nentries = 1; + + PG_RETURN_POINTER(entries); +} + +PG_FUNCTION_INFO_V1(rum_timestamp_extract_query); +Datum +rum_timestamp_extract_query(PG_FUNCTION_ARGS) +{ + Datum datum = PG_GETARG_DATUM(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + StrategyNumber strategy = PG_GETARG_UINT16(2); + bool **partialmatch = (bool **) PG_GETARG_POINTER(3); + Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); + Datum *entries = (Datum *) palloc(sizeof(Datum)); + QueryInfo *data = (QueryInfo *) palloc(sizeof(QueryInfo)); + bool *ptr_partialmatch; + + *nentries = 1; + 
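+	/*
+	 * Strategy handling below: for the less-than strategies the extracted
+	 * entry is DT_NOBEGIN (the leftmost possible key) with partial match
+	 * enabled, so the entry-tree scan starts at the beginning; the
+	 * greater-than strategies enable partial match on the query value
+	 * itself, while equality and the distance strategies (20-22) look up
+	 * the exact value.
+	 */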
ptr_partialmatch = *partialmatch = (bool *) palloc(sizeof(bool)); + *ptr_partialmatch = false; + data->strategy = strategy; + data->datum = datum; + *extra_data = (Pointer *) palloc(sizeof(Pointer)); + **extra_data = (Pointer) data; + + switch(strategy) + { + case BTLessStrategyNumber: + case BTLessEqualStrategyNumber: + entries[0] = TimestampGetDatum(DT_NOBEGIN); /* leftmost */ + *ptr_partialmatch = true; + break; + case BTGreaterEqualStrategyNumber: + case BTGreaterStrategyNumber: + *ptr_partialmatch = true; + case BTEqualStrategyNumber: + case RUM_TMST_DISTANCE: + case RUM_TMST_LEFT_DISTANCE: + case RUM_TMST_RIGHT_DISTANCE: + entries[0] = datum; + break; + default: + elog(ERROR, "unrecognized strategy number: %d", strategy); + } + + PG_RETURN_POINTER(entries); +} + +PG_FUNCTION_INFO_V1(rum_timestamp_compare_prefix); +Datum +rum_timestamp_compare_prefix(PG_FUNCTION_ARGS) +{ + Datum a = PG_GETARG_DATUM(0); + Datum b = PG_GETARG_DATUM(1); + QueryInfo *data = (QueryInfo *) PG_GETARG_POINTER(3); + int32 res, cmp; + + cmp = DatumGetInt32(DirectFunctionCall2Coll(timestamp_cmp, + PG_GET_COLLATION(), + (data->strategy == BTLessStrategyNumber || + data->strategy == BTLessEqualStrategyNumber) + ? data->datum : a, b)); + + switch (data->strategy) + { + case BTLessStrategyNumber: + /* If original datum > indexed one then return match */ + if (cmp > 0) + res = 0; + else + res = 1; + break; + case BTLessEqualStrategyNumber: + /* The same except equality */ + if (cmp >= 0) + res = 0; + else + res = 1; + break; + case BTEqualStrategyNumber: + if (cmp != 0) + res = 1; + else + res = 0; + break; + case BTGreaterEqualStrategyNumber: + /* If original datum <= indexed one then return match */ + if (cmp <= 0) + res = 0; + else + res = 1; + break; + case BTGreaterStrategyNumber: + /* If original datum <= indexed one then return match */ + /* If original datum == indexed one then continue scan */ + if (cmp < 0) + res = 0; + else if (cmp == 0) + res = -1; + else + res = 1; + break; + default: + elog(ERROR, "unrecognized strategy number: %d", data->strategy); + res = 0; + } + + PG_RETURN_INT32(res); +} + +PG_FUNCTION_INFO_V1(rum_timestamp_consistent); +Datum +rum_timestamp_consistent(PG_FUNCTION_ARGS) +{ + bool *recheck = (bool *) PG_GETARG_POINTER(5); + + *recheck = false; + PG_RETURN_BOOL(true); +} + +PG_FUNCTION_INFO_V1(timestamp_distance); +Datum +timestamp_distance(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + double diff; + + if (TIMESTAMP_NOT_FINITE(dt1) || TIMESTAMP_NOT_FINITE(dt2)) + { + if (TIMESTAMP_NOT_FINITE(dt1) && TIMESTAMP_NOT_FINITE(dt2)) + diff = 0; + else + diff = get_float8_infinity(); + } + else + { + /* see timestamp_mi */ + diff = (dt1 > dt2) ? dt1 - dt2 : dt2 - dt1; + diff /= 1e6; + } + + PG_RETURN_FLOAT8(diff); +} + +PG_FUNCTION_INFO_V1(timestamp_left_distance); +Datum +timestamp_left_distance(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + double diff; + + if (TIMESTAMP_NOT_FINITE(dt1) || TIMESTAMP_NOT_FINITE(dt2)) + { + if (TIMESTAMP_NOT_FINITE(dt1) && TIMESTAMP_NOT_FINITE(dt2)) + diff = 0; + else + diff = get_float8_infinity(); + } + else + { + /* see timestamp_mi */ + diff = (dt1 > dt2) ? 
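+			/*
+			 * A left argument lying after dt2 is treated as infinitely
+			 * far, so only timestamps at or before dt2 get a finite
+			 * left distance.
+			 */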
get_float8_infinity() : dt2 - dt1; + diff /= 1e6; + } + + PG_RETURN_FLOAT8(diff); +} + +PG_FUNCTION_INFO_V1(timestamp_right_distance); +Datum +timestamp_right_distance(PG_FUNCTION_ARGS) +{ + Timestamp dt1 = PG_GETARG_TIMESTAMP(0); + Timestamp dt2 = PG_GETARG_TIMESTAMP(1); + double diff; + + if (TIMESTAMP_NOT_FINITE(dt1) || TIMESTAMP_NOT_FINITE(dt2)) + { + if (TIMESTAMP_NOT_FINITE(dt1) && TIMESTAMP_NOT_FINITE(dt2)) + diff = 0; + else + diff = get_float8_infinity(); + } + else + { + /* see timestamp_mi */ + diff = (dt1 > dt2) ? dt1 - dt2 : get_float8_infinity(); + diff /= 1e6; + } + + PG_RETURN_FLOAT8(diff); +} + +PG_FUNCTION_INFO_V1(rum_timestamp_outer_distance); +Datum +rum_timestamp_outer_distance(PG_FUNCTION_ARGS) +{ + StrategyNumber strategy = PG_GETARG_UINT16(2); + Datum diff; + + switch(strategy) + { + case RUM_TMST_DISTANCE: + diff = DirectFunctionCall2(timestamp_distance, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1)); + break; + case RUM_TMST_LEFT_DISTANCE: + diff = DirectFunctionCall2(timestamp_left_distance, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1)); + break; + case RUM_TMST_RIGHT_DISTANCE: + diff = DirectFunctionCall2(timestamp_right_distance, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1)); + break; + default: + elog(ERROR, "rum_timestamp_outer_distance: unknown strategy %u", + strategy); + } + + PG_RETURN_DATUM(diff); +} + diff --git a/rum_ts_utils.c b/rum_ts_utils.c index 9ba0599140..715b9bbdf9 100644 --- a/rum_ts_utils.c +++ b/rum_ts_utils.c @@ -26,6 +26,7 @@ PG_FUNCTION_INFO_V1(rum_extract_tsquery); PG_FUNCTION_INFO_V1(rum_tsvector_config); PG_FUNCTION_INFO_V1(rum_tsquery_pre_consistent); PG_FUNCTION_INFO_V1(rum_tsquery_consistent); +PG_FUNCTION_INFO_V1(rum_tsquery_timestamp_consistent); PG_FUNCTION_INFO_V1(rum_tsquery_distance); PG_FUNCTION_INFO_V1(rum_ts_distance); @@ -45,6 +46,7 @@ typedef struct bool *need_recheck; Datum *addInfo; bool *addInfoIsNull; + bool notPhrase; } RumChkVal; static bool @@ -115,13 +117,21 @@ checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) if (!gcv->check[j]) return false; + /* + * Fill position list for phrase operator if it's needed + * end it exists + */ if (data && gcv->addInfo && gcv->addInfoIsNull[j] == false) { - bytea *positions = DatumGetByteaP(gcv->addInfo[j]); + bytea *positions; int32 i; char *ptrt; WordEntryPos post; + if (gcv->notPhrase) + elog(ERROR, "phrase search isn't supported yet"); + + positions = DatumGetByteaP(gcv->addInfo[j]); data->npos = count_pos(VARDATA_ANY(positions), VARSIZE_ANY_EXHDR(positions)); data->pos = palloc(sizeof(*data->pos) * data->npos); @@ -172,6 +182,7 @@ rum_tsquery_consistent(PG_FUNCTION_ARGS) gcv.need_recheck = recheck; gcv.addInfo = addInfo; gcv.addInfoIsNull = addInfoIsNull; + gcv.notPhrase = false; res = TS_execute(GETQUERY(query), &gcv, true, checkcondition_rum); } @@ -179,7 +190,45 @@ rum_tsquery_consistent(PG_FUNCTION_ARGS) PG_RETURN_BOOL(res); } +Datum +rum_tsquery_timestamp_consistent(PG_FUNCTION_ARGS) +{ + bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ + TSQuery query = PG_GETARG_TSQUERY(2); + /* int32 nkeys = PG_GETARG_INT32(3); */ + Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); + bool *recheck = (bool *) PG_GETARG_POINTER(5); + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); + bool res = FALSE; + /* The query requires recheck only if it involves + * weights */ + *recheck = false; + + if (query->size > 0) + { + QueryItem *item; + RumChkVal gcv; + + /* 
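+		 * Unlike rum_tsquery_consistent(), notPhrase is set here: with the
+		 * timestamp-carrying opclasses the additional information stores
+		 * the attached order-by value rather than lexeme positions, so
+		 * phrase operators are rejected in checkcondition_rum().
+		 *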
+ * check-parameter array has one entry for each value + * (operand) in the query. + */ + gcv.first_item = item = GETQUERY(query); + gcv.check = check; + gcv.map_item_operand = (int *) (extra_data[0]); + gcv.need_recheck = recheck; + gcv.addInfo = addInfo; + gcv.addInfoIsNull = addInfoIsNull; + gcv.notPhrase = true; + + res = TS_execute(GETQUERY(query), &gcv, true, checkcondition_rum); + } + + PG_RETURN_BOOL(res); +} static float weights[] = {0.1f, 0.2f, 0.4f, 1.0f}; @@ -648,7 +697,7 @@ rum_tsquery_distance(PG_FUNCTION_ARGS) TSQuery query = PG_GETARG_TSQUERY(2); int32 nkeys = PG_GETARG_INT32(3); - /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); float8 res; diff --git a/rumbtree.c b/rumbtree.c index e1c93cbe6a..5435dc9687 100644 --- a/rumbtree.c +++ b/rumbtree.c @@ -407,7 +407,8 @@ rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, state = GenericXLogStart(index); page = GenericXLogRegisterBuffer(state, stack->buffer, 0); - rpage = GenericXLogRegisterBuffer(state, rbuffer, 0); + rpage = GenericXLogRegisterBuffer(state, rbuffer, + GENERIC_XLOG_FULL_IMAGE); /* * newlpage is a pointer to memory page, it doesn't associate with @@ -421,7 +422,8 @@ rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, * pointer on root to left and right page */ lbuffer = RumNewBuffer(btree->index); - lpage = GenericXLogRegisterBuffer(state, lbuffer, 0); + lpage = GenericXLogRegisterBuffer(state, lbuffer, + GENERIC_XLOG_FULL_IMAGE); RumPageGetOpaque(rpage)->rightlink = InvalidBlockNumber; RumPageGetOpaque(newlpage)->rightlink = BufferGetBlockNumber(rbuffer); diff --git a/rumdatapage.c b/rumdatapage.c index 2a4f97bfb2..abe4531623 100644 --- a/rumdatapage.c +++ b/rumdatapage.c @@ -36,6 +36,12 @@ rumComputeDatumSize(Size data_length, Datum val, bool typbyval, char typalign, */ data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val)); } + else if (typbyval) + { + /* do not align type pass-by-value because anyway we + * will copy Datum */ + data_length = att_addlength_datum(data_length, typlen, val); + } else { data_length = att_align_datum(data_length, typalign, typlen, val); @@ -60,8 +66,34 @@ rumDatumWrite(Pointer ptr, Datum datum, bool typbyval, char typalign, if (typbyval) { /* pass-by-value */ - ptr = (char *) att_align_nominal(ptr, typalign); - store_att_byval(ptr, datum, typlen); + union { + int16 i16; + int32 i32; + } u; + + /* align-safe version of store_att_byval(ptr, datum, typlen); */ + switch(typlen) + { + case sizeof(char): + *ptr = DatumGetChar(datum); + break; + case sizeof(int16): + u.i16 = DatumGetInt16(datum); + memcpy(ptr, &u.i16, sizeof(int16)); + break; + case sizeof(int32): + u.i32 = DatumGetInt32(datum); + memcpy(ptr, &u.i32, sizeof(int32)); + break; +#if SIZEOF_DATUM == 8 + case sizeof(Datum): + memcpy(ptr, &datum, sizeof(Datum)); + break; +#endif + default: + elog(ERROR, "unsupported byval length: %d", (int) (typlen)); + } + data_length = typlen; } else if (typlen == -1) @@ -267,6 +299,42 @@ rumCompareItemPointers(ItemPointer a, ItemPointer b) return (ba > bb) ? 
1 : -1; } +int +compareRumKey(RumState *state, RumKey *a, RumKey *b) +{ + + /* assume NULL is greate than any real value */ + if (state->useAlternativeOrder) + { + if (a->isNull == false && b->isNull == false) + { + int res; + AttrNumber attnum = state->attrnOrderByColumn; + + res = DatumGetInt32(FunctionCall2Coll( + &state->compareFn[attnum - 1], + state->supportCollation[attnum - 1], + a->addToCompare, b->addToCompare)); + if (res != 0) + return res; + /* fallback to ItemPointerCompare */ + } + else if (a->isNull == true) + { + if (b->isNull == false) + return 1; + /* fallback to ItemPointerCompare */ + } + else + { + Assert(b->isNull == true); + return -1; + } + } + + return rumCompareItemPointers(&a->ipd, &b->ipd); +} + /* * Merge two ordered arrays of itempointers, eliminating any duplicates. * Returns the number of items in the result. @@ -431,7 +499,8 @@ findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, */ for (i = 0; i < RumDataLeafIndexCount; i++) { - RumDataLeafItemIndex *index = &RumPageGetIndexes(page)[i]; + RumDataLeafItemIndex *index = RumPageGetIndexes(page) + i; + if (index->offsetNumer == InvalidOffsetNumber) break; @@ -1196,11 +1265,62 @@ updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate) } /* Update freespace of page */ RumPageGetOpaque(page)->freespace = RumDataPageFreeSpacePre(page, ptr); - /* Adjust pd_lower */ + /* Adjust pd_lower and pd_upper */ ((PageHeader) page)->pd_lower = ptr - page; + ((PageHeader) page)->pd_upper = ((char*)RumPageGetIndexes(page)) - page; + return iptr; } +void +checkLeafDataPage(RumState *rumstate, AttrNumber attnum, Page page) +{ + Offset maxoff, i; + char *ptr; + ItemPointerData iptr; + RumDataLeafItemIndex *index, *previndex = NULL; + + if (!(RumPageGetOpaque(page)->flags & RUM_DATA)) + return; + + maxoff = RumPageGetOpaque(page)->maxoff; + ptr = RumDataPageGetData(page); + iptr.ip_blkid.bi_lo = 0; + iptr.ip_blkid.bi_hi = 0; + iptr.ip_posid = 0; + + Assert(RumPageGetOpaque(page)->flags & RUM_LEAF); + + for(i = FirstOffsetNumber; i <= maxoff; i++) + ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, rumstate); + + Assert((char*)RumPageGetIndexes(page) == page + ((PageHeader)page)->pd_upper); + + for(i = 0; i offsetNumer == InvalidOffsetNumber) + break; + + Assert(index->pageOffset < ((PageHeader)page)->pd_lower); + + if (previndex) + { + Assert(previndex->offsetNumer < index->offsetNumer); + Assert(previndex->pageOffset < index->pageOffset); + Assert(rumCompareItemPointers(&index->iptr, &previndex->iptr) > 0); + } + + if (i != RumDataLeafIndexCount - 1) + { + iptr = index->iptr; + rumDataPageLeafRead(RumDataPageGetData(page) + index->pageOffset, + attnum, &iptr, NULL, NULL, rumstate); + } + } +} + /* * Fills new root by right bound values from child. * Also called from rumxlog, should not use btree diff --git a/rumget.c b/rumget.c index bb88485727..923dc799ca 100644 --- a/rumget.c +++ b/rumget.c @@ -16,6 +16,7 @@ #include "access/relscan.h" #include "miscadmin.h" +#include "utils/builtins.h" #include "utils/datum.h" #include "utils/memutils.h" @@ -46,6 +47,7 @@ static void entryGetItem(RumState *rumstate, RumScanEntry entry); static bool callConsistentFn(RumState *rumstate, RumScanKey key) { + bool res; /* * If we're dealing with a dummy EVERYTHING key, we don't want to call the * consistentFn; just claim it matches. 
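/*
 * callConsistentFn() keeps the result in "res" instead of returning it
 * directly: when the key belongs to the "addto" column, the additional
 * info of a matching entry (addInfo[0]) is remembered in
 * key->outerAddInfo, and keyGetOrdering() later passes that value to the
 * outer ordering function of the order-by column.
 */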
@@ -62,7 +64,7 @@ callConsistentFn(RumState *rumstate, RumScanKey key) */ key->recheckCurItem = true; - return DatumGetBool(FunctionCall10Coll(&rumstate->consistentFn[key->attnum - 1], + res = DatumGetBool(FunctionCall10Coll(&rumstate->consistentFn[key->attnum - 1], rumstate->supportCollation[key->attnum - 1], PointerGetDatum(key->entryRes), UInt16GetDatum(key->strategy), @@ -75,6 +77,33 @@ callConsistentFn(RumState *rumstate, RumScanKey key) PointerGetDatum(key->addInfo), PointerGetDatum(key->addInfoIsNull) )); + + if (res && key->attnum == rumstate->attrnAddToColumn) + { + int i; + + /* remember some addinfo value for later ordering by addinfo + from another column */ + + key->outerAddInfoIsNull = true; + + for(i=0; inuserentries; i++) + { + if (key->entryRes[i] && key->addInfoIsNull[0] == false) + { + key->outerAddInfoIsNull = false; + /* + * XXX FIXME only pass-by-value!!! + * Value should be copied to long-lived memory context and, + * somehow, freeed. Seems, the last is real problem + */ + key->outerAddInfo = key->addInfo[0]; + break; + } + } + } + + return res; } /* @@ -371,6 +400,7 @@ collectMatchBitmap(RumBtreeData *btree, RumBtreeStack *stack, tbm_add_tuples(scanEntry->matchBitmap, ipd, RumGetNPosting(itup), false); scanEntry->predictNumberResult += RumGetNPosting(itup); + pfree(ipd); } /* @@ -398,6 +428,8 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) entry->curAddInfoIsNull = true; entry->offset = InvalidOffsetNumber; entry->list = NULL; + entry->addInfo = NULL; + entry->addInfoIsNull = NULL; entry->gdi = NULL; entry->nlist = 0; entry->matchBitmap = NULL; @@ -574,15 +606,18 @@ scan_entry_cmp(const void *p1, const void *p2) static void startScan(IndexScanDesc scan) { + MemoryContext oldCtx = CurrentMemoryContext; RumScanOpaque so = (RumScanOpaque) scan->opaque; RumState *rumstate = &so->rumstate; uint32 i; bool useFastScan = false; + MemoryContextSwitchTo(so->keyCtx); for (i = 0; i < so->totalentries; i++) { startScanEntry(rumstate, so->entries[i]); } + MemoryContextSwitchTo(oldCtx); if (RumFuzzySearchLimit > 0) { @@ -2126,6 +2161,22 @@ keyGetOrdering(RumState *rumstate, MemoryContext tempCtx, RumScanKey key, RumScanEntry entry; int i; + if (key->useAddToColumn) + { + Assert(key->nentries == 0); + Assert(key->nuserentries == 0); + + if (key->outerAddInfoIsNull) + return get_float8_infinity(); + + return DatumGetFloat8(FunctionCall3( + &rumstate->outerOrderingFn[rumstate->attrnOrderByColumn - 1], + key->outerAddInfo, + key->queryValues[0], + UInt16GetDatum(key->strategy) + )); + } + for (i = 0; i < key->nentries; i++) { entry = key->scanEntry[i]; @@ -2163,15 +2214,54 @@ static void insertScanItem(RumScanOpaque so, bool recheck) { RumSortItem *item; - int i, j = 0; + int i, j; - item = (RumSortItem *)palloc(RumSortItemSize(so->norderbys)); + item = (RumSortItem *) + MemoryContextAlloc(rum_tuplesort_get_memorycontext(so->sortstate), + RumSortItemSize(so->norderbys)); item->iptr = so->iptr; item->recheck = recheck; + + if (AttributeNumberIsValid(so->rumstate.attrnAddToColumn)) + { + int nOrderByAnother = 0, count = 0; + + for (i = 0; i < so->nkeys; i++) + { + if (so->keys[i].useAddToColumn) { + so->keys[i].outerAddInfoIsNull = true; + nOrderByAnother++; + } + } + + for (i = 0; count < nOrderByAnother && i < so->nkeys; i++) + { + if (so->keys[i].attnum == so->rumstate.attrnAddToColumn && + so->keys[i].outerAddInfoIsNull == false) + { + Assert(!so->keys[i].orderBy); + Assert(!so->keys[i].useAddToColumn); + + for(j = i; j < so->nkeys; j++) + { + if 
(so->keys[j].useAddToColumn && + so->keys[j].outerAddInfoIsNull == true) + { + so->keys[j].outerAddInfoIsNull = false; + so->keys[j].outerAddInfo = so->keys[i].outerAddInfo; + count++; + } + } + } + } + } + + j = 0; for (i = 0; i < so->nkeys; i++) { if (!so->keys[i].orderBy) continue; + item->data[j] = keyGetOrdering(&so->rumstate, so->tempCtx, &so->keys[i], &so->iptr); j++; } diff --git a/ruminsert.c b/ruminsert.c index 4334dc2cf8..2fbaf0e39a 100644 --- a/ruminsert.c +++ b/ruminsert.c @@ -492,7 +492,9 @@ rumEntryInsert(RumState *rumstate, static void rumHeapTupleBulkInsert(RumBuildState *buildstate, OffsetNumber attnum, Datum value, bool isNull, - ItemPointer heapptr) + ItemPointer heapptr, + Datum outerAddInfo, + bool outerAddInfoIsNull) { Datum *entries; RumNullCategory *categories; @@ -508,6 +510,19 @@ rumHeapTupleBulkInsert(RumBuildState *buildstate, OffsetNumber attnum, value, isNull, &nentries, &categories, &addInfo, &addInfoIsNull); + + if (attnum == buildstate->rumstate.attrnAddToColumn) + { + addInfo = palloc(sizeof(*addInfo) * nentries); + addInfoIsNull = palloc(sizeof(*addInfoIsNull) * nentries); + + for(i=0; irumstate.attrnOrderByColumn)) + { + outerAddInfo = values[buildstate->rumstate.attrnOrderByColumn - 1]; + outerAddInfoIsNull = isnull[buildstate->rumstate.attrnOrderByColumn - 1]; + } oldCtx = MemoryContextSwitchTo(buildstate->tmpCtx); for (i = 0; i < buildstate->rumstate.origTupdesc->natts; i++) rumHeapTupleBulkInsert(buildstate, (OffsetNumber) (i + 1), values[i], isnull[i], - &htup->t_self); + &htup->t_self, + outerAddInfo, outerAddInfoIsNull); /* If we've maxed out our available memory, dump everything to the index */ if (buildstate->accum.allocatedMemory >= maintenance_work_mem * 1024L) @@ -553,9 +577,9 @@ rumBuildCallback(Relation index, HeapTuple htup, Datum *values, while ((list = rumGetBAEntry(&buildstate->accum, &attnum, &key, &category, &nlist)) != NULL) { - ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); - Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); - bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); + ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(*iptrs) *nlist); + Datum *addInfo = (Datum *)palloc(sizeof(*addInfo) * nlist); + bool *addInfoIsNull = (bool *)palloc(sizeof(*addInfoIsNull) * nlist); int i; for (i = 0; i < nlist; i++) @@ -570,6 +594,10 @@ rumBuildCallback(Relation index, HeapTuple htup, Datum *values, CHECK_FOR_INTERRUPTS(); rumEntryInsert(&buildstate->rumstate, attnum, key, category, iptrs, addInfo, addInfoIsNull, nlist, &buildstate->buildStats); + + pfree(addInfoIsNull); + pfree(addInfo); + pfree(iptrs); } MemoryContextReset(buildstate->tmpCtx); @@ -732,7 +760,9 @@ rumbuildempty(Relation index) static void rumHeapTupleInsert(RumState *rumstate, OffsetNumber attnum, Datum value, bool isNull, - ItemPointer item) + ItemPointer item, + Datum outerAddInfo, + bool outerAddInfoIsNull) { Datum *entries; RumNullCategory *categories; @@ -744,6 +774,18 @@ rumHeapTupleInsert(RumState *rumstate, OffsetNumber attnum, entries = rumExtractEntries(rumstate, attnum, value, isNull, &nentries, &categories, &addInfo, &addInfoIsNull); + if (attnum == rumstate->attrnAddToColumn) + { + addInfo = palloc(sizeof(*addInfo) * nentries); + addInfoIsNull = palloc(sizeof(*addInfoIsNull) * nentries); + + for(i=0; inatts; i++) rumHeapTupleInsert(&rumstate, (OffsetNumber) (i + 1), values[i], isnull[i], - ht_ctid); + ht_ctid, + outerAddInfo, outerAddInfoIsNull); } MemoryContextSwitchTo(oldCtx); diff --git 
a/rumscan.c b/rumscan.c index f7ed8f6a9e..28759213b6 100644 --- a/rumscan.c +++ b/rumscan.c @@ -33,11 +33,19 @@ rumbeginscan(Relation rel, int nkeys, int norderbys) so->keys = NULL; so->nkeys = 0; so->firstCall = true; + so->totalentries = 0; + so->sortedEntries = NULL; so->tempCtx = AllocSetContextCreate(CurrentMemoryContext, "Rum scan temporary context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); + so->keyCtx = AllocSetContextCreate(CurrentMemoryContext, + "Gin scan key context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + initRumState(&so->rumstate, scan->indexRelation); scan->opaque = so; @@ -140,17 +148,8 @@ rumFillScanKey(RumScanOpaque so, OffsetNumber attnum, /* Non-default search modes add one "hidden" entry to each key */ if (searchMode != GIN_SEARCH_MODE_DEFAULT) nQueryValues++; - key->nentries = nQueryValues; - key->nuserentries = nUserQueryValues; key->orderBy = orderBy; - key->scanEntry = (RumScanEntry *) palloc(sizeof(RumScanEntry) * nQueryValues); - key->entryRes = (bool *) palloc0(sizeof(bool) * nQueryValues); - key->addInfo = (Datum *) palloc0(sizeof(Datum) * nQueryValues); - key->addInfoIsNull = (bool *) palloc(sizeof(bool) * nQueryValues); - for (i = 0; i < nQueryValues; i++) - key->addInfoIsNull[i] = true; - key->query = query; key->queryValues = queryValues; key->queryCategories = queryCategories; @@ -158,12 +157,44 @@ rumFillScanKey(RumScanOpaque so, OffsetNumber attnum, key->strategy = strategy; key->searchMode = searchMode; key->attnum = attnum; + key->useAddToColumn = false; ItemPointerSetMin(&key->curItem); key->curItemMatches = false; key->recheckCurItem = false; key->isFinished = false; + if (key->orderBy && key->attnum == rumstate->attrnOrderByColumn) + { + if (nQueryValues != 1) + elog(ERROR, "extractQuery should return only one value"); + if (rumstate->canOuterOrdering[attnum - 1] == false) + elog(ERROR,"doesn't support ordering as additional info"); + + key->useAddToColumn = true; + key->attnum = rumstate->attrnAddToColumn; + key->nentries = 0; + key->nuserentries = 0; + + key->outerAddInfoIsNull = true; + + key->scanEntry = NULL; + key->entryRes = NULL; + key->addInfo = NULL; + key->addInfoIsNull = NULL; + + return; + } + + key->nentries = nQueryValues; + key->nuserentries = nUserQueryValues; + key->scanEntry = (RumScanEntry *) palloc(sizeof(RumScanEntry) * nQueryValues); + key->entryRes = (bool *) palloc0(sizeof(bool) * nQueryValues); + key->addInfo = (Datum *) palloc0(sizeof(Datum) * nQueryValues); + key->addInfoIsNull = (bool *) palloc(sizeof(bool) * nQueryValues); + for (i = 0; i < nQueryValues; i++) + key->addInfoIsNull[i] = true; + for (i = 0; i < nQueryValues; i++) { Datum queryKey; @@ -226,21 +257,6 @@ freeScanKeys(RumScanOpaque so) { uint32 i; - if (so->keys == NULL) - return; - - for (i = 0; i < so->nkeys; i++) - { - RumScanKey key = so->keys + i; - - pfree(key->scanEntry); - pfree(key->entryRes); - } - - pfree(so->keys); - so->keys = NULL; - so->nkeys = 0; - for (i = 0; i < so->totalentries; i++) { RumScanEntry entry = so->entries[i]; @@ -257,6 +273,10 @@ freeScanKeys(RumScanOpaque so) } if (entry->list) pfree(entry->list); + if (entry->addInfo) + pfree(entry->addInfo); + if (entry->addInfoIsNull) + pfree(entry->addInfoIsNull); if (entry->matchIterator) tbm_end_iterate(entry->matchIterator); if (entry->matchBitmap) @@ -264,8 +284,14 @@ freeScanKeys(RumScanOpaque so) pfree(entry); } - pfree(so->entries); + MemoryContextReset(so->keyCtx); + so->keys = NULL; + so->nkeys 
= 0; + + if (so->sortedEntries) + pfree(so->sortedEntries); so->entries = NULL; + so->sortedEntries = NULL; so->totalentries = 0; } @@ -275,14 +301,14 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) Datum *queryValues; int32 nQueryValues = 0; bool *partial_matches = NULL; - Pointer *extra_data = NULL; + Pointer *extra_data = NULL; bool *nullFlags = NULL; int32 searchMode = GIN_SEARCH_MODE_DEFAULT; /* - * We assume that RUM-indexable operators are strict, so a null query - * argument means an unsatisfiable query. - */ + * We assume that RUM-indexable operators are strict, so a null query + * argument means an unsatisfiable query. + */ if (skey->sk_flags & SK_ISNULL) { so->isVoidRes = true; @@ -365,6 +391,14 @@ rumNewScanKey(IndexScanDesc scan) RumScanOpaque so = (RumScanOpaque) scan->opaque; int i; bool hasNullQuery = false; + MemoryContext oldCtx; + + /* + * Allocate all the scan key information in the key context. (If + * extractQuery leaks anything there, it won't be reset until the end of + * scan or rescan, but that's OK.) + */ + oldCtx = MemoryContextSwitchTo(so->keyCtx); /* if no scan keys provided, allocate extra EVERYTHING RumScanKey */ so->keys = (RumScanKey) @@ -377,6 +411,7 @@ rumNewScanKey(IndexScanDesc scan) so->allocentries = 32; so->entries = (RumScanEntry *) palloc0(so->allocentries * sizeof(RumScanEntry)); + so->sortedEntries = NULL; so->isVoidRes = false; @@ -415,23 +450,7 @@ rumNewScanKey(IndexScanDesc scan) NULL, NULL, NULL, NULL, false); } - /* - * If the index is version 0, it may be missing null and placeholder - * entries, which would render searches for nulls and full-index scans - * unreliable. Throw an error if so. - */ - if (hasNullQuery && !so->isVoidRes) - { - GinStatsData rumStats; - - rumGetStats(scan->indexRelation, &rumStats); - if (rumStats.ginVersion < 1) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("old RUM indexes do not support whole-index scans nor searches for nulls"), - errhint("To fix this, do REINDEX INDEX \"%s\".", - RelationGetRelationName(scan->indexRelation)))); - } + MemoryContextSwitchTo(oldCtx); pgstat_count_index_scan(scan->indexRelation); } @@ -454,6 +473,12 @@ rumrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, memmove(scan->orderByData, orderbys, scan->numberOfOrderBys * sizeof(ScanKeyData)); } + + if (so->sortstate) + { + rum_tuplesort_end(so->sortstate); + so->sortstate = NULL; + } } void @@ -467,6 +492,7 @@ rumendscan(IndexScanDesc scan) rum_tuplesort_end(so->sortstate); MemoryContextDelete(so->tempCtx); + MemoryContextDelete(so->keyCtx); pfree(so); } diff --git a/rumsort.c b/rumsort.c index 92bdadcbf5..3a7298398c 100644 --- a/rumsort.c +++ b/rumsort.c @@ -911,6 +911,12 @@ rum_tuplesort_begin_common(int workMem, bool randomAccess) return state; } +MemoryContext +rum_tuplesort_get_memorycontext(Tuplesortstate *state) +{ + return state->sortcontext; +} + Tuplesortstate * rum_tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, diff --git a/rumsort.h b/rumsort.h index f93c81be84..95afa82251 100644 --- a/rumsort.h +++ b/rumsort.h @@ -67,6 +67,7 @@ typedef struct #define RumSortItemSize(nKeys) (offsetof(RumSortItem,data)+(nKeys)*sizeof(float8)) +extern MemoryContext rum_tuplesort_get_memorycontext(Tuplesortstate *state); extern Tuplesortstate *rum_tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, diff --git a/rumtsquery.c b/rumtsquery.c new file mode 100644 index 0000000000..9ecb01598c --- /dev/null +++ 
b/rumtsquery.c @@ -0,0 +1,600 @@ +/*------------------------------------------------------------------------- + * + * rumtsquery.c + * Inverted fulltext search: indexing tsqueries. + * + * Portions Copyright (c) 2015-2016, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/pg_type.h" +#include "tsearch/ts_type.h" +#include "tsearch/ts_utils.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/bytea.h" + +#include "rum.h" + +typedef struct QueryItemWrap +{ + QueryItemType type; + int8 oper; + bool not; + int operandsCount, + operandsAllocated; + struct QueryItemWrap *operands; + struct QueryItemWrap *parent; + int distance, + length; + int sum; + int num; +} QueryItemWrap; + +static QueryItemWrap * +add_child(QueryItemWrap *parent) +{ + QueryItemWrap *result; + if (!parent) + { + result = (QueryItemWrap *) palloc0(sizeof(QueryItemWrap)); + } + else + { + parent->operandsCount++; + while (parent->operandsCount > parent->operandsAllocated) + { + if (parent->operandsAllocated > 0) + { + parent->operandsAllocated *= 2; + parent->operands = (QueryItemWrap *) repalloc(parent->operands, parent->operandsAllocated * sizeof(*parent->operands)); + } + else + { + parent->operandsAllocated = 4; + parent->operands = (QueryItemWrap *) palloc(parent->operandsAllocated * sizeof(*parent->operands)); + } + } + result = &parent->operands[parent->operandsCount - 1]; + memset(result, 0, sizeof(*result)); + result->parent = parent; + } + return result; +} + +static QueryItemWrap * +make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) +{ + if (item->type == QI_VAL) + { + QueryOperand *operand = (QueryOperand *) item; + QueryItemWrap *wrap = add_child(parent); + + if (operand->prefix) + elog(ERROR, "Indexing of prefix tsqueries isn't supported yet"); + + wrap->type = QI_VAL; + wrap->distance = operand->distance; + wrap->length = operand->length; + wrap->not = not; + return wrap; + } + + switch (item->qoperator.oper) + { + case OP_NOT: + return make_query_item_wrap(item + 1, parent, !not); + + case OP_AND: + case OP_OR: + { + uint8 oper = item->qoperator.oper; + if (not) + oper = (oper == OP_AND) ? 
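+				/*
+				 * NOT has been pushed down to the operands, so by De
+				 * Morgan's laws a negated AND becomes OR and a negated
+				 * OR becomes AND.
+				 */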
OP_OR : OP_AND; + + if (!parent || oper != parent->oper) + { + QueryItemWrap *wrap = add_child(parent); + + wrap->type = QI_OPR; + wrap->oper = oper; + + make_query_item_wrap(item + item->qoperator.left, wrap, not); + make_query_item_wrap(item + 1, wrap, not); + return wrap; + } + else + { + make_query_item_wrap(item + item->qoperator.left, parent, not); + make_query_item_wrap(item + 1, parent, not); + return NULL; + } + } + case OP_PHRASE: + elog(ERROR, "Indexing of phrase tsqueries isn't supported yet"); + default: + elog(ERROR, "Invalid tsquery operator"); + } + + /* not reachable, but keep compiler quiet */ + return NULL; +} + +static int +calc_wraps(QueryItemWrap *wrap, int *num) +{ + int i, notCount = 0, result; + + for (i = 0; i < wrap->operandsCount; i++) + { + if (wrap->operands[i].not) + notCount++; + } + + if (wrap->type == QI_OPR) + { + wrap->num = (*num)++; + if (wrap->oper == OP_AND) + wrap->sum = notCount + 1 - wrap->operandsCount; + if (wrap->oper == OP_OR) + wrap->sum = notCount; + } + else if (wrap->type == QI_VAL) + { + return 1; + } + + result = 0; + for (i = 0; i < wrap->operandsCount; i++) + result += calc_wraps(&wrap->operands[i], num); + return result; +} + +static bool +check_allnegative(QueryItemWrap *wrap) +{ + if (wrap->type == QI_VAL) + { + return wrap->not; + } + else if (wrap->oper == OP_AND) + { + int i; + for (i = 0; i < wrap->operandsCount; i++) + { + if (!check_allnegative(&wrap->operands[i])) + return false; + } + return true; + } + else if (wrap->oper == OP_OR) + { + int i; + for (i = 0; i < wrap->operandsCount; i++) + { + if (check_allnegative(&wrap->operands[i])) + return true; + } + return false; + } + else + { + elog(ERROR, "check_allnegative: invalid node"); + return false; + } + +} + +#define MAX_ENCODED_LEN 5 + +/* + * Varbyte-encode 'val' into *ptr. *ptr is incremented to next integer. + */ +static void +encode_varbyte(uint32 val, unsigned char **ptr) +{ + unsigned char *p = *ptr; + + while (val > 0x7F) + { + *(p++) = 0x80 | (val & 0x7F); + val >>= 7; + } + *(p++) = (unsigned char) val; + + *ptr = p; +} + +/* + * Decode varbyte-encoded integer at *ptr. *ptr is incremented to next integer. 
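+ * The encoding stores seven value bits per byte, least-significant group
+ * first; the high bit of each byte is a continuation flag, so a 32-bit
+ * value takes at most MAX_ENCODED_LEN (5) bytes.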
+ */ +static uint32 +decode_varbyte(unsigned char **ptr) +{ + uint32 val; + unsigned char *p = *ptr; + uint32 c; + + c = *(p++); + val = c & 0x7F; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 7; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 14; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 21; + if (c & 0x80) + { + c = *(p++); + val |= (c & 0x7F) << 28; + } + } + } + } + + *ptr = p; + + return val; +} + +typedef struct +{ + Datum *addInfo; + bool *addInfoIsNull; + Datum *entries; + int index; + char *operand; +} ExtractContext; + +static void +extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) +{ + if (wrap->type == QI_VAL) + { + bytea *addinfo; + unsigned char *ptr; + int index = context->index; + + + for (index = 0; index < context->index; index++) + { + text *entry; + entry = DatumGetByteaP(context->entries[index]); + if (VARSIZE_ANY_EXHDR(entry) == wrap->length && + !memcmp(context->operand + wrap->distance, VARDATA_ANY(entry), wrap->length)) + break; + } + + if (index >= context->index) + { + index = context->index; + addinfo = (bytea *) palloc(VARHDRSZ + 2 * Max(level, 1) * MAX_ENCODED_LEN); + ptr = (unsigned char *) VARDATA(addinfo); + context->entries[index] = PointerGetDatum(cstring_to_text_with_len(context->operand + wrap->distance, wrap->length)); + context->addInfo[index] = PointerGetDatum(addinfo); + context->addInfoIsNull[index] = false; + context->index++; + /*ptrEnd = (unsigned char *) VARDATA(addinfo) + VARHDRSZ + 2 * Max(level, 1) * MAX_ENCODED_LEN;*/ + } + else + { + addinfo = DatumGetByteaP(context->addInfo[index]); + addinfo = (bytea *) repalloc(addinfo, + VARSIZE(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN); + context->addInfo[index] = PointerGetDatum(addinfo); + ptr = (unsigned char *) VARDATA(addinfo) + VARSIZE_ANY_EXHDR(addinfo); + /*ptrEnd = (unsigned char *) VARDATA(addinfo) + VARSIZE_ANY_EXHDR(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN;*/ + } + /*elog(NOTICE, "%s", text_to_cstring(DatumGetTextP(context->entries[index])));*/ + + while (wrap->parent) + { + QueryItemWrap *parent = wrap->parent; + uint32 sum; + /*elog(NOTICE, "%d %d %d", parent->num, parent->sum, wrap->not);*/ + encode_varbyte((uint32) parent->num, &ptr); + sum = (uint32)abs(parent->sum); + sum <<= 2; + if (parent->sum < 0) + sum |= 2; + if (wrap->not) + sum |= 1; + encode_varbyte(sum, &ptr); + wrap = parent; + } + if (level == 0 && wrap->not) + { + encode_varbyte(1, &ptr); + encode_varbyte(4 | 1, &ptr); + } + /*Assert(ptr <= ptrEnd);*/ + SET_VARSIZE(addinfo, ptr - (unsigned char *)addinfo); + /*elog(NOTICE, "%s", DatumGetPointer(DirectFunctionCall1(byteaout, PointerGetDatum(addinfo))));*/ + } + else if (wrap->type == QI_OPR) + { + int i; + for (i = 0; i < wrap->operandsCount; i++) + extract_wraps(&wrap->operands[i], context, level + 1); + } +} + +/*PG_FUNCTION_INFO_V1(rum_process_tsquery); +Datum +rum_process_tsquery(PG_FUNCTION_ARGS) +{ + TSQuery query = PG_GETARG_TSQUERY(0); + QueryItem *item = GETQUERY(query); + QueryItemWrap *wrap = make_query_item_wrap(item, NULL, false); + int num = 1; + + calc_wraps(wrap, &num); + print_wraps(wrap, , 0); + + PG_RETURN_VOID(); +}*/ + +PG_FUNCTION_INFO_V1(ruminv_extract_tsquery); +Datum +ruminv_extract_tsquery(PG_FUNCTION_ARGS) +{ + TSQuery query = PG_GETARG_TSQUERY(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + bool **nullFlags = (bool **) PG_GETARG_POINTER(2); + Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); + bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); + Datum *entries = 
NULL; + QueryItem *item = GETQUERY(query); + QueryItemWrap *wrap; + ExtractContext context; + int num = 1, + count; + bool extractNull; + + wrap = make_query_item_wrap(item, NULL, false); + count = calc_wraps(wrap, &num); + extractNull = check_allnegative(wrap); + if (extractNull) + count++; + + entries = (Datum *) palloc(sizeof(Datum) * count); + *addInfo = (Datum *) palloc(sizeof(Datum) * count); + *addInfoIsNull = (bool *) palloc(sizeof(bool) * count); + + context.addInfo = *addInfo; + context.addInfoIsNull = *addInfoIsNull; + context.entries = entries; + context.operand = GETOPERAND(query); + context.index = 0; + + extract_wraps(wrap, &context, 0); + + count = context.index; + if (extractNull) + { + int i; + + count++; + *nullFlags = (bool *) palloc(sizeof(bool) * count); + for (i = 0; i < count - 1; i++) + (*nullFlags)[i] = false; + (*nullFlags)[count - 1] = true; + (*addInfoIsNull)[count - 1] = true; + } + *nentries = count; + +/* elog(NOTICE, "%d", *nentries); + for (i = 0; i < *nentries; i++) + { + elog(NOTICE, "%s", text_to_cstring(DatumGetPointer((entries)[i]))); + }*/ + + PG_FREE_IF_COPY(query, 0); + PG_RETURN_POINTER(entries); +} + +PG_FUNCTION_INFO_V1(ruminv_extract_tsvector); +Datum +ruminv_extract_tsvector(PG_FUNCTION_ARGS) +{ + TSVector vector = PG_GETARG_TSVECTOR(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + + /* StrategyNumber strategy = PG_GETARG_UINT16(2); */ + bool **ptr_partialmatch = (bool **) PG_GETARG_POINTER(3); + Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); + + bool **nullFlags = (bool **) PG_GETARG_POINTER(5); + int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); + Datum *entries = NULL; + + *searchMode = GIN_SEARCH_MODE_DEFAULT; + + if (vector->size > 0) + { + int i; + WordEntry *we = ARRPTR(vector); + + *nentries = vector->size + 1; + *extra_data = NULL; + *ptr_partialmatch = NULL; + + entries = (Datum *) palloc(sizeof(Datum) * (*nentries)); + *nullFlags = (bool *) palloc(sizeof(bool) * (*nentries)); + + for (i = 0; i < vector->size; i++) + { + text *txt; + + txt = cstring_to_text_with_len(STRPTR(vector) + we[i].pos, we[i].len); + entries[i] = PointerGetDatum(txt); + (*nullFlags)[i] = false; + } + (*nullFlags)[*nentries - 1] = true; + } + else + { + *nentries = 0; + } + PG_FREE_IF_COPY(vector, 0); + PG_RETURN_POINTER(entries); +} + +typedef struct +{ + int sum; + int parent; + bool not; +} TmpNode; + +PG_FUNCTION_INFO_V1(ruminv_tsvector_consistent); +Datum +ruminv_tsvector_consistent(PG_FUNCTION_ARGS) +{ + bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ + /* TSVector vector = PG_GETARG_TSVECTOR(2); */ + int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + bool *recheck = (bool *) PG_GETARG_POINTER(5); + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); + bool res = false, + allFalse = true; + int i, + lastIndex = 0; + TmpNode nodes[256]; + + *recheck = false; + + for (i = 0; i < nkeys - 1; i++) + { + unsigned char *ptr, + *ptrEnd; + int size; + TmpNode *child = NULL; + + if (!check[i]) + continue; + + allFalse = false; + + if (addInfoIsNull[i]) + elog(ERROR, "Unexpected addInfoIsNull"); + + ptr = (unsigned char *)VARDATA_ANY(DatumGetPointer(addInfo[i])); + size = VARSIZE_ANY_EXHDR(DatumGetPointer(addInfo[i])); + + /*elog(NOTICE, "%d %s", i, DatumGetPointer(DirectFunctionCall1(byteaout, addInfo[i])));*/ + + if (size == 0) + { + res = true; + break; + } + + ptrEnd = ptr + size; + while 
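+		/*
+		 * The additional info is a chain of varbyte-encoded pairs (node
+		 * number, packed value) leading from this operand up to the root
+		 * of the indexed tsquery: bit 0 of the packed value marks a
+		 * negated operand, bit 1 the sign of the node's starting counter,
+		 * and the remaining bits its absolute value.
+		 */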
(ptr < ptrEnd) + { + uint32 num = decode_varbyte(&ptr), + sumVal = decode_varbyte(&ptr); + int sum, index; + bool not; + + not = (sumVal & 1) ? true : false; + sum = sumVal >> 2; + sum = (sumVal & 2) ? (-sum) : (sum); + + index = num - 1; + + /*elog(NOTICE, "a %d %d %d %d", i, index, sum, not);*/ + + if (child) + { + child->parent = index; + child->not = not; + } + + while (num > lastIndex) + { + nodes[lastIndex].parent = -2; + lastIndex++; + } + + if (nodes[index].parent == -2) + { + nodes[index].sum = sum; + nodes[index].parent = -1; + nodes[index].not = false; + } + if (!child) + { + if (not) + nodes[index].sum--; + else + nodes[index].sum++; + } + + if (index == 0) + child = NULL; + else + child = &nodes[index]; + } + } + + if (allFalse && check[nkeys - 1]) + { + res = true; + } + else + { + /*for (i = 0; i < lastIndex; i++) + { + elog(NOTICE, "s %d %d %d %d", i, nodes[i].sum, nodes[i].parent, nodes[i].not); + }*/ + + for (i = lastIndex - 1; i >= 0; i--) + { + if (nodes[i].parent != -2) + { + if (nodes[i].sum > 0) + { + if (nodes[i].parent == -1) + { + res = true; + break; + } + else + { + int parent = nodes[i].parent; + nodes[parent].sum += nodes[i].not ? -1 : 1; + } + } + } + } + } + +/* elog(NOTICE, "%d", res);*/ + + PG_RETURN_BOOL(res); +} + +PG_FUNCTION_INFO_V1(ruminv_tsquery_config); +Datum +ruminv_tsquery_config(PG_FUNCTION_ARGS) +{ + RumConfig *config = (RumConfig *)PG_GETARG_POINTER(0); + config->addInfoTypeOid = BYTEAOID; + PG_RETURN_VOID(); +} + diff --git a/rumutil.c b/rumutil.c index 105aa0acd9..43c99594f3 100644 --- a/rumutil.c +++ b/rumutil.c @@ -20,6 +20,7 @@ #include "storage/lmgr.h" #include "utils/guc.h" #include "utils/index_selfuncs.h" +#include "utils/lsyscache.h" #include "rum.h" @@ -29,6 +30,8 @@ void _PG_init(void); PG_FUNCTION_INFO_V1(rumhandler); +/* Kind of relation optioms for rum index */ +static relopt_kind rum_relopt_kind; /* * Module load callback */ @@ -43,6 +46,18 @@ _PG_init(void) 0, 0, INT_MAX, PGC_USERSET, 0, NULL, NULL, NULL); + + rum_relopt_kind = add_reloption_kind(); + + add_string_reloption(rum_relopt_kind, "orderby", + "Column name to order by operation", + NULL, NULL); + add_string_reloption(rum_relopt_kind, "addto", + "Column name to add a order by column", + NULL, NULL); + add_bool_reloption(rum_relopt_kind, "use_alternative_order", + "Use (addinfo, itempointer) order instead of just itempointer", + false); } /* @@ -55,7 +70,7 @@ rumhandler(PG_FUNCTION_ARGS) IndexAmRoutine *amroutine = makeNode(IndexAmRoutine); amroutine->amstrategies = 0; - amroutine->amsupport = 9; + amroutine->amsupport = RUMNProcs; amroutine->amcanorder = false; amroutine->amcanorderbyop = true; amroutine->amcanbackward = false; @@ -106,6 +121,58 @@ initRumState(RumState *state, Relation index) state->oneCol = (origTupdesc->natts == 1) ? 
true : false; state->origTupdesc = origTupdesc; + state->attrnOrderByColumn = InvalidAttrNumber; + state->attrnAddToColumn = InvalidAttrNumber; + if (index->rd_options) + { + RumOptions *options = (RumOptions*) index->rd_options; + + if (options->orderByColumn > 0) + { + char *colname = (char *) options + options->orderByColumn; + AttrNumber attrnOrderByHeapColumn; + + attrnOrderByHeapColumn = get_attnum(index->rd_index->indrelid, colname); + + if (!AttributeNumberIsValid(attrnOrderByHeapColumn)) + elog(ERROR, "attribute \"%s\" is not found in table", colname); + + state->attrnOrderByColumn = get_attnum(index->rd_id, colname); + + if (!AttributeNumberIsValid(state->attrnOrderByColumn)) + elog(ERROR, "attribute \"%s\" is not found in index", colname); + } + + if (options->addToColumn > 0) + { + char *colname = (char *) options + options->addToColumn; + AttrNumber attrnAddToHeapColumn; + + attrnAddToHeapColumn = get_attnum(index->rd_index->indrelid, colname); + + if (!AttributeNumberIsValid(attrnAddToHeapColumn)) + elog(ERROR, "attribute \"%s\" is not found in table", colname); + + state->attrnAddToColumn = get_attnum(index->rd_id, colname); + + if (!AttributeNumberIsValid(state->attrnAddToColumn)) + elog(ERROR, "attribute \"%s\" is not found in index", colname); + } + + if (!(AttributeNumberIsValid(state->attrnOrderByColumn) && + AttributeNumberIsValid(state->attrnAddToColumn))) + elog(ERROR, "AddTo and OrderBy columns should be defined both"); + + if (options->useAlternativeOrder) + { + if (!(AttributeNumberIsValid(state->attrnOrderByColumn) && + AttributeNumberIsValid(state->attrnAddToColumn))) + elog(ERROR, "to use alternative ordering AddTo and OrderBy should be defined"); + + state->useAlternativeOrder = true; + } + } + for (i = 0; i < origTupdesc->natts; i++) { RumConfig rumConfig; @@ -120,7 +187,19 @@ initRumState(RumState *state, Relation index) FunctionCall1(&state->configFn[i], PointerGetDatum(&rumConfig)); } - state->addInfoTypeOid[i] = rumConfig.addInfoTypeOid; + + if (state->attrnAddToColumn == i+1) + { + if (OidIsValid(rumConfig.addInfoTypeOid)) + elog(ERROR, "AddTo could should not have AddInfo"); + + state->addInfoTypeOid[i] = origTupdesc->attrs[ + state->attrnOrderByColumn - 1]->atttypid; + } + else + { + state->addInfoTypeOid[i] = rumConfig.addInfoTypeOid; + } if (state->oneCol) { @@ -225,6 +304,18 @@ initRumState(RumState *state, Relation index) state->canOrdering[i] = false; } + if (index_getprocid(index, i + 1, RUM_OUTER_ORDERING_PROC) != InvalidOid) + { + fmgr_info_copy(&(state->outerOrderingFn[i]), + index_getprocinfo(index, i + 1, RUM_OUTER_ORDERING_PROC), + CurrentMemoryContext); + state->canOuterOrdering[i] = true; + } + else + { + state->canOuterOrdering[i] = false; + } + /* * If the index column has a specified collation, we should honor that * while doing comparisons. 
However, we may have a collatable storage @@ -242,6 +333,13 @@ initRumState(RumState *state, Relation index) else state->supportCollation[i] = DEFAULT_COLLATION_OID; } + + if (AttributeNumberIsValid(state->attrnOrderByColumn)) + { + /* Follow FIXME comment(s) to understand */ + if (origTupdesc->attrs[state->attrnOrderByColumn - 1]->attbyval == false) + elog(ERROR, "currently, RUM doesn't support order by over pass-by-reference column"); + } } /* @@ -685,10 +783,13 @@ rumoptions(Datum reloptions, bool validate) RumOptions *rdopts; int numoptions; static const relopt_parse_elt tab[] = { - {"fastupdate", RELOPT_TYPE_BOOL, offsetof(RumOptions, useFastUpdate)} + {"fastupdate", RELOPT_TYPE_BOOL, offsetof(RumOptions, useFastUpdate)}, + {"orderby", RELOPT_TYPE_STRING, offsetof(RumOptions, orderByColumn)}, + {"addto", RELOPT_TYPE_STRING, offsetof(RumOptions, addToColumn)}, + {"use_alternative_order", RELOPT_TYPE_BOOL, offsetof(RumOptions, useAlternativeOrder)} }; - options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN, + options = parseRelOptions(reloptions, validate, rum_relopt_kind, &numoptions); /* if none set, we're done */ @@ -730,6 +831,9 @@ rumGetStats(Relation index, GinStatsData *stats) stats->nEntries = metadata->nEntries; stats->ginVersion = metadata->rumVersion; + if (stats->ginVersion != RUM_CURRENT_VERSION) + elog(ERROR, "unexpected RUM index version. Reindex"); + UnlockReleaseBuffer(metabuffer); } diff --git a/rumvalidate.c b/rumvalidate.c index bf608a5723..befc8f694b 100644 --- a/rumvalidate.c +++ b/rumvalidate.c @@ -135,13 +135,6 @@ rumvalidate(Oid opclassoid) 4, 4, opckeytype, opckeytype, INT2OID, INTERNALOID); break; - case GIN_TRICONSISTENT_PROC: - ok = check_amproc_signature(procform->amproc, CHAROID, false, - 7, 7, INTERNALOID, INT2OID, - opcintype, INT4OID, - INTERNALOID, INTERNALOID, - INTERNALOID); - break; case RUM_CONFIG_PROC: ok = check_amproc_signature(procform->amproc, VOIDOID, false, 1, 1, INTERNALOID); @@ -161,6 +154,11 @@ rumvalidate(Oid opclassoid) INTERNALOID, INTERNALOID, INTERNALOID); break; + case RUM_OUTER_ORDERING_PROC: + ok = check_amproc_signature(procform->amproc, FLOAT8OID, false, + 3, 3, + opcintype, opcintype, INT2OID); + break; default: ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), @@ -258,8 +256,8 @@ rumvalidate(Oid opclassoid) continue; /* got it */ if (i == GIN_COMPARE_PARTIAL_PROC) continue; /* optional method */ - if (i == GIN_CONSISTENT_PROC || i == GIN_TRICONSISTENT_PROC) - continue; /* don't need both, see check below loop */ + if (i == GIN_CONSISTENT_PROC) + continue; if (i == RUM_PRE_CONSISTENT_PROC) continue; ereport(INFO, @@ -269,18 +267,15 @@ rumvalidate(Oid opclassoid) result = false; } if (!opclassgroup || - ((opclassgroup->functionset & (1 << GIN_CONSISTENT_PROC)) == 0 && - (opclassgroup->functionset & (1 << GIN_TRICONSISTENT_PROC)) == 0)) + (opclassgroup->functionset & (1 << GIN_CONSISTENT_PROC)) == 0) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("rum opclass %s is missing support function %d or %d", - opclassname, - GIN_CONSISTENT_PROC, GIN_TRICONSISTENT_PROC))); + errmsg("rum opclass %s is missing support function %d", + opclassname, GIN_CONSISTENT_PROC))); result = false; } - ReleaseCatCacheList(proclist); ReleaseCatCacheList(oprlist); ReleaseSysCache(familytup); diff --git a/sql/orderby.sql b/sql/orderby.sql new file mode 100644 index 0000000000..cf4d90e3d4 --- /dev/null +++ b/sql/orderby.sql @@ -0,0 +1,56 @@ +CREATE TABLE tsts (id int, t tsvector, d timestamp); + +\copy tsts from 
'data/tsts.data' + +CREATE INDEX tsts_idx ON tsts USING rum (t rum_tsvector_timestamp_ops, d) + WITH (orderby = 'd', addto = 't'); + + +INSERT INTO tsts VALUES (-1, 't1 t2', '2016-05-02 02:24:22.326724'); +INSERT INTO tsts VALUES (-2, 't1 t2 t3', '2016-05-02 02:26:22.326724'); + + +SELECT count(*) FROM tsts WHERE t @@ 'wr|qh'; +SELECT count(*) FROM tsts WHERE t @@ 'wr&qh'; +SELECT count(*) FROM tsts WHERE t @@ 'eq&yt'; +SELECT count(*) FROM tsts WHERE t @@ 'eq|yt'; +SELECT count(*) FROM tsts WHERE t @@ '(eq&yt)|(wr&qh)'; +SELECT count(*) FROM tsts WHERE t @@ '(eq|yt)&(wr|qh)'; + +SET enable_indexscan=OFF; +SET enable_indexonlyscan=OFF; +SET enable_bitmapscan=OFF; +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +SELECT id, d, d <-| '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-| '2016-05-16 14:21:25' LIMIT 5; +SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; + + +RESET enable_indexscan; +RESET enable_indexonlyscan; +RESET enable_bitmapscan; +SET enable_seqscan = off; + +EXPLAIN (costs off) +SELECT count(*) FROM tsts WHERE t @@ 'wr|qh'; +SELECT count(*) FROM tsts WHERE t @@ 'wr|qh'; +SELECT count(*) FROM tsts WHERE t @@ 'wr&qh'; +SELECT count(*) FROM tsts WHERE t @@ 'eq&yt'; +SELECT count(*) FROM tsts WHERE t @@ 'eq|yt'; +SELECT count(*) FROM tsts WHERE t @@ '(eq&yt)|(wr&qh)'; +SELECT count(*) FROM tsts WHERE t @@ '(eq|yt)&(wr|qh)'; + +EXPLAIN (costs off) +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +EXPLAIN (costs off) +SELECT id, d, d <-| '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-| '2016-05-16 14:21:25' LIMIT 5; +SELECT id, d, d <-| '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d <-| '2016-05-16 14:21:25' LIMIT 5; +EXPLAIN (costs off) +SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; +SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; + +--to be fixed +--EXPLAIN (costs off) +--SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +--SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; + diff --git a/sql/rum.sql b/sql/rum.sql index 93eb42f282..a80a133465 100644 --- a/sql/rum.sql +++ b/sql/rum.sql @@ -16,7 +16,7 @@ explain (costs off) SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote'); explain (costs off) SELECT * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'ever|wrote') -ORDER BY a >< to_tsquery('pg_catalog.english', 'ever|wrote'); +ORDER BY a <-> to_tsquery('pg_catalog.english', 'ever|wrote'); explain (costs off) SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'def <-> fgr'); @@ -34,11 +34,11 @@ SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way')), * FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'way') - ORDER BY a >< to_tsquery('pg_catalog.english', 'way'); + ORDER BY a <-> to_tsquery('pg_catalog.english', 'way'); SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), * FROM test_rum WHERE a @@ 
to_tsquery('pg_catalog.english', 'way & (go | half)') - ORDER BY a >< to_tsquery('pg_catalog.english', 'way & (go | half)'); + ORDER BY a <-> to_tsquery('pg_catalog.english', 'way & (go | half)'); INSERT INTO test_rum (t) VALUES ('foo bar foo the over foo qq bar'); INSERT INTO test_rum (t) VALUES ('345 qwerty copyright'); diff --git a/sql/ruminv.sql b/sql/ruminv.sql new file mode 100644 index 0000000000..ec836fd165 --- /dev/null +++ b/sql/ruminv.sql @@ -0,0 +1,48 @@ +CREATE TABLE test_invrum(q tsquery); + +INSERT INTO test_invrum VALUES ('a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&b'::tsquery); +INSERT INTO test_invrum VALUES ('!(a|b)'::tsquery); +INSERT INTO test_invrum VALUES ('!(a&b)'::tsquery); +INSERT INTO test_invrum VALUES ('!a|b'::tsquery); +INSERT INTO test_invrum VALUES ('a&!b'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&c'::tsquery); +INSERT INTO test_invrum VALUES ('(!(a|b))&c'::tsquery); +INSERT INTO test_invrum VALUES ('(a|b)&(c|d)'::tsquery); +INSERT INTO test_invrum VALUES ('!a'::tsquery); + +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + +CREATE INDEX test_invrum_idx ON test_invrum USING rum(q); +SET enable_seqscan = OFF; + +SELECT * FROM test_invrum WHERE q @@ ''::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b c'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'd'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a b d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'c d'::tsvector; +SELECT * FROM test_invrum WHERE q @@ 'a c d'::tsvector; + +INSERT INTO test_invrum VALUES ('a:*'::tsquery); +INSERT INTO test_invrum VALUES ('a <-> b'::tsquery); diff --git a/sql/timestamp.sql b/sql/timestamp.sql new file mode 100644 index 0000000000..c521d14c39 --- /dev/null +++ b/sql/timestamp.sql @@ -0,0 +1,41 @@ + +CREATE TABLE test_timestamp ( + i timestamp +); + +INSERT INTO test_timestamp VALUES + ( '2004-10-26 03:55:08' ), + ( '2004-10-26 04:55:08' ), + ( '2004-10-26 05:55:08' ), + ( '2004-10-26 08:55:08' ), + ( '2004-10-26 09:55:08' ), + ( '2004-10-26 10:55:08' ) +; + +SELECT i <-> '2004-10-26 06:24:08', i FROM test_timestamp ORDER BY 1, 2 ASC; +SELECT i <-| '2004-10-26 06:24:08', i FROM test_timestamp ORDER BY 1, 2 ASC; +SELECT i |-> '2004-10-26 06:24:08', i FROM test_timestamp ORDER BY 1, 2 ASC; + +CREATE INDEX idx_timestamp ON test_timestamp USING rum (i); + +set enable_seqscan=off; + +explain (costs off) +SELECT * FROM 
test_timestamp WHERE i<'2004-10-26 08:55:08'::timestamp ORDER BY i; +SELECT * FROM test_timestamp WHERE i<'2004-10-26 08:55:08'::timestamp ORDER BY i; + +explain (costs off) +SELECT * FROM test_timestamp WHERE i<='2004-10-26 08:55:08'::timestamp ORDER BY i; +SELECT * FROM test_timestamp WHERE i<='2004-10-26 08:55:08'::timestamp ORDER BY i; + +explain (costs off) +SELECT * FROM test_timestamp WHERE i='2004-10-26 08:55:08'::timestamp ORDER BY i; +SELECT * FROM test_timestamp WHERE i='2004-10-26 08:55:08'::timestamp ORDER BY i; + +explain (costs off) +SELECT * FROM test_timestamp WHERE i>='2004-10-26 08:55:08'::timestamp ORDER BY i; +SELECT * FROM test_timestamp WHERE i>='2004-10-26 08:55:08'::timestamp ORDER BY i; + +explain (costs off) +SELECT * FROM test_timestamp WHERE i>'2004-10-26 08:55:08'::timestamp ORDER BY i; +SELECT * FROM test_timestamp WHERE i>'2004-10-26 08:55:08'::timestamp ORDER BY i; From 157d1372064f76149e3fc938ee0b8d1bc48b88ce Mon Sep 17 00:00:00 2001 From: Artur Zakirov Date: Tue, 31 May 2016 12:08:27 +0300 Subject: [PATCH 13/13] Added features and fix bugs: - use index scan with order by without where clause. - added RumKey structure. - fix indentation. - fix tests. --- .gitignore | 2 +- README.md | 16 +- expected/orderby.out | 23 +- expected/rum.out | 32 +- rum.h | 327 +++++++++--------- rum_timestamp.c | 36 +- rum_ts_utils.c | 196 ++++++----- rumbtree.c | 46 ++- rumbulk.c | 53 ++- rumdatapage.c | 475 +++++++++++++------------- rumentrypage.c | 55 +-- rumfast.c | 102 +++--- rumget.c | 789 +++++++++++++++++++++++++++++++------------ ruminsert.c | 252 ++++++-------- rumscan.c | 103 +++--- rumsort.c | 97 +++--- rumsort.h | 58 ++-- rumtsquery.c | 213 +++++++----- rumutil.c | 79 ++--- rumvacuum.c | 63 ++-- rumvalidate.c | 2 +- sql/orderby.sql | 8 +- sql/rum.sql | 10 +- 23 files changed, 1723 insertions(+), 1314 deletions(-) diff --git a/.gitignore b/.gitignore index 71cb53d15a..5e29f4675d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ .deps *.o *.so -results +results \ No newline at end of file diff --git a/README.md b/README.md index 3d0b9519f8..6a5934c5d4 100644 --- a/README.md +++ b/README.md @@ -32,9 +32,9 @@ The **rum** module provides the access method **rum** and the operator class The module provides new operators. -| Operator | Returns | Description -| ------------------- | ------- | ---------------------------------------------- -| tsvector >< tsquery | float4 | Returns distance between tsvector and tsquery. +| Operator | Returns | Description +| -------------------- | ------- | ---------------------------------------------- +| tsvector <-> tsquery | float4 | Returns distance between tsvector and tsquery. 
## Examples @@ -67,7 +67,10 @@ CREATE INDEX rumidx ON test_rum USING rum (a rum_tsvector_ops); And we can execute the following queries: ```sql -=# SELECT t, a >< to_tsquery('english', 'beautiful | place') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'beautiful | place') order by a >< to_tsquery('english', 'beautiful | place'); +=# SELECT t, a <-> to_tsquery('english', 'beautiful | place') AS rank + FROM test_rum + WHERE a @@ to_tsquery('english', 'beautiful | place') + ORDER BY a <-> to_tsquery('english', 'beautiful | place'); t | rank ---------------------------------+----------- The situation is most beautiful | 0.0303964 @@ -75,7 +78,10 @@ And we can execute the following queries: It looks like a beautiful place | 0.0607927 (3 rows) -=# SELECT t, a >< to_tsquery('english', 'place | situation') AS rank FROM test_rum WHERE a @@ to_tsquery('english', 'place | situation') order by a >< to_tsquery('english', 'place | situation'); +=# SELECT t, a <-> to_tsquery('english', 'place | situation') AS rank + FROM test_rum + WHERE a @@ to_tsquery('english', 'place | situation') + ORDER BY a <-> to_tsquery('english', 'place | situation'); t | rank ---------------------------------+----------- The situation is most beautiful | 0.0303964 diff --git a/expected/orderby.out b/expected/orderby.out index 60ecee8ef3..6587d7cbd1 100644 --- a/expected/orderby.out +++ b/expected/orderby.out @@ -184,7 +184,22 @@ SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY 457 | Fri May 20 20:21:22.326724 2016 | 367197.326724 (5 rows) ---to be fixed ---EXPLAIN (costs off) ---SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; ---SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +EXPLAIN (costs off) +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + -> Index Scan using tsts_idx on tsts + Order By: (d <-> 'Mon May 16 14:21:25 2016'::timestamp without time zone) +(3 rows) + +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; + id | d | ?column? +-----+---------------------------------+------------- + 355 | Mon May 16 14:21:22.326724 2016 | 2.673276 + 356 | Mon May 16 15:21:22.326724 2016 | 3597.326724 + 354 | Mon May 16 13:21:22.326724 2016 | 3602.673276 + 357 | Mon May 16 16:21:22.326724 2016 | 7197.326724 + 353 | Mon May 16 12:21:22.326724 2016 | 7202.673276 +(5 rows) + diff --git a/expected/rum.out b/expected/rum.out index 74027e64be..bbaf605434 100644 --- a/expected/rum.out +++ b/expected/rum.out @@ -96,10 +96,10 @@ SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way')), * ORDER BY a <-> to_tsquery('pg_catalog.english', 'way'); rum_ts_distance | t | a -----------------+--------------------------------------------------------------------------+--------------------------------------------------------------- - 0.0607927 | my appreciation of you in a more complimentary way than by sending this | 'appreci':2 'complimentari':8 'send':12 'way':9 - 0.0607927 | itself. Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 - 0.0607927 | so well that only a fragment, as it were, gave way. 
It still hangs as if | 'fragment':6 'gave':10 'hang':14 'still':13 'way':11 'well':2 - 0.0607927 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 + 16.4493 | my appreciation of you in a more complimentary way than by sending this | 'appreci':2 'complimentari':8 'send':12 'way':9 + 16.4493 | itself. Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 + 16.4493 | so well that only a fragment, as it were, gave way. It still hangs as if | 'fragment':6 'gave':10 'hang':14 'still':13 'way':11 'well':2 + 16.4493 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 (4 rows) SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), * @@ -108,10 +108,23 @@ SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')) ORDER BY a <-> to_tsquery('pg_catalog.english', 'way & (go | half)'); rum_ts_distance | t | a -----------------+---------------------------------------------------------------------+--------------------------------------------------------- - 0.103556 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 - 0.0991032 | itself. Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 + 9.65659 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 + 10.0905 | itself. Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 (2 rows) +SELECT + a <-> to_tsquery('pg_catalog.english', 'way & (go | half)'), + rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), + * + FROM test_rum + ORDER BY a <-> to_tsquery('pg_catalog.english', 'way & (go | half)') limit 3; + ?column? | rum_ts_distance | t | a +----------+-----------------+-------------------------------------------------------------------------+--------------------------------------------------------- + 9.65659 | 9.65659 | thinking--“to go or not to go?” We are this far on the way. Reached | 'far':11 'go':3,7 'reach':15 'think':1 'way':14 + 10.0905 | 10.0905 | itself. Put on your “specs” and look at the castle, half way up the | 'castl':10 'half':11 'look':7 'put':2 'spec':5 'way':12 + 1e+20 | 1e+20 | my appreciation of you in a more complimentary way than by sending this | 'appreci':2 'complimentari':8 'send':12 'way':9 +(3 rows) + INSERT INTO test_rum (t) VALUES ('foo bar foo the over foo qq bar'); INSERT INTO test_rum (t) VALUES ('345 qwerty copyright'); INSERT INTO test_rum (t) VALUES ('345 qwerty'); @@ -146,13 +159,6 @@ SELECT a FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'bar') ORDER 'bar':2,8 'foo':1,3,6 'qq':7 (1 row) -DELETE FROM test_rum; -SELECT count(*) from test_rum; - count -------- - 0 -(1 row) - CREATE TABLE tst (i int4, t tsvector); INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(1,100000) i; CREATE INDEX tstidx ON tst USING rum (t rum_tsvector_ops); diff --git a/rum.h b/rum.h index 1217b4ce35..754bb2085d 100644 --- a/rum.h +++ b/rum.h @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * rum.h + * bloom.h * Exported definitions for RUM index. 
* * Portions Copyright (c) 2015-2016, Postgres Professional @@ -42,7 +42,7 @@ typedef struct RumPageOpaqueData * heap tuples. */ OffsetNumber freespace; uint16 flags; /* see bit definitions below */ -} RumPageOpaqueData; +} RumPageOpaqueData; typedef RumPageOpaqueData *RumPageOpaque; @@ -91,7 +91,7 @@ typedef struct RumMetaPageData BlockNumber nEntryPages; BlockNumber nDataPages; int64 nEntries; -} RumMetaPageData; +} RumMetaPageData; #define RUM_CURRENT_VERSION (0xC0DE0001) @@ -194,14 +194,14 @@ typedef signed char RumNullCategory; (IndexInfoFindDataOffset((itup)->t_info) + \ ((rumstate)->oneCol ? 0 : sizeof(int16))) /*#define RumGetNullCategory(itup,rumstate) \ - (*((RumNullCategory *) ((char*)(itup) + RumCategoryOffset(itup,rumstate)))) + (*((RumNullCategory *) ((char*)(itup) + RumCategoryOffset(itup,rumstate)))) #define RumSetNullCategory(itup,rumstate,c) \ (*((RumNullCategory *) ((char*)(itup) + RumCategoryOffset(itup,rumstate))) = (c))*/ #define RumGetNullCategory(itup,rumstate) \ - (*((RumNullCategory *) ((char*)(itup) + IndexTupleSize(itup) - sizeof(RumNullCategory)))) + (*((RumNullCategory *) ((char*)(itup) + IndexTupleSize(itup) - sizeof(RumNullCategory)))) #define RumSetNullCategory(itup,rumstate,c) \ - (*((RumNullCategory *) ((char*)(itup) + IndexTupleSize(itup) - sizeof(RumNullCategory))) = (c)) + (*((RumNullCategory *) ((char*)(itup) + IndexTupleSize(itup) - sizeof(RumNullCategory))) = (c)) /* * Access macros for leaf-page entry tuples (see discussion in README) @@ -261,12 +261,12 @@ typedef struct { ItemPointerData iptr; OffsetNumber offsetNumer; - uint16 pageOffset; -} RumDataLeafItemIndex; + uint16 pageOffset; +} RumDataLeafItemIndex; #define RumDataLeafIndexCount 32 -#define RumDataPageSize \ +#define RumDataPageSize \ (BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \ - MAXALIGN(sizeof(ItemPointerData)) \ - MAXALIGN(sizeof(RumPageOpaqueData)) \ @@ -290,7 +290,7 @@ typedef struct RumOptions bool useAlternativeOrder; int orderByColumn; int addToColumn; -} RumOptions; +} RumOptions; #define RUM_DEFAULT_USE_FASTUPDATE false #define RumGetUseFastUpdate(relation) \ @@ -303,11 +303,19 @@ typedef struct RumOptions #define RUM_SHARE BUFFER_LOCK_SHARE #define RUM_EXCLUSIVE BUFFER_LOCK_EXCLUSIVE -typedef struct RumKey { - ItemPointerData ipd; - bool isNull; - Datum addToCompare; -} RumKey; +typedef struct RumKey +{ + ItemPointerData iptr; + Datum addInfo; + bool addInfoIsNull; +} RumKey; + +#define RumItemSetMin(item) \ +do { \ + ItemPointerSetMin(&((item)->iptr)); \ + (item)->addInfo = (Datum) 0; \ + (item)->addInfoIsNull = true; \ +} while (0) /* * RumState: working data structure describing the index being worked on @@ -343,11 +351,11 @@ typedef struct RumState FmgrInfo extractValueFn[INDEX_MAX_KEYS]; FmgrInfo extractQueryFn[INDEX_MAX_KEYS]; FmgrInfo consistentFn[INDEX_MAX_KEYS]; - FmgrInfo comparePartialFn[INDEX_MAX_KEYS]; /* optional method */ - FmgrInfo configFn[INDEX_MAX_KEYS]; /* optional method */ - FmgrInfo preConsistentFn[INDEX_MAX_KEYS]; /* optional method */ - FmgrInfo orderingFn[INDEX_MAX_KEYS]; /* optional method */ - FmgrInfo outerOrderingFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo comparePartialFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo configFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo preConsistentFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo orderingFn[INDEX_MAX_KEYS]; /* optional method */ + FmgrInfo outerOrderingFn[INDEX_MAX_KEYS]; /* optional method */ /* canPartialMatch[i] is true if comparePartialFn[i] is valid */ bool 
canPartialMatch[INDEX_MAX_KEYS]; /* canPreConsistent[i] is true if preConsistentFn[i] is valid */ @@ -357,12 +365,12 @@ typedef struct RumState bool canOuterOrdering[INDEX_MAX_KEYS]; /* Collations to pass to the support functions */ Oid supportCollation[INDEX_MAX_KEYS]; -} RumState; +} RumState; typedef struct RumConfig { Oid addInfoTypeOid; -} RumConfig; +} RumConfig; /* XLog stuff */ @@ -371,47 +379,45 @@ typedef struct rumxlogDeleteListPages { int32 ndeleted; BlockNumber toDelete[RUM_NDELETE_AT_ONCE]; -} rumxlogDeleteListPages; +} rumxlogDeleteListPages; /* rumutil.c */ extern bytea *rumoptions(Datum reloptions, bool validate); extern Datum rumhandler(PG_FUNCTION_ARGS); -extern void initRumState(RumState *state, Relation index); +extern void initRumState(RumState * state, Relation index); extern Buffer RumNewBuffer(Relation index); extern void RumInitBuffer(GenericXLogState *state, Buffer buffer, uint32 flags); extern void RumInitPage(Page page, uint32 f, Size pageSize); extern void RumInitMetabuffer(GenericXLogState *state, Buffer metaBuffer); -extern int rumCompareEntries(RumState *rumstate, OffsetNumber attnum, +extern int rumCompareEntries(RumState * rumstate, OffsetNumber attnum, Datum a, RumNullCategory categorya, Datum b, RumNullCategory categoryb); -extern int rumCompareAttEntries(RumState *rumstate, +extern int rumCompareAttEntries(RumState * rumstate, OffsetNumber attnuma, Datum a, RumNullCategory categorya, OffsetNumber attnumb, Datum b, RumNullCategory categoryb); -extern Datum *rumExtractEntries(RumState *rumstate, OffsetNumber attnum, +extern Datum *rumExtractEntries(RumState * rumstate, OffsetNumber attnum, Datum value, bool isNull, - int32 *nentries, RumNullCategory **categories, + int32 *nentries, RumNullCategory ** categories, Datum **addInfo, bool **addInfoIsNull); -extern OffsetNumber rumtuple_get_attrnum(RumState *rumstate, IndexTuple tuple); -extern Datum rumtuple_get_key(RumState *rumstate, IndexTuple tuple, - RumNullCategory *category); +extern OffsetNumber rumtuple_get_attrnum(RumState * rumstate, IndexTuple tuple); +extern Datum rumtuple_get_key(RumState * rumstate, IndexTuple tuple, + RumNullCategory * category); extern void rumGetStats(Relation index, GinStatsData *stats); extern void rumUpdateStats(Relation index, const GinStatsData *stats); /* ruminsert.c */ extern IndexBuildResult *rumbuild(Relation heap, Relation index, - struct IndexInfo *indexInfo); + struct IndexInfo *indexInfo); extern void rumbuildempty(Relation index); extern bool ruminsert(Relation index, Datum *values, bool *isnull, - ItemPointer ht_ctid, Relation heapRel, - IndexUniqueCheck checkUnique); -extern void rumEntryInsert(RumState *rumstate, + ItemPointer ht_ctid, Relation heapRel, + IndexUniqueCheck checkUnique); +extern void rumEntryInsert(RumState * rumstate, OffsetNumber attnum, Datum key, RumNullCategory category, - ItemPointerData *items, Datum *addInfo, - bool *addInfoIsNull, uint32 nitem, - GinStatsData *buildStats); + RumKey * items, uint32 nitem, GinStatsData *buildStats); /* rumbtree.c */ @@ -423,7 +429,7 @@ typedef struct RumBtreeStack /* predictNumber contains predicted number of pages on current level */ uint32 predictNumber; struct RumBtreeStack *parent; -} RumBtreeStack; +} RumBtreeStack; typedef struct RumBtreeData *RumBtree; @@ -460,50 +466,46 @@ typedef struct RumBtreeData bool isDelete; /* Data (posting tree) options */ - ItemPointerData *items; - Datum *addInfo; - bool *addInfoIsNull; + RumKey *items; uint32 nitem; uint32 curitem; PostingItem pitem; -} RumBtreeData; 
+} RumBtreeData; extern RumBtreeStack *rumPrepareFindLeafPage(RumBtree btree, BlockNumber blkno); -extern RumBtreeStack *rumFindLeafPage(RumBtree btree, RumBtreeStack *stack); -extern RumBtreeStack *rumReFindLeafPage(RumBtree btree, RumBtreeStack *stack); +extern RumBtreeStack *rumFindLeafPage(RumBtree btree, RumBtreeStack * stack); +extern RumBtreeStack *rumReFindLeafPage(RumBtree btree, RumBtreeStack * stack); extern Buffer rumStepRight(Buffer buffer, Relation index, int lockmode); -extern void freeRumBtreeStack(RumBtreeStack *stack); -extern void rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, - GinStatsData *buildStats); -extern void rumFindParents(RumBtree btree, RumBtreeStack *stack, BlockNumber rootBlkno); +extern void freeRumBtreeStack(RumBtreeStack * stack); +extern void rumInsertValue(Relation index, RumBtree btree, RumBtreeStack * stack, + GinStatsData *buildStats); +extern void rumFindParents(RumBtree btree, RumBtreeStack * stack, BlockNumber rootBlkno); /* rumentrypage.c */ extern void rumPrepareEntryScan(RumBtree btree, OffsetNumber attnum, Datum key, RumNullCategory category, - RumState *rumstate); + RumState * rumstate); extern void rumEntryFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, - Page page, Page lpage, Page rpage); + Page page, Page lpage, Page rpage); extern IndexTuple rumPageGetLinkItup(Buffer buf, Page page); -extern void rumReadTuple(RumState *rumstate, OffsetNumber attnum, - IndexTuple itup, ItemPointerData *ipd, Datum *addInfo, bool *addInfoIsNull); -extern ItemPointerData updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate); -extern void checkLeafDataPage(RumState *rumstate, AttrNumber attrnum, Page page); +extern void rumReadTuple(RumState * rumstate, OffsetNumber attnum, + IndexTuple itup, RumKey * items); +extern void rumReadTuplePointers(RumState * rumstate, OffsetNumber attnum, + IndexTuple itup, ItemPointerData *ipd); +extern ItemPointerData updateItemIndexes(Page page, OffsetNumber attnum, RumState * rumstate); +extern void checkLeafDataPage(RumState * rumstate, AttrNumber attrnum, Page page); /* rumdatapage.c */ -extern int rumCompareItemPointers(ItemPointer a, ItemPointer b); -extern int compareRumKey(RumState *state, RumKey *a, RumKey *b); -extern char *rumDataPageLeafWriteItemPointer(char *ptr, ItemPointer iptr, ItemPointer prev, bool addInfoIsNull); +extern int rumCompareItemPointers(const ItemPointerData *a, const ItemPointerData *b); +extern int compareRumKey(RumState * state, const RumKey * a, const RumKey * b); extern Pointer rumPlaceToDataPageLeaf(Pointer ptr, OffsetNumber attnum, - ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, - RumState *rumstate); + RumKey * item, ItemPointer prev, RumState * rumstate); extern Size rumCheckPlaceToDataPageLeaf(OffsetNumber attnum, - ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, - RumState *rumstate, Size size); -extern uint32 rumMergeItemPointers(ItemPointerData *dst, Datum *dst2, bool *dst3, - ItemPointerData *a, Datum *a2, bool *a3, uint32 na, - ItemPointerData *b, Datum * b2, bool *b3, uint32 nb); + RumKey * item, ItemPointer prev, RumState * rumstate, Size size); +extern uint32 rumMergeItemPointers(RumState * rumstate, RumKey * dst, + RumKey * a, uint32 na, RumKey * b, uint32 nb); extern void RumDataPageAddItem(Page page, void *data, OffsetNumber offset); extern void RumPageDeletePostingItem(Page page, OffsetNumber offset); @@ -511,22 +513,19 @@ typedef struct { RumBtreeData btree; RumBtreeStack 
*stack; -} RumPostingTreeScan; +} RumPostingTreeScan; extern RumPostingTreeScan *rumPrepareScanPostingTree(Relation index, - BlockNumber rootBlkno, bool searchMode, OffsetNumber attnum, RumState *rumstate); -extern void rumInsertItemPointers(RumState *rumstate, + BlockNumber rootBlkno, bool searchMode, OffsetNumber attnum, RumState * rumstate); +extern void rumInsertItemPointers(RumState * rumstate, OffsetNumber attnum, - RumPostingTreeScan *gdi, - ItemPointerData *items, - Datum *addInfo, - bool *addInfoIsNull, - uint32 nitem, + RumPostingTreeScan * gdi, + RumKey * items, uint32 nitem, GinStatsData *buildStats); -extern Buffer rumScanBeginPostingTree(RumPostingTreeScan *gdi); +extern Buffer rumScanBeginPostingTree(RumPostingTreeScan * gdi); extern void rumDataFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, - Page page, Page lpage, Page rpage); -extern void rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState *rumstate); + Page page, Page lpage, Page rpage); +extern void rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState * rumstate); /* rumscan.c */ @@ -572,7 +571,7 @@ typedef struct RumScanKeyData /* NB: these three arrays have only nuserentries elements! */ Datum *queryValues; RumNullCategory *queryCategories; - Pointer *extra_data; + Pointer *extra_data; StrategyNumber strategy; int32 searchMode; OffsetNumber attnum; @@ -606,9 +605,7 @@ typedef struct RumScanEntryData Buffer buffer; /* current ItemPointer to heap */ - ItemPointerData curItem; - Datum curAddInfo; - bool curAddInfoIsNull; + RumKey curItem; /* for a partial-match or full-scan query, we accumulate all TIDs here */ TIDBitmap *matchBitmap; @@ -616,11 +613,10 @@ typedef struct RumScanEntryData TBMIterateResult *matchResult; /* used for Posting list and one page in Posting tree */ - ItemPointerData *list; - Datum *addInfo; - bool *addInfoIsNull; + RumKey *list; MemoryContext context; uint32 nlist; + uint32 nalloc; OffsetNumber offset; bool isFinished; @@ -633,9 +629,9 @@ typedef struct RumScanEntryData typedef struct { ItemPointerData iptr; - float8 distance; - bool recheck; -} RumOrderingItem; + float8 distance; + bool recheck; +} RumOrderingItem; typedef struct RumScanOpaqueData { @@ -648,7 +644,7 @@ typedef struct RumScanOpaqueData int norderbys; RumScanEntry *entries; /* one per index search condition */ - RumScanEntry *sortedEntries; /* one per index search condition */ + RumScanEntry *sortedEntries; /* one per index search condition */ int entriesIncrIndex; uint32 totalentries; uint32 allocentries; /* allocated length of entries[] */ @@ -660,14 +656,14 @@ typedef struct RumScanOpaqueData bool isVoidRes; /* true if query is unsatisfiable */ bool useFastScan; TIDBitmap *tbm; -} RumScanOpaqueData; +} RumScanOpaqueData; typedef RumScanOpaqueData *RumScanOpaque; extern IndexScanDesc rumbeginscan(Relation rel, int nkeys, int norderbys); extern void rumendscan(IndexScanDesc scan); extern void rumrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, - ScanKey orderbys, int norderbys); + ScanKey orderbys, int norderbys); extern Datum rummarkpos(PG_FUNCTION_ARGS); extern Datum rumrestrpos(PG_FUNCTION_ARGS); extern void rumNewScanKey(IndexScanDesc scan); @@ -678,17 +674,10 @@ extern bool rumgettuple(IndexScanDesc scan, ScanDirection direction); /* rumvacuum.c */ extern IndexBulkDeleteResult *rumbulkdelete(IndexVacuumInfo *info, - IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback, - void *callback_state); + IndexBulkDeleteResult *stats, 
IndexBulkDeleteCallback callback, + void *callback_state); extern IndexBulkDeleteResult *rumvacuumcleanup(IndexVacuumInfo *info, - IndexBulkDeleteResult *stats); - -typedef struct -{ - ItemPointerData iptr; - bool addInfoIsNull; - Datum addInfo; -} RumEntryAccumulatorItem; + IndexBulkDeleteResult *stats); /* rumvalidate.c */ extern bool rumvalidate(Oid opclassoid); @@ -701,10 +690,10 @@ typedef struct RumEntryAccumulator RumNullCategory category; OffsetNumber attnum; bool shouldSort; - RumEntryAccumulatorItem *list; + RumKey *list; uint32 maxcount; /* allocated size of list[] */ uint32 count; /* current number of list[] entries */ -} RumEntryAccumulator; +} RumEntryAccumulator; typedef struct { @@ -713,16 +702,18 @@ typedef struct RumEntryAccumulator *entryallocator; uint32 eas_used; RBTree *tree; + RumKey *sortSpace; + uint32 sortSpaceN; } BuildAccumulator; extern void rumInitBA(BuildAccumulator *accum); extern void rumInsertBAEntries(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum, Datum *entries, Datum *addInfo, bool *addInfoIsNull, - RumNullCategory *categories, int32 nentries); + RumNullCategory * categories, int32 nentries); extern void rumBeginBAScan(BuildAccumulator *accum); -extern RumEntryAccumulatorItem *rumGetBAEntry(BuildAccumulator *accum, - OffsetNumber *attnum, Datum *key, RumNullCategory *category, +extern RumKey *rumGetBAEntry(BuildAccumulator *accum, + OffsetNumber *attnum, Datum *key, RumNullCategory * category, uint32 *n); /* rumfast.c */ @@ -733,15 +724,15 @@ typedef struct RumTupleCollector uint32 ntuples; uint32 lentuples; uint32 sumsize; -} RumTupleCollector; +} RumTupleCollector; -extern void rumHeapTupleFastInsert(RumState *rumstate, - RumTupleCollector *collector); -extern void rumHeapTupleFastCollect(RumState *rumstate, - RumTupleCollector *collector, +extern void rumHeapTupleFastInsert(RumState * rumstate, + RumTupleCollector * collector); +extern void rumHeapTupleFastCollect(RumState * rumstate, + RumTupleCollector * collector, OffsetNumber attnum, Datum value, bool isNull, ItemPointer ht_ctid); -extern void rumInsertCleanup(RumState *rumstate, +extern void rumInsertCleanup(RumState * rumstate, bool vac_delay, IndexBulkDeleteResult *stats); /* rum_ts_utils.c */ @@ -778,10 +769,10 @@ extern PGDLLIMPORT int RumFuzzySearchLimit; static inline char * rumDataPageLeafReadItemPointer(char *ptr, ItemPointer iptr, bool *addInfoIsNull) { - uint32 blockNumberIncr = 0; - uint16 offset = 0; - int i; - uint8 v; + uint32 blockNumberIncr = 0; + uint16 offset = 0; + int i; + uint8 v; i = 0; do @@ -794,8 +785,8 @@ rumDataPageLeafReadItemPointer(char *ptr, ItemPointer iptr, bool *addInfoIsNull) } while (v & HIGHBIT); - Assert((uint64)iptr->ip_blkid.bi_lo + ((uint64)iptr->ip_blkid.bi_hi << 16) + - (uint64)blockNumberIncr < ((uint64)1 << 32)); + Assert((uint64) iptr->ip_blkid.bi_lo + ((uint64) iptr->ip_blkid.bi_hi << 16) + + (uint64) blockNumberIncr < ((uint64) 1 << 32)); blockNumberIncr += iptr->ip_blkid.bi_lo + (iptr->ip_blkid.bi_hi << 16); @@ -804,7 +795,7 @@ rumDataPageLeafReadItemPointer(char *ptr, ItemPointer iptr, bool *addInfoIsNull) i = 0; - while(true) + while (true) { v = *ptr; ptr++; @@ -836,18 +827,17 @@ rumDataPageLeafReadItemPointer(char *ptr, ItemPointer iptr, bool *addInfoIsNull) * passed in order to read the first item pointer. 
*/ static inline Pointer -rumDataPageLeafRead(Pointer ptr, OffsetNumber attnum, ItemPointer iptr, - Datum *addInfo, bool *addInfoIsNull, RumState *rumstate) +rumDataPageLeafRead(Pointer ptr, OffsetNumber attnum, RumKey * item, + RumState * rumstate) { Form_pg_attribute attr; - bool isNull; + bool isNull; - ptr = rumDataPageLeafReadItemPointer(ptr, iptr, &isNull); + ptr = rumDataPageLeafReadItemPointer(ptr, &item->iptr, &isNull); - Assert(iptr->ip_posid != InvalidOffsetNumber); + Assert(item->iptr.ip_posid != InvalidOffsetNumber); - if (addInfoIsNull) - *addInfoIsNull = isNull; + item->addInfoIsNull = isNull; if (!isNull) { @@ -856,41 +846,39 @@ rumDataPageLeafRead(Pointer ptr, OffsetNumber attnum, ItemPointer iptr, if (attr->attbyval) { /* do not use aligment for pass-by-value types */ - if (addInfo) + union + { + int16 i16; + int32 i32; + } u; + + switch (attr->attlen) { - union { - int16 i16; - int32 i32; - } u; - - switch(attr->attlen) - { - case sizeof(char): - *addInfo = Int8GetDatum(*ptr); - break; - case sizeof(int16): - memcpy(&u.i16, ptr, sizeof(int16)); - *addInfo = Int16GetDatum(u.i16); - break; - case sizeof(int32): - memcpy(&u.i32, ptr, sizeof(int32)); - *addInfo = Int32GetDatum(u.i32); - break; + case sizeof(char): + item->addInfo = Int8GetDatum(*ptr); + break; + case sizeof(int16): + memcpy(&u.i16, ptr, sizeof(int16)); + item->addInfo = Int16GetDatum(u.i16); + break; + case sizeof(int32): + memcpy(&u.i32, ptr, sizeof(int32)); + item->addInfo = Int32GetDatum(u.i32); + break; #if SIZEOF_DATUM == 8 - case sizeof(Datum): - memcpy(addInfo, ptr, sizeof(Datum)); - break; + case sizeof(Datum): + memcpy(&item->addInfo, ptr, sizeof(Datum)); + break; #endif - default: - elog(ERROR, "unsupported byval length: %d", (int) (attr->attlen)); - } + default: + elog(ERROR, "unsupported byval length: %d", + (int) (attr->attlen)); } } else { ptr = (Pointer) att_align_pointer(ptr, attr->attalign, attr->attlen, ptr); - if (addInfo) - *addInfo = fetch_att(ptr, attr->attbyval, attr->attlen); + item->addInfo = fetch_att(ptr, attr->attbyval, attr->attlen); } ptr = (Pointer) att_addlength_pointer(ptr, attr->attlen, ptr); @@ -898,10 +886,39 @@ rumDataPageLeafRead(Pointer ptr, OffsetNumber attnum, ItemPointer iptr, return ptr; } +/* + * Reads next item pointer from leaf data page. + * Replaces current item pointer with the next one. Zero item pointer should be + * passed in order to read the first item pointer. 
+ */ +static inline Pointer +rumDataPageLeafReadPointer(Pointer ptr, OffsetNumber attnum, RumKey * item, + RumState * rumstate) +{ + Form_pg_attribute attr; + bool isNull; + + ptr = rumDataPageLeafReadItemPointer(ptr, &item->iptr, &isNull); + + Assert(item->iptr.ip_posid != InvalidOffsetNumber); + + if (!isNull) + { + attr = rumstate->addAttrs[attnum - 1]; + + if (!attr->attbyval) + ptr = (Pointer) att_align_pointer(ptr, attr->attalign, attr->attlen, + ptr); + + ptr = (Pointer) att_addlength_pointer(ptr, attr->attlen, ptr); + } + return ptr; +} + extern Datum FunctionCall10Coll(FmgrInfo *flinfo, Oid collation, - Datum arg1, Datum arg2, - Datum arg3, Datum arg4, Datum arg5, - Datum arg6, Datum arg7, Datum arg8, - Datum arg9, Datum arg10); + Datum arg1, Datum arg2, + Datum arg3, Datum arg4, Datum arg5, + Datum arg6, Datum arg7, Datum arg8, + Datum arg9, Datum arg10); #endif /* __RUM_H__ */ diff --git a/rum_timestamp.c b/rum_timestamp.c index 8217328d3f..32ca7f0219 100644 --- a/rum_timestamp.c +++ b/rum_timestamp.c @@ -12,8 +12,8 @@ typedef struct QueryInfo { - StrategyNumber strategy; - Datum datum; + StrategyNumber strategy; + Datum datum; } QueryInfo; @@ -21,9 +21,9 @@ PG_FUNCTION_INFO_V1(rum_timestamp_extract_value); Datum rum_timestamp_extract_value(PG_FUNCTION_ARGS) { - Datum datum = PG_GETARG_DATUM(0); - int32 *nentries = (int32 *) PG_GETARG_POINTER(1); - Datum *entries = (Datum *) palloc(sizeof(Datum)); + Datum datum = PG_GETARG_DATUM(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + Datum *entries = (Datum *) palloc(sizeof(Datum)); entries[0] = datum; *nentries = 1; @@ -35,12 +35,12 @@ PG_FUNCTION_INFO_V1(rum_timestamp_extract_query); Datum rum_timestamp_extract_query(PG_FUNCTION_ARGS) { - Datum datum = PG_GETARG_DATUM(0); - int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + Datum datum = PG_GETARG_DATUM(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); StrategyNumber strategy = PG_GETARG_UINT16(2); bool **partialmatch = (bool **) PG_GETARG_POINTER(3); Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); - Datum *entries = (Datum *) palloc(sizeof(Datum)); + Datum *entries = (Datum *) palloc(sizeof(Datum)); QueryInfo *data = (QueryInfo *) palloc(sizeof(QueryInfo)); bool *ptr_partialmatch; @@ -52,7 +52,7 @@ rum_timestamp_extract_query(PG_FUNCTION_ARGS) *extra_data = (Pointer *) palloc(sizeof(Pointer)); **extra_data = (Pointer) data; - switch(strategy) + switch (strategy) { case BTLessStrategyNumber: case BTLessEqualStrategyNumber: @@ -79,16 +79,17 @@ PG_FUNCTION_INFO_V1(rum_timestamp_compare_prefix); Datum rum_timestamp_compare_prefix(PG_FUNCTION_ARGS) { - Datum a = PG_GETARG_DATUM(0); - Datum b = PG_GETARG_DATUM(1); + Datum a = PG_GETARG_DATUM(0); + Datum b = PG_GETARG_DATUM(1); QueryInfo *data = (QueryInfo *) PG_GETARG_POINTER(3); - int32 res, cmp; + int32 res, + cmp; cmp = DatumGetInt32(DirectFunctionCall2Coll(timestamp_cmp, PG_GET_COLLATION(), - (data->strategy == BTLessStrategyNumber || + (data->strategy == BTLessStrategyNumber || data->strategy == BTLessEqualStrategyNumber) - ? data->datum : a, b)); + ? 
data->datum : a, b)); switch (data->strategy) { @@ -141,7 +142,7 @@ PG_FUNCTION_INFO_V1(rum_timestamp_consistent); Datum rum_timestamp_consistent(PG_FUNCTION_ARGS) { - bool *recheck = (bool *) PG_GETARG_POINTER(5); + bool *recheck = (bool *) PG_GETARG_POINTER(5); *recheck = false; PG_RETURN_BOOL(true); @@ -226,10 +227,10 @@ PG_FUNCTION_INFO_V1(rum_timestamp_outer_distance); Datum rum_timestamp_outer_distance(PG_FUNCTION_ARGS) { - StrategyNumber strategy = PG_GETARG_UINT16(2); + StrategyNumber strategy = PG_GETARG_UINT16(2); Datum diff; - switch(strategy) + switch (strategy) { case RUM_TMST_DISTANCE: diff = DirectFunctionCall2(timestamp_distance, @@ -253,4 +254,3 @@ rum_timestamp_outer_distance(PG_FUNCTION_ARGS) PG_RETURN_DATUM(diff); } - diff --git a/rum_ts_utils.c b/rum_ts_utils.c index 715b9bbdf9..fd2cb92888 100644 --- a/rum_ts_utils.c +++ b/rum_ts_utils.c @@ -30,13 +30,13 @@ PG_FUNCTION_INFO_V1(rum_tsquery_timestamp_consistent); PG_FUNCTION_INFO_V1(rum_tsquery_distance); PG_FUNCTION_INFO_V1(rum_ts_distance); -static float calc_rank_pos_and(float *w, Datum *addInfo, bool *addInfoIsNull, - int size); -static float calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, - int size); +static float calc_rank_pos_and(float *w, bool *check, + Datum *addInfo, bool *addInfoIsNull, int size); +static float calc_rank_pos_or(float *w, bool *check, + Datum *addInfo, bool *addInfoIsNull, int size); -static int count_pos(char *ptr, int len); -static char * decompress_pos(char *ptr, uint16 *pos); +static int count_pos(char *ptr, int len); +static char *decompress_pos(char *ptr, uint16 *pos); typedef struct { @@ -47,7 +47,7 @@ typedef struct Datum *addInfo; bool *addInfoIsNull; bool notPhrase; -} RumChkVal; +} RumChkVal; static bool pre_checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) @@ -73,7 +73,7 @@ rum_tsquery_pre_consistent(PG_FUNCTION_ARGS) TSQuery query = PG_GETARG_TSQUERY(2); - Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); + Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); bool recheck; bool res = FALSE; @@ -118,14 +118,13 @@ checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) return false; /* - * Fill position list for phrase operator if it's needed - * end it exists + * Fill position list for phrase operator if it's needed end it exists */ if (data && gcv->addInfo && gcv->addInfoIsNull[j] == false) { - bytea *positions; - int32 i; - char *ptrt; + bytea *positions; + int32 i; + char *ptrt; WordEntryPos post; if (gcv->notPhrase) @@ -137,10 +136,10 @@ checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) data->pos = palloc(sizeof(*data->pos) * data->npos); data->allocated = true; - ptrt = (char *)VARDATA_ANY(positions); + ptrt = (char *) VARDATA_ANY(positions); post = 0; - for(i=0; inpos; i++) + for (i = 0; i < data->npos; i++) { ptrt = decompress_pos(ptrt, &post); data->pos[i] = post; @@ -153,28 +152,31 @@ checkcondition_rum(void *checkval, QueryOperand *val, ExecPhraseData *data) Datum rum_tsquery_consistent(PG_FUNCTION_ARGS) { - bool *check = (bool *) PG_GETARG_POINTER(0); + bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ TSQuery query = PG_GETARG_TSQUERY(2); + /* int32 nkeys = PG_GETARG_INT32(3); */ - Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); - bool *recheck = (bool *) PG_GETARG_POINTER(5); - Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); - bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); + Pointer *extra_data = (Pointer *) 
PG_GETARG_POINTER(4); + bool *recheck = (bool *) PG_GETARG_POINTER(5); + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); bool res = FALSE; - /* The query requires recheck only if it involves - * weights */ + /* + * The query requires recheck only if it involves weights + */ *recheck = false; if (query->size > 0) { QueryItem *item; - RumChkVal gcv; + RumChkVal gcv; /* - * check-parameter array has one entry for each value - * (operand) in the query. + * check-parameter array has one entry for each value (operand) in the + * query. */ gcv.first_item = item = GETQUERY(query); gcv.check = check; @@ -193,28 +195,31 @@ rum_tsquery_consistent(PG_FUNCTION_ARGS) Datum rum_tsquery_timestamp_consistent(PG_FUNCTION_ARGS) { - bool *check = (bool *) PG_GETARG_POINTER(0); + bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ TSQuery query = PG_GETARG_TSQUERY(2); + /* int32 nkeys = PG_GETARG_INT32(3); */ - Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); - bool *recheck = (bool *) PG_GETARG_POINTER(5); - Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); - bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); + Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); + bool *recheck = (bool *) PG_GETARG_POINTER(5); + Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); + bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); bool res = FALSE; - /* The query requires recheck only if it involves - * weights */ + /* + * The query requires recheck only if it involves weights + */ *recheck = false; if (query->size > 0) { QueryItem *item; - RumChkVal gcv; + RumChkVal gcv; /* - * check-parameter array has one entry for each value - * (operand) in the query. + * check-parameter array has one entry for each value (operand) in the + * query. 
*/ gcv.first_item = item = GETQUERY(query); gcv.check = check; @@ -261,9 +266,10 @@ word_distance(int32 w) static int compress_pos(char *target, uint16 *pos, int npos) { - int i; - uint16 prev = 0, delta; - char *ptr; + int i; + uint16 prev = 0, + delta; + char *ptr; ptr = target; for (i = 0; i < npos; i++) @@ -293,9 +299,9 @@ compress_pos(char *target, uint16 *pos, int npos) static char * decompress_pos(char *ptr, uint16 *pos) { - int i; - uint8 v; - uint16 delta = 0; + int i; + uint8 v; + uint16 delta = 0; i = 0; while (true) @@ -320,7 +326,9 @@ decompress_pos(char *ptr, uint16 *pos) static int count_pos(char *ptr, int len) { - int count = 0, i; + int count = 0, + i; + for (i = 0; i < len; i++) { if (!(ptr[i] & HIGHBIT)) @@ -330,37 +338,38 @@ count_pos(char *ptr, int len) } static float -calc_rank_pos_and(float *w, Datum *addInfo, bool *addInfoIsNull, int size) +calc_rank_pos_and(float *w, bool *check, Datum *addInfo, bool *addInfoIsNull, int size) { int i, k, l, p; WordEntryPos post, - ct; + ct; int32 dimt, lenct, dist; float res = -1.0; - char *ptrt, *ptrc; + char *ptrt, + *ptrc; if (size < 2) - { - return calc_rank_pos_or(w, addInfo, addInfoIsNull, size); - } - WEP_SETPOS(POSNULL.pos[0], MAXENTRYPOS - 1); + return calc_rank_pos_or(w, check, addInfo, addInfoIsNull, size); for (i = 0; i < size; i++) { + if (!check[i]) + continue; + if (!addInfoIsNull[i]) { dimt = count_pos(VARDATA_ANY(addInfo[i]), VARSIZE_ANY_EXHDR(addInfo[i])); - ptrt = (char *)VARDATA_ANY(addInfo[i]); + ptrt = (char *) VARDATA_ANY(addInfo[i]); } else { dimt = POSNULL.npos; - ptrt = (char *)POSNULL.pos; + ptrt = (char *) POSNULL.pos; } for (k = 0; k < i; k++) { @@ -371,17 +380,23 @@ calc_rank_pos_and(float *w, Datum *addInfo, bool *addInfoIsNull, int size) post = 0; for (l = 0; l < dimt; l++) { - ptrt = decompress_pos(ptrt, &post); + if (ptrt == (char *) POSNULL.pos) + post = POSNULL.pos[0]; + else + ptrt = decompress_pos(ptrt, &post); ct = 0; if (!addInfoIsNull[k]) - ptrc = (char *)VARDATA_ANY(addInfo[k]); + ptrc = (char *) VARDATA_ANY(addInfo[k]); else - ptrc = (char *)POSNULL.pos; + ptrc = (char *) POSNULL.pos; for (p = 0; p < lenct; p++) { - ptrc = decompress_pos(ptrc, &ct); + if (ptrc == (char *) POSNULL.pos) + ct = POSNULL.pos[0]; + else + ptrc = decompress_pos(ptrc, &ct); dist = Abs((int) WEP_GETPOS(post) - (int) WEP_GETPOS(ct)); - if (dist || (dist == 0 && (ptrt == (char *)POSNULL.pos || ptrc == (char *)POSNULL.pos))) + if (dist || (dist == 0 && (ptrt == (char *) POSNULL.pos || ptrc == (char *) POSNULL.pos))) { float curw; @@ -399,14 +414,14 @@ calc_rank_pos_and(float *w, Datum *addInfo, bool *addInfoIsNull, int size) } static float -calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, int size) +calc_rank_pos_or(float *w, bool *check, Datum *addInfo, bool *addInfoIsNull, int size) { WordEntryPos post; int32 dimt, j, i; float res = 0.0; - char *ptrt; + char *ptrt; for (i = 0; i < size; i++) { @@ -414,15 +429,18 @@ calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, int size) wjm; int32 jm; + if (!check[i]) + continue; + if (!addInfoIsNull[i]) { dimt = count_pos(VARDATA_ANY(addInfo[i]), VARSIZE_ANY_EXHDR(addInfo[i])); - ptrt = (char *)VARDATA_ANY(addInfo[i]); + ptrt = (char *) VARDATA_ANY(addInfo[i]); } else { dimt = POSNULL.npos; - ptrt = (char *)POSNULL.pos; + ptrt = (char *) POSNULL.pos; } resj = 0.0; @@ -431,7 +449,10 @@ calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, int size) post = 0; for (j = 0; j < dimt; j++) { - ptrt = decompress_pos(ptrt, &post); + if (ptrt == 
(char *) POSNULL.pos) + post = POSNULL.pos[0]; + else + ptrt = decompress_pos(ptrt, &post); resj = resj + wpos(post) / ((j + 1) * (j + 1)); if (wpos(post) > wjm) { @@ -455,7 +476,7 @@ calc_rank_pos_or(float *w, Datum *addInfo, bool *addInfoIsNull, int size) } static float -calc_rank_pos(float *w, TSQuery q, Datum *addInfo, bool *addInfoIsNull, int size) +calc_rank_pos(float *w, bool *check, TSQuery q, Datum *addInfo, bool *addInfoIsNull, int size) { QueryItem *item = GETQUERY(q); float res = 0.0; @@ -463,11 +484,13 @@ calc_rank_pos(float *w, TSQuery q, Datum *addInfo, bool *addInfoIsNull, int size if (!size || !q->size) return 0.0; + WEP_SETPOS(POSNULL.pos[0], MAXENTRYPOS - 1); + /* XXX: What about NOT? */ res = (item->type == QI_OPR && (item->qoperator.oper == OP_AND || item->qoperator.oper == OP_PHRASE)) ? - calc_rank_pos_and(w, addInfo, addInfoIsNull, size) : - calc_rank_pos_or(w, addInfo, addInfoIsNull, size); + calc_rank_pos_and(w, check, addInfo, addInfoIsNull, size) : + calc_rank_pos_or(w, check, addInfo, addInfoIsNull, size); if (res < 0) res = 1e-20f; @@ -548,8 +571,8 @@ rum_extract_tsvector(PG_FUNCTION_ARGS) { TSVector vector = PG_GETARG_TSVECTOR(0); int32 *nentries = (int32 *) PG_GETARG_POINTER(1); - Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); - bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); + Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); + bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); Datum *entries = NULL; *nentries = vector->size; @@ -576,7 +599,7 @@ rum_extract_tsvector(PG_FUNCTION_ARGS) { posVec = _POSVECPTR(vector, we); posDataSize = VARHDRSZ + 2 * posVec->npos * sizeof(WordEntryPos); - posData = (bytea *)palloc(posDataSize); + posData = (bytea *) palloc(posDataSize); posDataSize = compress_pos(posData->vl_dat, posVec->pos, posVec->npos) + VARHDRSZ; SET_VARSIZE(posData, posDataSize); @@ -585,7 +608,7 @@ rum_extract_tsvector(PG_FUNCTION_ARGS) } else { - (*addInfo)[i] = (Datum)0; + (*addInfo)[i] = (Datum) 0; (*addInfoIsNull)[i] = true; } we++; @@ -651,7 +674,7 @@ rum_extract_tsquery(PG_FUNCTION_ARGS) text *txt; txt = cstring_to_text_with_len(GETOPERAND(query) + operands[i]->distance, - operands[i]->length); + operands[i]->length); entries[i] = PointerGetDatum(txt); partialmatch[i] = operands[i]->prefix; (*extra_data)[i] = (Pointer) map_item_operand; @@ -663,13 +686,13 @@ rum_extract_tsquery(PG_FUNCTION_ARGS) if (item[j].type == QI_VAL) { QueryOperand *val = &item[j].qoperand; - bool found = false; + bool found = false; for (i = 0; i < (*nentries); i++) { if (!tsCompareString(operand + operands[i]->distance, operands[i]->length, - operand + val->distance, val->length, - false)) + operand + val->distance, val->length, + false)) { map_item_operand[j] = i; found = true; @@ -691,19 +714,18 @@ rum_extract_tsquery(PG_FUNCTION_ARGS) Datum rum_tsquery_distance(PG_FUNCTION_ARGS) { - /* bool *check = (bool *) PG_GETARG_POINTER(0); */ + bool *check = (bool *) PG_GETARG_POINTER(0); /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ TSQuery query = PG_GETARG_TSQUERY(2); - - int32 nkeys = PG_GETARG_INT32(3); - /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ + int32 nkeys = PG_GETARG_INT32(3); + Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); bool *addInfoIsNull = (bool *) PG_GETARG_POINTER(9); - float8 res; + float8 res; - res = 1.0 / (float8)calc_rank_pos(weights, query, - addInfo, addInfoIsNull, nkeys); + res = 1.0 / (float8) calc_rank_pos(weights, check, query, + addInfo, addInfoIsNull, 
nkeys); PG_RETURN_FLOAT8(res); } @@ -711,16 +733,22 @@ rum_tsquery_distance(PG_FUNCTION_ARGS) Datum rum_ts_distance(PG_FUNCTION_ARGS) { - return DirectFunctionCall2Coll(ts_rank_tt, - PG_GET_COLLATION(), - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1)); + float4 r = DatumGetFloat4(DirectFunctionCall2Coll(ts_rank_tt, + PG_GET_COLLATION(), + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1))); + + if (r == 0) + PG_RETURN_FLOAT4(get_float4_infinity()); + else + PG_RETURN_FLOAT4(1.0 / r); } Datum rum_tsvector_config(PG_FUNCTION_ARGS) { - RumConfig *config = (RumConfig *)PG_GETARG_POINTER(0); + RumConfig *config = (RumConfig *) PG_GETARG_POINTER(0); + config->addInfoTypeOid = BYTEAOID; PG_RETURN_VOID(); } diff --git a/rumbtree.c b/rumbtree.c index 5435dc9687..8db7c2223c 100644 --- a/rumbtree.c +++ b/rumbtree.c @@ -70,12 +70,16 @@ rumPrepareFindLeafPage(RumBtree btree, BlockNumber blkno) * Locates leaf page contained tuple */ RumBtreeStack * -rumReFindLeafPage(RumBtree btree, RumBtreeStack *stack) +rumReFindLeafPage(RumBtree btree, RumBtreeStack * stack) { + /* + * Traverse the tree upwards until we sure that requested leaf page is in + * this subtree. Or we can stop at root page. + */ while (stack->parent) { RumBtreeStack *ptr; - Page page; + Page page; OffsetNumber maxoff; LockBuffer(stack->buffer, RUM_UNLOCK); @@ -90,14 +94,20 @@ rumReFindLeafPage(RumBtree btree, RumBtreeStack *stack) page = BufferGetPage(stack->buffer); maxoff = RumPageGetOpaque(page)->maxoff; + /* + * We don't know right bound of rightmost pointer. So, we can be sure + * that requested leaf page is in this subtree only when requested + * item pointer is less than item pointer previous to rightmost. + */ if (rumCompareItemPointers( - &(((PostingItem *)RumDataPageGetItem(page, maxoff - 1))->key), - btree->items + btree->curitem) >= 0) + &(((PostingItem *) RumDataPageGetItem(page, maxoff - 1))->key), + &btree->items[btree->curitem].iptr) >= 0) { break; } } + /* Traverse tree downwards. */ stack = rumFindLeafPage(btree, stack); return stack; } @@ -106,7 +116,7 @@ rumReFindLeafPage(RumBtree btree, RumBtreeStack *stack) * Locates leaf page contained tuple */ RumBtreeStack * -rumFindLeafPage(RumBtree btree, RumBtreeStack *stack) +rumFindLeafPage(RumBtree btree, RumBtreeStack * stack) { bool isfirst = TRUE; BlockNumber rootBlkno; @@ -166,7 +176,7 @@ rumFindLeafPage(RumBtree btree, RumBtreeStack *stack) { /* in search mode we may forget path to leaf */ RumBtreeStack *ptr = (RumBtreeStack *) palloc(sizeof(RumBtreeStack)); - Buffer buffer = ReleaseAndReadBuffer(stack->buffer, btree->index, child); + Buffer buffer = ReleaseAndReadBuffer(stack->buffer, btree->index, child); ptr->parent = stack; ptr->predictNumber = stack->predictNumber; @@ -203,7 +213,7 @@ rumStepRight(Buffer buffer, Relation index, int lockmode) Page page = BufferGetPage(buffer); bool isLeaf = RumPageIsLeaf(page); bool isData = RumPageIsData(page); - BlockNumber blkno = RumPageGetOpaque(page)->rightlink; + BlockNumber blkno = RumPageGetOpaque(page)->rightlink; nextbuffer = ReadBuffer(index, blkno); LockBuffer(nextbuffer, lockmode); @@ -215,17 +225,17 @@ rumStepRight(Buffer buffer, Relation index, int lockmode) elog(ERROR, "right sibling of RUM page is of different type"); /* - * Given the proper lock sequence above, we should never land on a - * deleted page. + * Given the proper lock sequence above, we should never land on a deleted + * page. 
*/ - if (RumPageIsDeleted(page)) + if (RumPageIsDeleted(page)) elog(ERROR, "right sibling of RUM page was deleted"); return nextbuffer; } void -freeRumBtreeStack(RumBtreeStack *stack) +freeRumBtreeStack(RumBtreeStack * stack) { while (stack) { @@ -246,7 +256,7 @@ freeRumBtreeStack(RumBtreeStack *stack) * with vacuum process */ void -rumFindParents(RumBtree btree, RumBtreeStack *stack, +rumFindParents(RumBtree btree, RumBtreeStack * stack, BlockNumber rootBlkno) { Page page; @@ -345,7 +355,7 @@ rumFindParents(RumBtree btree, RumBtreeStack *stack, * NB: the passed-in stack is freed, as though by freeRumBtreeStack. */ void -rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, +rumInsertValue(Relation index, RumBtree btree, RumBtreeStack * stack, GinStatsData *buildStats) { RumBtreeStack *parent; @@ -353,7 +363,7 @@ rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, Page page, rpage, lpage; - GenericXLogState *state; + GenericXLogState *state; /* extract root BlockNumber from stack */ Assert(stack != NULL); @@ -411,8 +421,8 @@ rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, GENERIC_XLOG_FULL_IMAGE); /* - * newlpage is a pointer to memory page, it doesn't associate with - * buffer, stack->buffer should be untouched + * newlpage is a pointer to memory page, it doesn't associate + * with buffer, stack->buffer should be untouched */ newlpage = btree->splitPage(btree, stack->buffer, rbuffer, page, rpage, stack->off); @@ -463,8 +473,8 @@ rumInsertValue(Relation index, RumBtree btree, RumBtreeStack *stack, rpage = GenericXLogRegisterBuffer(state, rbuffer, 0); /* - * newlpage is a pointer to memory page, it doesn't associate with - * buffer, stack->buffer should be untouched + * newlpage is a pointer to memory page, it doesn't associate + * with buffer, stack->buffer should be untouched */ newlpage = btree->splitPage(btree, stack->buffer, rbuffer, lpage, rpage, stack->off); diff --git a/rumbulk.c b/rumbulk.c index e93a760fa8..dc20b983d8 100644 --- a/rumbulk.c +++ b/rumbulk.c @@ -37,18 +37,23 @@ rumCombineData(RBNode *existing, const RBNode *newdata, void *arg) { accum->allocatedMemory -= GetMemoryChunkSpace(eo->list); eo->maxcount *= 2; - eo->list = (RumEntryAccumulatorItem *) - repalloc(eo->list, sizeof(RumEntryAccumulatorItem) * eo->maxcount); + eo->list = (RumKey *) repalloc(eo->list, sizeof(RumKey) * eo->maxcount); accum->allocatedMemory += GetMemoryChunkSpace(eo->list); } - /* If item pointers are not ordered, they will need to be sorted later */ + /* + * If item pointers are not ordered, they will need to be sorted later + * Note: if useAlternativeOrder == true then shouldSort should be true + * because anyway list isn't right ordered and code below could not check + * it correctly + */ if (eo->shouldSort == FALSE) { int res; + /* FIXME RumKey */ res = rumCompareItemPointers(&eo->list[eo->count - 1].iptr, - &en->list->iptr); + &en->list->iptr); Assert(res != 0); if (res > 0) @@ -144,7 +149,7 @@ rumInsertBAEntry(BuildAccumulator *accum, RumEntryAccumulator eatmp; RumEntryAccumulator *ea; bool isNew; - RumEntryAccumulatorItem item; + RumKey item; /* * For the moment, fill only the fields of eatmp that will be looked at by @@ -172,9 +177,14 @@ rumInsertBAEntry(BuildAccumulator *accum, ea->key = getDatumCopy(accum, attnum, key); ea->maxcount = DEF_NPTR; ea->count = 1; - ea->shouldSort = FALSE; - ea->list = - (RumEntryAccumulatorItem *) palloc(sizeof(RumEntryAccumulatorItem) * DEF_NPTR); + + /* + * if useAlternativeOrder = true then anyway we 
need to sort list, but + * by setting shouldSort we prevent incorrect comparison in + * rumCombineData() + */ + ea->shouldSort = accum->rumstate->useAlternativeOrder; + ea->list = (RumKey *) palloc(sizeof(RumKey) * DEF_NPTR); ea->list[0].iptr = *heapptr; ea->list[0].addInfo = addInfo; ea->list[0].addInfoIsNull = addInfoIsNull; @@ -208,7 +218,7 @@ void rumInsertBAEntries(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum, Datum *entries, Datum *addInfo, bool *addInfoIsNull, - RumNullCategory *categories, int32 nentries) + RumNullCategory * categories, int32 nentries) { uint32 step = nentries; @@ -234,7 +244,7 @@ rumInsertBAEntries(BuildAccumulator *accum, for (i = step - 1; i < nentries && i >= 0; i += step << 1 /* *2 */ ) rumInsertBAEntry(accum, heapptr, attnum, - entries[i], addInfo[i], addInfoIsNull[i], categories[i]); + entries[i], addInfo[i], addInfoIsNull[i], categories[i]); step >>= 1; /* /2 */ } @@ -250,6 +260,12 @@ qsortCompareItemPointers(const void *a, const void *b) return res; } +static int +qsortCompareRumKey(const void *a, const void *b, void *arg) +{ + return compareRumKey(arg, a, b); +} + /* Prepare to read out the rbtree contents using rumGetBAEntry */ void rumBeginBAScan(BuildAccumulator *accum) @@ -262,13 +278,13 @@ rumBeginBAScan(BuildAccumulator *accum) * This consists of a single key datum and a list (array) of one or more * heap TIDs in which that key is found. The list is guaranteed sorted. */ -RumEntryAccumulatorItem * +RumKey * rumGetBAEntry(BuildAccumulator *accum, - OffsetNumber *attnum, Datum *key, RumNullCategory *category, + OffsetNumber *attnum, Datum *key, RumNullCategory * category, uint32 *n) { RumEntryAccumulator *entry; - RumEntryAccumulatorItem *list; + RumKey *list; entry = (RumEntryAccumulator *) rb_iterate(accum->tree); @@ -283,9 +299,14 @@ rumGetBAEntry(BuildAccumulator *accum, Assert(list != NULL && entry->count > 0); - if (entry->shouldSort && entry->count > 1) - qsort(list, entry->count, sizeof(RumEntryAccumulatorItem), - qsortCompareItemPointers); + if (entry->count > 1) + { + if (accum->rumstate->useAlternativeOrder) + qsort_arg(list, entry->count, sizeof(RumKey), + qsortCompareRumKey, accum->rumstate); + else if (entry->shouldSort) + qsort(list, entry->count, sizeof(RumKey), qsortCompareItemPointers); + } return list; } diff --git a/rumdatapage.c b/rumdatapage.c index abe4531623..9d142b1fec 100644 --- a/rumdatapage.c +++ b/rumdatapage.c @@ -25,7 +25,7 @@ */ static Size rumComputeDatumSize(Size data_length, Datum val, bool typbyval, char typalign, - int16 typlen, char typstorage) + int16 typlen, char typstorage) { if (TYPE_IS_PACKABLE(typlen, typstorage) && VARATT_CAN_MAKE_SHORT(DatumGetPointer(val))) @@ -38,8 +38,9 @@ rumComputeDatumSize(Size data_length, Datum val, bool typbyval, char typalign, } else if (typbyval) { - /* do not align type pass-by-value because anyway we - * will copy Datum */ + /* + * do not align type pass-by-value because anyway we will copy Datum + */ data_length = att_addlength_datum(data_length, typlen, val); } else @@ -58,7 +59,7 @@ rumComputeDatumSize(Size data_length, Datum val, bool typbyval, char typalign, */ static Pointer rumDatumWrite(Pointer ptr, Datum datum, bool typbyval, char typalign, - int16 typlen, char typstorage) + int16 typlen, char typstorage) { Size data_length; Pointer prev_ptr = ptr; @@ -66,13 +67,14 @@ rumDatumWrite(Pointer ptr, Datum datum, bool typbyval, char typalign, if (typbyval) { /* pass-by-value */ - union { - int16 i16; - int32 i32; - } u; + union + { + int16 i16; + int32 
i32; + } u; /* align-safe version of store_att_byval(ptr, datum, typlen); */ - switch(typlen) + switch (typlen) { case sizeof(char): *ptr = DatumGetChar(datum); @@ -160,24 +162,24 @@ rumDatumWrite(Pointer ptr, Datum datum, bool typbyval, char typalign, * BlockNumber is stored in incremental manner we also need a previous item * pointer. Also store addInfoIsNull flag using one bit of OffsetNumber. */ -char * +static char * rumDataPageLeafWriteItemPointer(char *ptr, ItemPointer iptr, ItemPointer prev, - bool addInfoIsNull) + bool addInfoIsNull) { - uint32 blockNumberIncr = 0; - uint16 offset = iptr->ip_posid; + uint32 blockNumberIncr = 0; + uint16 offset = iptr->ip_posid; Assert(rumCompareItemPointers(iptr, prev) > 0); Assert(OffsetNumberIsValid(iptr->ip_posid)); blockNumberIncr = iptr->ip_blkid.bi_lo + (iptr->ip_blkid.bi_hi << 16) - - (prev->ip_blkid.bi_lo + (prev->ip_blkid.bi_hi << 16)); + (prev->ip_blkid.bi_lo + (prev->ip_blkid.bi_hi << 16)); while (true) { *ptr = (blockNumberIncr & (~HIGHBIT)) | - ((blockNumberIncr >= HIGHBIT) ? HIGHBIT : 0); + ((blockNumberIncr >= HIGHBIT) ? HIGHBIT : 0); ptr++; if (blockNumberIncr < HIGHBIT) break; @@ -208,18 +210,18 @@ rumDataPageLeafWriteItemPointer(char *ptr, ItemPointer iptr, ItemPointer prev, */ Pointer rumPlaceToDataPageLeaf(Pointer ptr, OffsetNumber attnum, - ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, - RumState *rumstate) + RumKey * item, ItemPointer prev, RumState * rumstate) { Form_pg_attribute attr; - ptr = rumDataPageLeafWriteItemPointer(ptr, iptr, prev, addInfoIsNull); + ptr = rumDataPageLeafWriteItemPointer(ptr, &item->iptr, prev, + item->addInfoIsNull); - if (!addInfoIsNull) + if (!item->addInfoIsNull) { attr = rumstate->addAttrs[attnum - 1]; - ptr = rumDatumWrite(ptr, addInfo, attr->attbyval, attr->attalign, - attr->attlen, attr->attstorage); + ptr = rumDatumWrite(ptr, item->addInfo, attr->attbyval, attr->attalign, + attr->attlen, attr->attstorage); } return ptr; } @@ -230,12 +232,12 @@ rumPlaceToDataPageLeaf(Pointer ptr, OffsetNumber attnum, static int rumDataPageLeafGetItemPointerSize(ItemPointer iptr, ItemPointer prev) { - uint32 blockNumberIncr = 0; - uint16 offset = iptr->ip_posid; - int size = 0; + uint32 blockNumberIncr = 0; + uint16 offset = iptr->ip_posid; + int size = 0; blockNumberIncr = iptr->ip_blkid.bi_lo + (iptr->ip_blkid.bi_hi << 16) - - (prev->ip_blkid.bi_lo + (prev->ip_blkid.bi_hi << 16)); + (prev->ip_blkid.bi_lo + (prev->ip_blkid.bi_hi << 16)); while (true) @@ -263,25 +265,24 @@ rumDataPageLeafGetItemPointerSize(ItemPointer iptr, ItemPointer prev) */ Size rumCheckPlaceToDataPageLeaf(OffsetNumber attnum, - ItemPointer iptr, Datum addInfo, bool addInfoIsNull, ItemPointer prev, - RumState *rumstate, Size size) + RumKey * item, ItemPointer prev, RumState * rumstate, Size size) { Form_pg_attribute attr; - size += rumDataPageLeafGetItemPointerSize(iptr, prev); + size += rumDataPageLeafGetItemPointerSize(&item->iptr, prev); - if (!addInfoIsNull) + if (!item->addInfoIsNull) { attr = rumstate->addAttrs[attnum - 1]; - size = rumComputeDatumSize(size, addInfo, attr->attbyval, - attr->attalign, attr->attlen, attr->attstorage); + size = rumComputeDatumSize(size, item->addInfo, attr->attbyval, + attr->attalign, attr->attlen, attr->attstorage); } return size; } int -rumCompareItemPointers(ItemPointer a, ItemPointer b) +rumCompareItemPointers(const ItemPointerData *a, const ItemPointerData *b) { BlockNumber ba = RumItemPointerGetBlockNumber(a); BlockNumber bb = RumItemPointerGetBlockNumber(b); @@ -300,39 
+301,35 @@ rumCompareItemPointers(ItemPointer a, ItemPointer b) } int -compareRumKey(RumState *state, RumKey *a, RumKey *b) +compareRumKey(RumState * state, const RumKey * a, const RumKey * b) { - /* assume NULL is greate than any real value */ - if (state->useAlternativeOrder) + if (a->addInfoIsNull == false && b->addInfoIsNull == false) { - if (a->isNull == false && b->isNull == false) - { - int res; - AttrNumber attnum = state->attrnOrderByColumn; - - res = DatumGetInt32(FunctionCall2Coll( - &state->compareFn[attnum - 1], - state->supportCollation[attnum - 1], - a->addToCompare, b->addToCompare)); - if (res != 0) - return res; - /* fallback to ItemPointerCompare */ - } - else if (a->isNull == true) - { - if (b->isNull == false) - return 1; - /* fallback to ItemPointerCompare */ - } - else - { - Assert(b->isNull == true); - return -1; - } + int res; + AttrNumber attnum = state->attrnOrderByColumn; + + res = DatumGetInt32(FunctionCall2Coll( + &state->compareFn[attnum - 1], + state->supportCollation[attnum - 1], + a->addInfo, b->addInfo)); + if (res != 0) + return res; + /* fallback to ItemPointerCompare */ + } + else if (a->addInfoIsNull == true) + { + if (b->addInfoIsNull == false) + return 1; + /* fallback to ItemPointerCompare */ + } + else + { + Assert(b->addInfoIsNull == true); + return -1; } - return rumCompareItemPointers(&a->ipd, &b->ipd); + return rumCompareItemPointers(&a->iptr, &b->iptr); } /* @@ -341,54 +338,50 @@ compareRumKey(RumState *state, RumKey *a, RumKey *b) * Caller is responsible that there is enough space at *dst. */ uint32 -rumMergeItemPointers(ItemPointerData *dst, Datum *dstAddInfo, bool *dstAddInfoIsNull, - ItemPointerData *a, Datum *aAddInfo, bool *aAddInfoIsNull, uint32 na, - ItemPointerData *b, Datum *bAddInfo, bool *bAddInfoIsNull, uint32 nb) +rumMergeItemPointers(RumState * rumstate, RumKey * dst, + RumKey * a, uint32 na, RumKey * b, uint32 nb) { - ItemPointerData *dptr = dst; - ItemPointerData *aptr = a, + RumKey *dptr = dst; + RumKey *aptr = a, *bptr = b; while (aptr - a < na && bptr - b < nb) { - int cmp = rumCompareItemPointers(aptr, bptr); + int cmp; + + if (rumstate->useAlternativeOrder) + { + cmp = compareRumKey(rumstate, aptr, bptr); + } + else + { + cmp = rumCompareItemPointers(&aptr->iptr, &bptr->iptr); + } if (cmp > 0) { *dptr++ = *bptr++; - *dstAddInfo++ = *bAddInfo++; - *dstAddInfoIsNull++ = *bAddInfoIsNull++; } else if (cmp == 0) { /* we want only one copy of the identical items */ *dptr++ = *bptr++; - *dstAddInfo++ = *bAddInfo++; - *dstAddInfoIsNull++ = *bAddInfoIsNull++; aptr++; - aAddInfo++; - aAddInfoIsNull++; } else { *dptr++ = *aptr++; - *dstAddInfo++ = *aAddInfo++; - *dstAddInfoIsNull++ = *aAddInfoIsNull++; } } while (aptr - a < na) { *dptr++ = *aptr++; - *dstAddInfo++ = *aAddInfo++; - *dstAddInfoIsNull++ = *aAddInfoIsNull++; } while (bptr - b < nb) { *dptr++ = *bptr++; - *dstAddInfo++ = *bAddInfo++; - *dstAddInfoIsNull++ = *bAddInfoIsNull++; } return dptr - dst; @@ -406,7 +399,7 @@ dataIsMoveRight(RumBtree btree, Page page) if (RumPageRightMost(page)) return FALSE; - return (rumCompareItemPointers(btree->items + btree->curitem, iptr) > 0) ? TRUE : FALSE; + return (rumCompareItemPointers(&btree->items[btree->curitem].iptr, iptr) > 0) ? 
TRUE : FALSE; } /* @@ -414,7 +407,7 @@ dataIsMoveRight(RumBtree btree, Page page) * correctly chosen and searching value SHOULD be on page */ static BlockNumber -dataLocateItem(RumBtree btree, RumBtreeStack *stack) +dataLocateItem(RumBtree btree, RumBtreeStack * stack) { OffsetNumber low, high, @@ -456,7 +449,8 @@ dataLocateItem(RumBtree btree, RumBtreeStack *stack) else { pitem = (PostingItem *) RumDataPageGetItem(page, mid); - result = rumCompareItemPointers(btree->items + btree->curitem, &(pitem->key)); + result = rumCompareItemPointers(&btree->items[btree->curitem].iptr, + &(pitem->key)); } if (result == 0) @@ -484,13 +478,16 @@ dataLocateItem(RumBtree btree, RumBtreeStack *stack) */ static bool findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, - ItemPointerData *iptrOut, Pointer *ptrOut) + ItemPointerData *iptrOut, Pointer *ptrOut) { Pointer ptr = RumDataPageGetData(page); - OffsetNumber i, maxoff, first = FirstOffsetNumber; - ItemPointerData iptr = {{0,0},0}; - int cmp; + OffsetNumber i, + maxoff, + first = FirstOffsetNumber; + RumKey item; + int cmp; + ItemPointerSetMin(&item.iptr); maxoff = RumPageGetOpaque(page)->maxoff; /* @@ -499,17 +496,18 @@ findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, */ for (i = 0; i < RumDataLeafIndexCount; i++) { - RumDataLeafItemIndex *index = RumPageGetIndexes(page) + i; + RumDataLeafItemIndex *index = RumPageGetIndexes(page) + i; if (index->offsetNumer == InvalidOffsetNumber) break; - cmp = rumCompareItemPointers(&index->iptr, btree->items + btree->curitem); + cmp = rumCompareItemPointers(&index->iptr, + &btree->items[btree->curitem].iptr); if (cmp < 0) { ptr = RumDataPageGetData(page) + index->pageOffset; first = index->offsetNumer; - iptr = index->iptr; + item.iptr = index->iptr; } else { @@ -522,11 +520,12 @@ findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, for (i = first; i <= maxoff; i++) { *ptrOut = ptr; - *iptrOut = iptr; - ptr = rumDataPageLeafRead(ptr, btree->entryAttnum, &iptr, - NULL, NULL, btree->rumstate); + *iptrOut = item.iptr; + ptr = rumDataPageLeafReadPointer(ptr, btree->entryAttnum, &item, + btree->rumstate); - cmp = rumCompareItemPointers(btree->items + btree->curitem, &iptr); + cmp = rumCompareItemPointers(&btree->items[btree->curitem].iptr, + &item.iptr); if (cmp == 0) { *offset = i; @@ -540,7 +539,7 @@ findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, } *ptrOut = ptr; - *iptrOut = iptr; + *iptrOut = item.iptr; *offset = RumPageGetOpaque(page)->maxoff + 1; return false; } @@ -552,11 +551,11 @@ findInLeafPage(RumBtree btree, Page page, OffsetNumber *offset, * Returns true if value found on page. 
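[Editorial aside, not part of the patch] The new `compareRumKey` shown a little earlier in this file compares the additional-information datums when both are present, treats a NULL add-info value as greater than any real value, and falls back to the item-pointer comparison on ties. A standalone sketch of that ordering with plain ints standing in for Datums and ItemPointers (all names hypothetical):

```c
/* Illustrative sketch: the nulls-last, tid-tiebreak ordering of compareRumKey. */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    bool        valueIsNull;    /* stands in for addInfoIsNull */
    int         value;          /* stands in for the addInfo Datum */
    int         tid;            /* stands in for the ItemPointer */
} DemoKey;

static int
demo_compare(const DemoKey *a, const DemoKey *b)
{
    if (!a->valueIsNull && !b->valueIsNull)
    {
        if (a->value != b->value)
            return (a->value > b->value) ? 1 : -1;
        /* equal values: fall through to tid comparison */
    }
    else if (a->valueIsNull)
    {
        if (!b->valueIsNull)
            return 1;           /* NULL sorts after any real value */
        /* both NULL: fall through to tid comparison */
    }
    else
        return -1;              /* a has a value, b is NULL */

    return (a->tid > b->tid) - (a->tid < b->tid);
}

int
main(void)
{
    DemoKey     k1 = {false, 10, 1};
    DemoKey     k2 = {true, 0, 2};

    printf("%d\n", demo_compare(&k1, &k2));     /* -1: non-NULL before NULL */
    return 0;
}
```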
*/ static bool -dataLocateLeafItem(RumBtree btree, RumBtreeStack *stack) +dataLocateLeafItem(RumBtree btree, RumBtreeStack * stack) { Page page = BufferGetPage(stack->buffer); ItemPointerData iptr; - Pointer ptr; + Pointer ptr; Assert(RumPageIsLeaf(page)); Assert(RumPageIsData(page)); @@ -685,6 +684,7 @@ RumPageDeletePostingItem(Page page, OffsetNumber offset) { char *dstptr = RumDataPageGetItem(page, offset), *sourceptr = RumDataPageGetItem(page, offset + 1); + memmove(dstptr, sourceptr, sizeof(PostingItem) * (maxoff - offset)); /* Adjust pd_lower */ ((PageHeader) page)->pd_lower = sourceptr - page; @@ -707,14 +707,15 @@ dataIsEnoughSpace(RumBtree btree, Buffer buf, OffsetNumber off) if (RumPageIsLeaf(page)) { - int n, j; - ItemPointerData iptr = {{0,0},0}; - Size size = 0; + int n, + j; + ItemPointerData iptr = {{0, 0}, 0}; + Size size = 0; /* * Calculate additional size using worst case assumption: varbyte - * encoding from zero item pointer. Also use worst case assumption about - * alignment. + * encoding from zero item pointer. Also use worst case assumption + * about alignment. */ n = RumPageGetOpaque(page)->maxoff; @@ -723,17 +724,16 @@ dataIsEnoughSpace(RumBtree btree, Buffer buf, OffsetNumber off) for (j = btree->curitem; j < btree->nitem; j++) { size = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], - (j == btree->curitem) ? (&iptr) : &btree->items[j - 1], - btree->rumstate, size); + &btree->items[j], + (j == btree->curitem) ? (&iptr) : &btree->items[j - 1].iptr, + btree->rumstate, size); } } else { j = btree->curitem; size = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], - &iptr, btree->rumstate, size); + &btree->items[j], &iptr, btree->rumstate, size); } size += MAXIMUM_ALIGNOF; @@ -785,23 +785,24 @@ dataPlaceToPage(RumBtree btree, Page page, OffsetNumber off) if (RumPageIsLeaf(page)) { - int i = 0, j, max_j; + int i = 0, + j, + max_j; Pointer ptr = RumDataPageGetData(page), copy_ptr = NULL; - ItemPointerData iptr = {{0,0},0}, copy_iptr; + ItemPointerData iptr = {{0, 0}, 0}; + RumKey copy_item; char pageCopy[BLCKSZ]; - Datum addInfo = 0; - bool addInfoIsNull = false; int maxoff = RumPageGetOpaque(page)->maxoff; int freespace; /* - * We're going to prevent var-byte re-encoding of whole page. - * Find position in page using page indexes. + * We're going to prevent var-byte re-encoding of whole page. Find + * position in page using page indexes. 
*/ findInLeafPage(btree, page, &off, &iptr, &ptr); - freespace = RumDataPageFreeSpacePre(page,ptr); + freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); if (off <= maxoff) @@ -812,7 +813,7 @@ dataPlaceToPage(RumBtree btree, Page page, OffsetNumber off) */ memcpy(pageCopy, page, BLCKSZ); copy_ptr = pageCopy + (ptr - page); - copy_iptr = iptr; + copy_item.iptr = iptr; } /* Check how many items we're going to add */ @@ -825,23 +826,21 @@ dataPlaceToPage(RumBtree btree, Page page, OffsetNumber off) i = 0; for (j = btree->curitem; j < max_j; j++) { - Pointer ptr2; + Pointer ptr2; ptr2 = page + rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], - &iptr, btree->rumstate, ptr - page); + &btree->items[j], &iptr, btree->rumstate, ptr - page); freespace = RumDataPageFreeSpacePre(page, ptr2); if (freespace < 0) break; ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, - &btree->items[j], btree->addInfo[j], btree->addInfoIsNull[j], - &iptr, btree->rumstate); - freespace = RumDataPageFreeSpacePre(page,ptr); + &btree->items[j], &iptr, btree->rumstate); + freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); - iptr = btree->items[j]; + iptr = btree->items[j].iptr; btree->curitem++; i++; } @@ -852,21 +851,20 @@ dataPlaceToPage(RumBtree btree, Page page, OffsetNumber off) for (j = off; j <= maxoff; j++) { copy_ptr = rumDataPageLeafRead(copy_ptr, btree->entryAttnum, - ©_iptr, &addInfo, &addInfoIsNull, btree->rumstate); - ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, - ©_iptr, addInfo, addInfoIsNull, - &iptr, btree->rumstate); + ©_item, btree->rumstate); + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, ©_item, + &iptr, btree->rumstate); - freespace = RumDataPageFreeSpacePre(page,ptr); + freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); - iptr = copy_iptr; + iptr = copy_item.iptr; } } RumPageGetOpaque(page)->maxoff += i; - freespace = RumDataPageFreeSpacePre(page,ptr); + freespace = RumDataPageFreeSpacePre(page, ptr); if (freespace < 0) elog(ERROR, "Not enough of space in leaf page!"); @@ -880,24 +878,22 @@ dataPlaceToPage(RumBtree btree, Page page, OffsetNumber off) } /* Macro for leaf data page split: switch to right page if needed. 
*/ -#define CHECK_SWITCH_TO_RPAGE \ - do { \ - if (ptr - RumDataPageGetData(page) > \ - totalsize / 2 && page == newlPage) \ - { \ - maxLeftIptr = iptr; \ - prevIptr.ip_blkid.bi_hi = 0; \ - prevIptr.ip_blkid.bi_lo = 0; \ - prevIptr.ip_posid = 0; \ +#define CHECK_SWITCH_TO_RPAGE \ + do { \ + if (ptr - RumDataPageGetData(page) > \ + totalsize / 2 && page == newlPage) \ + { \ + maxLeftIptr = curIptr; \ + ItemPointerSetMin(&prevIptr); \ RumPageGetOpaque(newlPage)->maxoff = j; \ - page = rPage; \ - ptr = RumDataPageGetData(rPage); \ - j = FirstOffsetNumber; \ - } \ - else \ - { \ - j++; \ - } \ + page = rPage; \ + ptr = RumDataPageGetData(rPage); \ + j = FirstOffsetNumber; \ + } \ + else \ + { \ + j++; \ + } \ } while (0) @@ -912,17 +908,21 @@ static Page dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, Page lPage, Page rPage, OffsetNumber off) { - OffsetNumber i, j, + OffsetNumber i, + j, maxoff; - Size totalsize = 0, prevTotalsize; - Pointer ptr, copyPtr; + Size totalsize = 0, + prevTotalsize; + Pointer ptr, + copyPtr; Page page; Page newlPage = PageGetTempPageCopy(lPage); Size pageSize = PageGetPageSize(newlPage); Size maxItemSize = 0; - Datum addInfo = 0; - bool addInfoIsNull; - ItemPointerData iptr, prevIptr, maxLeftIptr; + ItemPointerData prevIptr, + maxLeftIptr, + curIptr; + RumKey item; int totalCount = 0; int maxItemIndex = btree->curitem; int freespace; @@ -944,35 +944,30 @@ dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, /* Calculate the whole size we're going to place */ copyPtr = RumDataPageGetData(lpageCopy); - iptr.ip_blkid.bi_hi = 0; - iptr.ip_blkid.bi_lo = 0; - iptr.ip_posid = 0; + ItemPointerSetMin(&item.iptr); for (i = FirstOffsetNumber; i <= maxoff; i++) { if (i == off) { - prevIptr = iptr; - iptr = btree->items[maxItemIndex]; + prevIptr = item.iptr; + item = btree->items[maxItemIndex]; prevTotalsize = totalsize; totalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &iptr, btree->addInfo[maxItemIndex], - btree->addInfoIsNull[maxItemIndex], - &prevIptr, btree->rumstate, totalsize); + &item, &prevIptr, btree->rumstate, totalsize); maxItemIndex++; totalCount++; maxItemSize = Max(maxItemSize, totalsize - prevTotalsize); } - prevIptr = iptr; - copyPtr = rumDataPageLeafRead(copyPtr, btree->entryAttnum, - &iptr, &addInfo, &addInfoIsNull, btree->rumstate); + prevIptr = item.iptr; + copyPtr = rumDataPageLeafRead(copyPtr, btree->entryAttnum, &item, + btree->rumstate); prevTotalsize = totalsize; totalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &iptr, addInfo, addInfoIsNull, - &prevIptr, btree->rumstate, totalsize); + &item, &prevIptr, btree->rumstate, totalsize); totalCount++; maxItemSize = Max(maxItemSize, totalsize - prevTotalsize); @@ -980,41 +975,36 @@ dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, if (off == maxoff + 1) { - prevIptr = iptr; - iptr = btree->items[maxItemIndex]; + prevIptr = item.iptr; + item = btree->items[maxItemIndex]; if (RumPageRightMost(newlPage)) { - Size newTotalsize; + Size newTotalsize; /* - * Found how many new item pointer we're going to add using - * worst case assumptions about odd placement and alignment. + * Found how many new item pointer we're going to add using worst + * case assumptions about odd placement and alignment. 
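[Editorial aside, not part of the patch] The leaf split above first computes the total encoded size of everything to be placed, and `CHECK_SWITCH_TO_RPAGE` moves to the right page once more than half of that total has been written, so the halves are balanced by byte volume rather than by item count. A standalone sketch of that split-point choice over an array of per-item encoded sizes (names illustrative):

```c
/* Illustrative sketch: pick the split index once the running size passes half the total. */
#include <stddef.h>
#include <stdio.h>

/* Returns the index of the first item that goes to the right page. */
static size_t
choose_split(const size_t *item_sizes, size_t nitems)
{
    size_t      total = 0;
    size_t      written = 0;
    size_t      i;

    for (i = 0; i < nitems; i++)
        total += item_sizes[i];

    for (i = 0; i < nitems; i++)
    {
        written += item_sizes[i];
        if (written > total / 2)
            return i + 1;       /* the item that crosses the half stays left */
    }
    return nitems;
}

int
main(void)
{
    size_t      sizes[] = {4, 4, 20, 4, 4, 4};  /* 40 bytes in total */

    printf("%zu\n", choose_split(sizes, 6));    /* 3: left page holds 28 bytes */
    return 0;
}
```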
*/ while (maxItemIndex < btree->nitem && - (newTotalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &iptr, btree->addInfo[maxItemIndex], - btree->addInfoIsNull[maxItemIndex], - &prevIptr, btree->rumstate, totalsize)) < - 2 * RumDataPageSize - 2 * maxItemSize - 2 * MAXIMUM_ALIGNOF - ) + (newTotalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, + &item, &prevIptr, btree->rumstate, totalsize)) < + 2 * RumDataPageSize - 2 * maxItemSize - 2 * MAXIMUM_ALIGNOF) { maxItemIndex++; totalCount++; maxItemSize = Max(maxItemSize, newTotalsize - totalsize); totalsize = newTotalsize; - prevIptr = iptr; + prevIptr = item.iptr; if (maxItemIndex < btree->nitem) - iptr = btree->items[maxItemIndex]; + item = btree->items[maxItemIndex]; } } else { prevTotalsize = totalsize; totalsize = rumCheckPlaceToDataPageLeaf(btree->entryAttnum, - &iptr, btree->addInfo[maxItemIndex], - btree->addInfoIsNull[maxItemIndex], - &prevIptr, btree->rumstate, totalsize); + &item, &prevIptr, btree->rumstate, totalsize); maxItemIndex++; totalCount++; @@ -1029,10 +1019,9 @@ dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, ptr = RumDataPageGetData(newlPage); page = newlPage; j = FirstOffsetNumber; - iptr.ip_blkid.bi_hi = 0; - iptr.ip_blkid.bi_lo = 0; - iptr.ip_posid = 0; - prevIptr = iptr; + + ItemPointerSetMin(&item.iptr); + prevIptr = item.iptr; copyPtr = RumDataPageGetData(lpageCopy); for (i = FirstOffsetNumber; i <= maxoff; i++) { @@ -1040,30 +1029,30 @@ dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, { while (btree->curitem < maxItemIndex) { + curIptr = btree->items[btree->curitem].iptr; ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, - &btree->items[btree->curitem], - btree->addInfo[btree->curitem], - btree->addInfoIsNull[btree->curitem], - &prevIptr, btree->rumstate); + &btree->items[btree->curitem], + &prevIptr, btree->rumstate); freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); - prevIptr = btree->items[btree->curitem]; + prevIptr = btree->items[btree->curitem].iptr; btree->curitem++; CHECK_SWITCH_TO_RPAGE; } } - copyPtr = rumDataPageLeafRead(copyPtr, btree->entryAttnum, - &iptr, &addInfo, &addInfoIsNull, btree->rumstate); + copyPtr = rumDataPageLeafRead(copyPtr, btree->entryAttnum, &item, + btree->rumstate); - ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, &iptr, - addInfo, addInfoIsNull, &prevIptr, btree->rumstate); + curIptr = item.iptr; + ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, &item, + &prevIptr, btree->rumstate); freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); - prevIptr = iptr; + prevIptr = item.iptr; CHECK_SWITCH_TO_RPAGE; } @@ -1072,15 +1061,13 @@ dataSplitPageLeaf(RumBtree btree, Buffer lbuf, Buffer rbuf, { while (btree->curitem < maxItemIndex) { + curIptr = btree->items[btree->curitem].iptr; ptr = rumPlaceToDataPageLeaf(ptr, btree->entryAttnum, - &btree->items[btree->curitem], - btree->addInfo[btree->curitem], - btree->addInfoIsNull[btree->curitem], - &prevIptr, btree->rumstate); + &btree->items[btree->curitem], &prevIptr, btree->rumstate); freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); - prevIptr = btree->items[btree->curitem]; + prevIptr = btree->items[btree->curitem].iptr; btree->curitem++; CHECK_SWITCH_TO_RPAGE; @@ -1141,7 +1128,7 @@ dataSplitPageInternal(RumBtree btree, Buffer lbuf, Buffer rbuf, maxoff * sizeof(ItemPointerData) < 2 * (freeSpace - sizeof(ItemPointerData))) { memcpy(vector + maxoff * sizeof(ItemPointerData), - btree->items + btree->curitem, + 
&btree->items[btree->curitem].iptr, sizeof(ItemPointerData)); maxoff++; nCopied++; @@ -1155,7 +1142,7 @@ dataSplitPageInternal(RumBtree btree, Buffer lbuf, Buffer rbuf, memmove(ptr + sizeofitem, ptr, (maxoff - off + 1) * sizeofitem); if (RumPageIsLeaf(newlPage)) { - memcpy(ptr, btree->items + btree->curitem, sizeofitem); + memcpy(ptr, &btree->items[btree->curitem].iptr, sizeofitem); btree->curitem++; } else @@ -1181,7 +1168,7 @@ dataSplitPageInternal(RumBtree btree, Buffer lbuf, Buffer rbuf, RumPageGetOpaque(newlPage)->maxoff = separator; /* Adjust pd_lower */ ((PageHeader) newlPage)->pd_lower = (ptr + separator * sizeofitem + 1) - - newlPage; + newlPage; ptr = RumDataPageGetItem(rPage, FirstOffsetNumber); memcpy(ptr, vector + separator * sizeofitem, @@ -1189,16 +1176,16 @@ dataSplitPageInternal(RumBtree btree, Buffer lbuf, Buffer rbuf, RumPageGetOpaque(rPage)->maxoff = maxoff - separator; /* Adjust pd_lower */ ((PageHeader) rPage)->pd_lower = (ptr + - (maxoff - separator) * sizeofitem + 1) - - rPage; + (maxoff - separator) * sizeofitem + 1) - + rPage; PostingItemSetBlockNumber(&(btree->pitem), BufferGetBlockNumber(lbuf)); if (RumPageIsLeaf(newlPage)) btree->pitem.key = *(ItemPointerData *) RumDataPageGetItem(newlPage, - RumPageGetOpaque(newlPage)->maxoff); + RumPageGetOpaque(newlPage)->maxoff); else btree->pitem.key = ((PostingItem *) RumDataPageGetItem(newlPage, - RumPageGetOpaque(newlPage)->maxoff))->key; + RumPageGetOpaque(newlPage)->maxoff))->key; btree->rightblkno = BufferGetBlockNumber(rbuf); /* set up right bound for left page */ @@ -1232,31 +1219,31 @@ dataSplitPage(RumBtree btree, Buffer lbuf, Buffer rbuf, * page. */ ItemPointerData -updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate) +updateItemIndexes(Page page, OffsetNumber attnum, RumState * rumstate) { - Pointer ptr; - ItemPointerData iptr; - int j = 0, maxoff, i; + Pointer ptr; + RumKey item; + int j = 0, + maxoff, + i; /* Iterate over page */ maxoff = RumPageGetOpaque(page)->maxoff; ptr = RumDataPageGetData(page); - iptr.ip_blkid.bi_lo = 0; - iptr.ip_blkid.bi_hi = 0; - iptr.ip_posid = 0; + ItemPointerSetMin(&item.iptr); for (i = FirstOffsetNumber; i <= maxoff; i++) { /* Place next page index entry if it's time to */ if (i * (RumDataLeafIndexCount + 1) > (j + 1) * maxoff) { - RumPageGetIndexes(page)[j].iptr = iptr; + RumPageGetIndexes(page)[j].iptr = item.iptr; RumPageGetIndexes(page)[j].offsetNumer = i; RumPageGetIndexes(page)[j].pageOffset = ptr - RumDataPageGetData(page); j++; } - ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, rumstate); + ptr = rumDataPageLeafReadPointer(ptr, attnum, &item, rumstate); } /* Fill rest of page indexes with InvalidOffsetNumber if any */ for (; j < RumDataLeafIndexCount; j++) @@ -1267,43 +1254,48 @@ updateItemIndexes(Page page, OffsetNumber attnum, RumState *rumstate) RumPageGetOpaque(page)->freespace = RumDataPageFreeSpacePre(page, ptr); /* Adjust pd_lower and pd_upper */ ((PageHeader) page)->pd_lower = ptr - page; - ((PageHeader) page)->pd_upper = ((char*)RumPageGetIndexes(page)) - page; + ((PageHeader) page)->pd_upper = ((char *) RumPageGetIndexes(page)) - page; - return iptr; + Assert(ptr <= (char *) RumPageGetIndexes(page)); + Assert(((PageHeader) page)->pd_upper >= ((PageHeader) page)->pd_lower); + Assert(((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower == + RumPageGetOpaque(page)->freespace); + + return item.iptr; } void -checkLeafDataPage(RumState *rumstate, AttrNumber attnum, Page page) +checkLeafDataPage(RumState * rumstate, AttrNumber 
attnum, Page page) { - Offset maxoff, i; - char *ptr; - ItemPointerData iptr; - RumDataLeafItemIndex *index, *previndex = NULL; + Offset maxoff, + i; + char *ptr; + RumKey item; + RumDataLeafItemIndex *index, + *previndex = NULL; if (!(RumPageGetOpaque(page)->flags & RUM_DATA)) return; maxoff = RumPageGetOpaque(page)->maxoff; ptr = RumDataPageGetData(page); - iptr.ip_blkid.bi_lo = 0; - iptr.ip_blkid.bi_hi = 0; - iptr.ip_posid = 0; + ItemPointerSetMin(&item.iptr); Assert(RumPageGetOpaque(page)->flags & RUM_LEAF); - for(i = FirstOffsetNumber; i <= maxoff; i++) - ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, rumstate); + for (i = FirstOffsetNumber; i <= maxoff; i++) + ptr = rumDataPageLeafReadPointer(ptr, attnum, &item, rumstate); - Assert((char*)RumPageGetIndexes(page) == page + ((PageHeader)page)->pd_upper); + Assert((char *) RumPageGetIndexes(page) == page + ((PageHeader) page)->pd_upper); - for(i = 0; i offsetNumer == InvalidOffsetNumber) break; - Assert(index->pageOffset < ((PageHeader)page)->pd_lower); + Assert(index->pageOffset < ((PageHeader) page)->pd_lower); if (previndex) { @@ -1314,9 +1306,9 @@ checkLeafDataPage(RumState *rumstate, AttrNumber attnum, Page page) if (i != RumDataLeafIndexCount - 1) { - iptr = index->iptr; - rumDataPageLeafRead(RumDataPageGetData(page) + index->pageOffset, - attnum, &iptr, NULL, NULL, rumstate); + item.iptr = index->iptr; + rumDataPageLeafReadPointer(RumDataPageGetData(page) + index->pageOffset, + attnum, &item, rumstate); } } } @@ -1342,7 +1334,7 @@ rumDataFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, } void -rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState *rumstate) +rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState * rumstate) { memset(btree, 0, sizeof(RumBtreeData)); @@ -1370,7 +1362,7 @@ rumPrepareDataScan(RumBtree btree, Relation index, OffsetNumber attnum, RumState RumPostingTreeScan * rumPrepareScanPostingTree(Relation index, BlockNumber rootBlkno, - bool searchMode, OffsetNumber attnum, RumState *rumstate) + bool searchMode, OffsetNumber attnum, RumState * rumstate) { RumPostingTreeScan *gdi = (RumPostingTreeScan *) palloc0(sizeof(RumPostingTreeScan)); @@ -1388,22 +1380,15 @@ rumPrepareScanPostingTree(Relation index, BlockNumber rootBlkno, * Inserts array of item pointers, may execute several tree scan (very rare) */ void -rumInsertItemPointers(RumState *rumstate, +rumInsertItemPointers(RumState * rumstate, OffsetNumber attnum, - RumPostingTreeScan *gdi, - ItemPointerData *items, - Datum *addInfo, - bool *addInfoIsNull, - uint32 nitem, + RumPostingTreeScan * gdi, + RumKey * items, uint32 nitem, GinStatsData *buildStats) { BlockNumber rootBlkno = gdi->stack->blkno; - gdi->btree.items = items; - gdi->btree.addInfo = addInfo; - gdi->btree.addInfoIsNull = addInfoIsNull; - gdi->btree.nitem = nitem; gdi->btree.curitem = 0; @@ -1431,7 +1416,7 @@ rumInsertItemPointers(RumState *rumstate, } Buffer -rumScanBeginPostingTree(RumPostingTreeScan *gdi) +rumScanBeginPostingTree(RumPostingTreeScan * gdi) { gdi->stack = rumFindLeafPage(&gdi->btree, gdi->stack); return gdi->stack->buffer; diff --git a/rumentrypage.c b/rumentrypage.c index 51fb16d34f..c8b3916e8f 100644 --- a/rumentrypage.c +++ b/rumentrypage.c @@ -20,31 +20,40 @@ * Information is stored in the same manner as in leaf data pages. 
*/ void -rumReadTuple(RumState *rumstate, OffsetNumber attnum, - IndexTuple itup, ItemPointerData *ipd, Datum *addInfo, bool *addInfoIsNull) +rumReadTuple(RumState * rumstate, OffsetNumber attnum, + IndexTuple itup, RumKey * items) { - Pointer ptr; - int nipd = RumGetNPosting(itup), i; - ItemPointerData ip = {{0,0},0}; + Pointer ptr = RumGetPosting(itup); + RumKey item; + int nipd = RumGetNPosting(itup), + i; - ptr = RumGetPosting(itup); - - if (addInfo && addInfoIsNull) + ItemPointerSetMin(&item.iptr); + for (i = 0; i < nipd; i++) { - for (i = 0; i < nipd; i++) - { - ptr = rumDataPageLeafRead(ptr, attnum, &ip, &addInfo[i], - &addInfoIsNull[i], rumstate); - ipd[i] = ip; - } + ptr = rumDataPageLeafRead(ptr, attnum, &item, rumstate); + items[i] = item; } - else +} + +/* + * Read only item pointers from leaf data page. + * Information is stored in the same manner as in leaf data pages. + */ +void +rumReadTuplePointers(RumState * rumstate, OffsetNumber attnum, + IndexTuple itup, ItemPointerData *ipd) +{ + Pointer ptr = RumGetPosting(itup); + int nipd = RumGetNPosting(itup), + i; + RumKey item; + + ItemPointerSetMin(&item.iptr); + for (i = 0; i < nipd; i++) { - for (i = 0; i < nipd; i++) - { - ptr = rumDataPageLeafRead(ptr, attnum, &ip, NULL, NULL, rumstate); - ipd[i] = ip; - } + ptr = rumDataPageLeafReadPointer(ptr, attnum, &item, rumstate); + ipd[i] = item.iptr; } } @@ -125,7 +134,7 @@ entryIsMoveRight(RumBtree btree, Page page) * page correctly chosen and searching value SHOULD be on page */ static BlockNumber -entryLocateEntry(RumBtree btree, RumBtreeStack *stack) +entryLocateEntry(RumBtree btree, RumBtreeStack * stack) { OffsetNumber low, high, @@ -201,7 +210,7 @@ entryLocateEntry(RumBtree btree, RumBtreeStack *stack) * Returns true if value found on page. 
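[Editorial aside, not part of the patch] The posting data that `rumReadTuple` and `rumDataPageLeafRead` walk above is varbyte-compressed: `rumDataPageLeafWriteItemPointer` earlier in the patch stores each block-number delta seven payload bits per byte, with the high bit set while more bytes follow. A standalone sketch of that scheme on a plain `uint32_t` (encode/decode names are illustrative, and the 7-bit shift is inferred from the `HIGHBIT` checks shown):

```c
/* Illustrative sketch: 7-bit varbyte encoding with a high-bit continuation flag. */
#include <stdint.h>
#include <stdio.h>

#define HIGHBIT 0x80u

static unsigned char *
varbyte_encode(unsigned char *ptr, uint32_t val)
{
    for (;;)
    {
        *ptr++ = (unsigned char) ((val & 0x7f) | ((val >= HIGHBIT) ? HIGHBIT : 0));
        if (val < HIGHBIT)
            break;
        val >>= 7;
    }
    return ptr;
}

static const unsigned char *
varbyte_decode(const unsigned char *ptr, uint32_t *val)
{
    uint32_t    v = 0;
    int         shift = 0;

    for (;;)
    {
        unsigned char b = *ptr++;

        v |= (uint32_t) (b & 0x7f) << shift;
        if (!(b & HIGHBIT))
            break;
        shift += 7;
    }
    *val = v;
    return ptr;
}

int
main(void)
{
    unsigned char buf[8];
    uint32_t    decoded;

    varbyte_encode(buf, 300);
    varbyte_decode(buf, &decoded);
    printf("%u\n", decoded);    /* prints 300 */
    return 0;
}
```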
*/ static bool -entryLocateLeafEntry(RumBtree btree, RumBtreeStack *stack) +entryLocateLeafEntry(RumBtree btree, RumBtreeStack * stack) { Page page = BufferGetPage(stack->buffer); OffsetNumber low, @@ -523,7 +532,7 @@ rumEntryFillRoot(RumBtree btree, Buffer root, Buffer lbuf, Buffer rbuf, void rumPrepareEntryScan(RumBtree btree, OffsetNumber attnum, Datum key, RumNullCategory category, - RumState *rumstate) + RumState * rumstate) { memset(btree, 0, sizeof(RumBtreeData)); diff --git a/rumfast.c b/rumfast.c index e05d268d10..1e3c840e14 100644 --- a/rumfast.c +++ b/rumfast.c @@ -30,12 +30,13 @@ typedef struct KeyArray { - Datum *keys; /* expansible array of keys */ - Datum *addInfo; /* expansible array of additional information */ - bool *addInfoIsNull; /* expansible array of NULL flag of additional information */ - RumNullCategory *categories; /* another expansible array */ - int32 nvalues; /* current number of valid entries */ - int32 maxvalues; /* allocated size of arrays */ + Datum *keys; /* expansible array of keys */ + Datum *addInfo; /* expansible array of additional information */ + bool *addInfoIsNull; /* expansible array of NULL flag of + * additional information */ + RumNullCategory *categories; /* another expansible array */ + int32 nvalues; /* current number of valid entries */ + int32 maxvalues; /* allocated size of arrays */ } KeyArray; @@ -51,9 +52,9 @@ writeListPage(Relation index, Buffer buffer, Page page; int32 i, freesize; - OffsetNumber l, - off; - GenericXLogState *state; + OffsetNumber l, + off; + GenericXLogState *state; state = GenericXLogStart(index); @@ -102,7 +103,7 @@ writeListPage(Relation index, Buffer buffer, static void makeSublist(Relation index, IndexTuple *tuples, int32 ntuples, - RumMetaPageData *res) + RumMetaPageData * res) { Buffer curBuffer = InvalidBuffer; Buffer prevBuffer = InvalidBuffer; @@ -175,7 +176,7 @@ makeSublist(Relation index, IndexTuple *tuples, int32 ntuples, * preserving order */ void -rumHeapTupleFastInsert(RumState *rumstate, RumTupleCollector *collector) +rumHeapTupleFastInsert(RumState * rumstate, RumTupleCollector * collector) { Relation index = rumstate->index; Buffer metabuffer; @@ -338,10 +339,10 @@ rumHeapTupleFastInsert(RumState *rumstate, RumTupleCollector *collector) } static IndexTuple -RumFastFormTuple(RumState *rumstate, - OffsetNumber attnum, Datum key, RumNullCategory category, - Datum addInfo, - bool addInfoIsNull) +RumFastFormTuple(RumState * rumstate, + OffsetNumber attnum, Datum key, RumNullCategory category, + Datum addInfo, + bool addInfoIsNull) { Datum datums[3]; bool isnull[3]; @@ -392,11 +393,11 @@ RumFastFormTuple(RumState *rumstate, { ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", - (unsigned long) newsize, - (unsigned long) Min(INDEX_SIZE_MASK, - RumMaxItemSize), - RelationGetRelationName(rumstate->index)))); + errmsg("index row size %lu exceeds maximum %lu for index \"%s\"", + (unsigned long) newsize, + (unsigned long) Min(INDEX_SIZE_MASK, + RumMaxItemSize), + RelationGetRelationName(rumstate->index)))); pfree(itup); return NULL; } @@ -435,8 +436,8 @@ RumFastFormTuple(RumState *rumstate, * rumHeapTupleFastInsert. 
*/ void -rumHeapTupleFastCollect(RumState *rumstate, - RumTupleCollector *collector, +rumHeapTupleFastCollect(RumState * rumstate, + RumTupleCollector * collector, OffsetNumber attnum, Datum value, bool isNull, ItemPointer ht_ctid) { @@ -451,7 +452,7 @@ rumHeapTupleFastCollect(RumState *rumstate, * Extract the key values that need to be inserted in the index */ entries = rumExtractEntries(rumstate, attnum, value, isNull, - &nentries, &categories, &addInfo, &addInfoIsNull); + &nentries, &categories, &addInfo, &addInfoIsNull); /* * Allocate/reallocate memory for storing collected tuples @@ -497,10 +498,10 @@ static bool shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, IndexBulkDeleteResult *stats) { - Page metapage; - RumMetaPageData *metadata; - BlockNumber blknoToDelete; - GenericXLogState *metastate; + Page metapage; + RumMetaPageData *metadata; + BlockNumber blknoToDelete; + GenericXLogState *metastate; metastate = GenericXLogStart(index); metapage = GenericXLogRegisterBuffer(metastate, metabuffer, @@ -515,7 +516,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, int64 nDeletedHeapTuples = 0; rumxlogDeleteListPages data; Buffer buffers[RUM_NDELETE_AT_ONCE]; - GenericXLogState *state; + GenericXLogState *state; data.ndeleted = 0; while (data.ndeleted < RUM_NDELETE_AT_ONCE && blknoToDelete != newHead) @@ -647,7 +648,8 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, { IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); OffsetNumber curattnum; - Datum curkey, addInfo = 0; + Datum curkey, + addInfo = 0; bool addInfoIsNull = true; RumNullCategory curcategory; @@ -657,6 +659,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, if (OidIsValid(accum->rumstate->addInfoTypeOid[curattnum - 1])) { Form_pg_attribute attr = accum->rumstate->addAttrs[curattnum - 1]; + if (accum->rumstate->oneCol) addInfo = index_getattr(itup, 2, accum->rumstate->tupdesc[curattnum - 1], &addInfoIsNull); @@ -693,7 +696,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, /* Dump out all remaining keys */ rumInsertBAEntries(accum, &heapptr, attrnum, - ka->keys, ka->addInfo, ka->addInfoIsNull, ka->categories, ka->nvalues); + ka->keys, ka->addInfo, ka->addInfoIsNull, ka->categories, ka->nvalues); } /* @@ -717,7 +720,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, * If stats isn't null, we count deleted pending pages into the counts. */ void -rumInsertCleanup(RumState *rumstate, +rumInsertCleanup(RumState * rumstate, bool vac_delay, IndexBulkDeleteResult *stats) { Relation index = rumstate->index; @@ -803,7 +806,7 @@ rumInsertCleanup(RumState *rumstate, (RumPageHasFullRow(page) && (accum.allocatedMemory >= maintenance_work_mem * 1024L))) { - RumEntryAccumulatorItem *list; + RumKey *items; uint32 nlist; Datum key; RumNullCategory category; @@ -824,22 +827,11 @@ rumInsertCleanup(RumState *rumstate, * list. 
*/ rumBeginBAScan(&accum); - while ((list = rumGetBAEntry(&accum, + while ((items = rumGetBAEntry(&accum, &attnum, &key, &category, &nlist)) != NULL) { - ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); - Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); - bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); - int i; - - for (i = 0; i < nlist; i++) - { - iptrs[i] = list[i].iptr; - addInfo[i] = list[i].addInfo; - addInfoIsNull[i] = list[i].addInfoIsNull; - } rumEntryInsert(rumstate, attnum, key, category, - iptrs, addInfo, addInfoIsNull, nlist, NULL); + items, nlist, NULL); vacuum_delay_point(); } @@ -871,23 +863,11 @@ rumInsertCleanup(RumState *rumstate, processPendingPage(&accum, &datums, page, maxoff + 1); rumBeginBAScan(&accum); - while ((list = rumGetBAEntry(&accum, + while ((items = rumGetBAEntry(&accum, &attnum, &key, &category, &nlist)) != NULL) { - ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); - Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); - bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); - int i; - - for (i = 0; i < nlist; i++) - { - iptrs[i] = list[i].iptr; - addInfo[i] = list[i].addInfo; - addInfoIsNull[i] = list[i].addInfoIsNull; - } - rumEntryInsert(rumstate, attnum, key, category, - iptrs, addInfo, addInfoIsNull, nlist, NULL); + items, nlist, NULL); } } @@ -899,8 +879,8 @@ rumInsertCleanup(RumState *rumstate, * locking */ /* - * remove read pages from pending list, at this point all - * content of read pages is in regular structure + * remove read pages from pending list, at this point all content + * of read pages is in regular structure */ if (shiftList(index, metabuffer, blkno, stats)) { diff --git a/rumget.c b/rumget.c index 923dc799ca..83da38a830 100644 --- a/rumget.c +++ b/rumget.c @@ -34,20 +34,22 @@ typedef struct pendingPosition bool *hasMatchKey; } pendingPosition; -static bool scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, - Page page, bool equalOk); +static bool scanPage(RumState * rumstate, RumScanEntry entry, ItemPointer item, + Page page, bool equalOk); static void insertScanItem(RumScanOpaque so, bool recheck); -static int scan_entry_cmp(const void *p1, const void *p2); -static void entryGetItem(RumState *rumstate, RumScanEntry entry); +static int scan_entry_cmp(const void *p1, const void *p2); +static int rum_key_cmp_with_check(const void *p1, const void *p2, void *arg); +static void entryGetItem(RumState * rumstate, RumScanEntry entry); /* * Convenience function for invoking a key's consistentFn */ static bool -callConsistentFn(RumState *rumstate, RumScanKey key) +callConsistentFn(RumState * rumstate, RumScanKey key) { - bool res; + bool res; + /* * If we're dealing with a dummy EVERYTHING key, we don't want to call the * consistentFn; just claim it matches. @@ -55,47 +57,53 @@ callConsistentFn(RumState *rumstate, RumScanKey key) if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING) { key->recheckCurItem = false; - return true; + res = true; } + else + { + /* + * Initialize recheckCurItem in case the consistentFn doesn't know it + * should set it. The safe assumption in that case is to force + * recheck. + */ + key->recheckCurItem = true; - /* - * Initialize recheckCurItem in case the consistentFn doesn't know it - * should set it. The safe assumption in that case is to force recheck. 
- */ - key->recheckCurItem = true; - - res = DatumGetBool(FunctionCall10Coll(&rumstate->consistentFn[key->attnum - 1], + res = DatumGetBool(FunctionCall10Coll(&rumstate->consistentFn[key->attnum - 1], rumstate->supportCollation[key->attnum - 1], - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), PointerGetDatum(&key->recheckCurItem), - PointerGetDatum(key->queryValues), - PointerGetDatum(key->queryCategories), - PointerGetDatum(key->addInfo), + PointerGetDatum(key->queryValues), + PointerGetDatum(key->queryCategories), + PointerGetDatum(key->addInfo), PointerGetDatum(key->addInfoIsNull) - )); + )); + } if (res && key->attnum == rumstate->attrnAddToColumn) { - int i; + uint32 i; - /* remember some addinfo value for later ordering by addinfo - from another column */ + /* + * remember some addinfo value for later ordering by addinfo from + * another column + */ key->outerAddInfoIsNull = true; - for(i=0; inuserentries; i++) + for (i = 0; i < key->nentries; i++) { if (key->entryRes[i] && key->addInfoIsNull[0] == false) { key->outerAddInfoIsNull = false; + /* - * XXX FIXME only pass-by-value!!! - * Value should be copied to long-lived memory context and, - * somehow, freeed. Seems, the last is real problem + * XXX FIXME only pass-by-value!!! Value should be copied to + * long-lived memory context and, somehow, freeed. Seems, the + * last is real problem */ key->outerAddInfo = key->addInfo[0]; break; @@ -111,26 +119,29 @@ callConsistentFn(RumState *rumstate, RumScanKey key) */ static bool findItemInPostingPage(Page page, ItemPointer item, OffsetNumber *off, - OffsetNumber attnum, RumState *rumstate) + OffsetNumber attnum, RumState * rumstate) { OffsetNumber maxoff = RumPageGetOpaque(page)->maxoff; int res; Pointer ptr; - ItemPointerData iptr = {{0, 0}, 0}; + RumKey iter_item; + + ItemPointerSetMin(&iter_item.iptr); if (RumPageGetOpaque(page)->flags & RUM_DELETED) /* page was deleted by concurrent vacuum */ return false; ptr = RumDataPageGetData(page); + /* * scan page to find equal or first greater value */ for (*off = FirstOffsetNumber; *off <= maxoff; (*off)++) { - ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, rumstate); + ptr = rumDataPageLeafReadPointer(ptr, attnum, &iter_item, rumstate); - res = rumCompareItemPointers(item, &iptr); + res = rumCompareItemPointers(item, &iter_item.iptr); if (res <= 0) return true; } @@ -142,7 +153,7 @@ findItemInPostingPage(Page page, ItemPointer item, OffsetNumber *off, * Goes to the next page if current offset is outside of bounds */ static bool -moveRightIfItNeeded(RumBtreeData *btree, RumBtreeStack *stack) +moveRightIfItNeeded(RumBtreeData * btree, RumBtreeStack * stack) { Page page = BufferGetPage(stack->buffer); @@ -168,7 +179,7 @@ moveRightIfItNeeded(RumBtreeData *btree, RumBtreeStack *stack) */ static void scanPostingTree(Relation index, RumScanEntry scanEntry, - BlockNumber rootPostingTree, OffsetNumber attnum, RumState *rumstate) + BlockNumber rootPostingTree, OffsetNumber attnum, RumState * rumstate) { RumPostingTreeScan *gdi; Buffer buffer; @@ -188,7 +199,8 @@ scanPostingTree(Relation index, RumScanEntry scanEntry, */ for (;;) { - OffsetNumber maxoff, i; + OffsetNumber maxoff, + i; page = BufferGetPage(buffer); maxoff = RumPageGetOpaque(page)->maxoff; @@ -196,15 +208,16 @@ 
scanPostingTree(Relation index, RumScanEntry scanEntry, if ((RumPageGetOpaque(page)->flags & RUM_DELETED) == 0 && maxoff >= FirstOffsetNumber) { - ItemPointerData iptr = {{0, 0}, 0}; - Pointer ptr; + RumKey item; + Pointer ptr; + + ItemPointerSetMin(&item.iptr); ptr = RumDataPageGetData(page); for (i = FirstOffsetNumber; i <= maxoff; i++) { - ptr = rumDataPageLeafRead(ptr, attnum, &iptr, NULL, NULL, - rumstate); - tbm_add_tuples(scanEntry->matchBitmap, &iptr, 1, false); + ptr = rumDataPageLeafReadPointer(ptr, attnum, &item, rumstate); + tbm_add_tuples(scanEntry->matchBitmap, &item.iptr, 1, false); } scanEntry->predictNumberResult += maxoff; @@ -233,7 +246,7 @@ scanPostingTree(Relation index, RumScanEntry scanEntry, * Returns true if done, false if it's necessary to restart scan from scratch */ static bool -collectMatchBitmap(RumBtreeData *btree, RumBtreeStack *stack, +collectMatchBitmap(RumBtreeData * btree, RumBtreeStack * stack, RumScanEntry scanEntry) { OffsetNumber attnum; @@ -392,10 +405,10 @@ collectMatchBitmap(RumBtreeData *btree, RumBtreeStack *stack, } else { - ItemPointerData *ipd = (ItemPointerData *)palloc( - sizeof(ItemPointerData) * RumGetNPosting(itup)); - rumReadTuple(btree->rumstate, scanEntry->attnum, - itup, ipd, NULL, NULL); + ItemPointerData *ipd = (ItemPointerData *) palloc( + sizeof(ItemPointerData) * RumGetNPosting(itup)); + + rumReadTuplePointers(btree->rumstate, scanEntry->attnum, itup, ipd); tbm_add_tuples(scanEntry->matchBitmap, ipd, RumGetNPosting(itup), false); @@ -410,11 +423,254 @@ collectMatchBitmap(RumBtreeData *btree, RumBtreeStack *stack, } } +/* + * Sort array of RumKey and remove duplicates. + * + * Returns new size of the array. + */ +static uint32 +sortAndUniqRumKeys(RumKey * list, uint32 nlist) +{ + uint32 i, + j; + bool haveDups = false; + + if (nlist < 2) + return nlist; + + qsort_arg(list, nlist, sizeof(RumKey), rum_key_cmp_with_check, + (void *) &haveDups); + + /* There are duplicates, remove them */ + if (haveDups) + { + j = 1; + for (i = 1; i < nlist; i++) + { + if (rumCompareItemPointers(&list[i - 1].iptr, &list[i].iptr) != 0) + { + list[j] = list[i]; + j++; + } + } + return j; + } + else + return nlist; +} + +static void +collectMatchRumKey(RumBtreeData * btree, RumBtreeStack * stack, + RumScanEntry entry) +{ + OffsetNumber attnum; + + /* Null query cannot partial-match anything */ + if (entry->isPartialMatch && + entry->queryCategory != RUM_CAT_NORM_KEY) + return; + + /* Locate tupdesc entry for key column (for attbyval/attlen data) */ + attnum = entry->attnum; + + for (;;) + { + Page page; + IndexTuple itup; + Datum idatum; + RumNullCategory icategory; + + /* + * stack->off points to the interested entry, buffer is already locked + */ + if (moveRightIfItNeeded(btree, stack) == false) + return; + + page = BufferGetPage(stack->buffer); + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off)); + + /* + * If tuple stores another attribute then stop scan + */ + if (rumtuple_get_attrnum(btree->rumstate, itup) != attnum) + return; + + /* Safe to fetch attribute value */ + idatum = rumtuple_get_key(btree->rumstate, itup, &icategory); + + /* + * Check for appropriate scan stop conditions + */ + if (entry->isPartialMatch) + { + int32 cmp; + + /* + * In partial match, stop scan at any null (including + * placeholders); partial matches never match nulls + */ + if (icategory != RUM_CAT_NORM_KEY) + return; + + /*---------- + * Check of partial match. 
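[Editorial aside, not part of the patch] `sortAndUniqRumKeys` above follows a sort-then-squeeze pattern: sort the array, then copy forward only elements that differ from their predecessor; the RUM comparator additionally flags whether any duplicates were seen so the squeeze pass can be skipped. A standalone sketch of the same pattern on plain ints with standard `qsort()` (names illustrative):

```c
/* Illustrative sketch: sort an array and drop adjacent duplicates in place. */
#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int         ia = *(const int *) a;
    int         ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/* Sorts list[0..n-1], removes duplicates, returns the new length. */
static size_t
sort_and_uniq(int *list, size_t n)
{
    size_t      i,
                j;

    if (n < 2)
        return n;

    qsort(list, n, sizeof(int), cmp_int);

    j = 1;
    for (i = 1; i < n; i++)
    {
        if (list[i] != list[i - 1])
            list[j++] = list[i];
    }
    return j;
}

int
main(void)
{
    int         v[] = {5, 3, 5, 1, 3};
    size_t      n = sort_and_uniq(v, 5);
    size_t      i;

    for (i = 0; i < n; i++)
        printf("%d ", v[i]);    /* prints: 1 3 5 */
    printf("\n");
    return 0;
}
```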
+ * case cmp == 0 => match + * case cmp > 0 => not match and finish scan + * case cmp < 0 => not match and continue scan + *---------- + */ + cmp = DatumGetInt32(FunctionCall4Coll(&btree->rumstate->comparePartialFn[attnum - 1], + btree->rumstate->supportCollation[attnum - 1], + entry->queryKey, + idatum, + UInt16GetDatum(entry->strategy), + PointerGetDatum(entry->extra_data))); + + if (cmp > 0) + return; + else if (cmp < 0) + { + stack->off++; + continue; + } + } + else if (entry->searchMode == GIN_SEARCH_MODE_ALL) + { + /* + * In ALL mode, we are not interested in null items, so we can + * stop if we get to a null-item placeholder (which will be the + * last entry for a given attnum). We do want to include NULL_KEY + * and EMPTY_ITEM entries, though. + */ + if (icategory == RUM_CAT_NULL_ITEM) + return; + } + + if (RumIsPostingTree(itup)) + { + BlockNumber rootPostingTree = RumGetPostingTree(itup); + RumPostingTreeScan *gdi; + Page page; + OffsetNumber maxoff, + i, + j; + Pointer ptr; + RumKey item; + + ItemPointerSetMin(&item.iptr); + + /* + * We should unlock entry page before touching posting tree to + * prevent deadlocks with vacuum processes. Because entry is never + * deleted from page and posting tree is never reduced to the + * posting list, we can unlock page after getting BlockNumber of + * root of posting tree. + */ + LockBuffer(stack->buffer, RUM_UNLOCK); + gdi = rumPrepareScanPostingTree(btree->rumstate->index, + rootPostingTree, TRUE, + entry->attnum, btree->rumstate); + + /* + * We lock again the entry page and while it was unlocked insert + * might have occurred, so we need to re-find our position. + */ + LockBuffer(stack->buffer, RUM_SHARE); + page = BufferGetPage(stack->buffer); + if (!RumPageIsLeaf(page)) + { + /* + * Root page becomes non-leaf while we unlock it. We will + * start again, this situation doesn't occur often - root can + * became a non-leaf only once per life of index. + */ + return; + } + + entry->buffer = rumScanBeginPostingTree(gdi); + entry->gdi = gdi; + entry->context = AllocSetContextCreate(CurrentMemoryContext, + "GiST temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + + /* + * We keep buffer pinned because we need to prevent deletion of + * page during scan. See RUM's vacuum implementation. RefCount is + * increased to keep buffer pinned after freeRumBtreeStack() call. 
+ */ + page = BufferGetPage(entry->buffer); + entry->predictNumberResult = gdi->stack->predictNumber * RumPageGetOpaque(page)->maxoff; + + /* + * Keep page content in memory to prevent durable page locking + */ + maxoff = RumPageGetOpaque(page)->maxoff; + j = entry->nlist; + entry->nlist += maxoff; + if (entry->nalloc == 0) + { + entry->nalloc = Max(maxoff, 32); + entry->list = (RumKey *) palloc(entry->nalloc * sizeof(RumKey)); + } + else if (entry->nlist > entry->nalloc) + { + entry->nalloc *= 2; + entry->list = (RumKey *) + repalloc(entry->list, entry->nalloc * sizeof(RumKey)); + } + + ptr = RumDataPageGetData(page); + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + ptr = rumDataPageLeafRead(ptr, entry->attnum, &item, + btree->rumstate); + entry->list[i - FirstOffsetNumber + j] = item; + } + + LockBuffer(entry->buffer, RUM_UNLOCK); + entry->isFinished = FALSE; + } + else if (RumGetNPosting(itup) > 0) + { + uint32 off, + count; + + count = RumGetNPosting(itup); + + off = entry->nlist; + entry->nlist += count; + entry->predictNumberResult += count; + if (entry->nalloc == 0) + { + entry->nalloc = Max(count, 32); + entry->list = (RumKey *) palloc(entry->nalloc * sizeof(RumKey)); + } + else if (entry->nlist > entry->nalloc) + { + entry->nalloc *= 2; + entry->list = (RumKey *) + repalloc(entry->list, entry->nalloc * sizeof(RumKey)); + } + + rumReadTuple(btree->rumstate, entry->attnum, itup, entry->list + off); + entry->isFinished = FALSE; + } + + /* + * Done with this entry, go to the next + */ + stack->off++; + } +} + /* * Start* functions setup beginning state of searches: finds correct buffer and pins it. */ static void -startScanEntry(RumState *rumstate, RumScanEntry entry) +startScanEntry(RumState * rumstate, RumScanEntry entry) { RumBtreeData btreeEntry; RumBtreeStack *stackEntry; @@ -423,13 +679,9 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) restartScanEntry: entry->buffer = InvalidBuffer; - ItemPointerSetMin(&entry->curItem); - entry->curAddInfo = (Datum) 0; - entry->curAddInfoIsNull = true; + RumItemSetMin(&entry->curItem); entry->offset = InvalidOffsetNumber; entry->list = NULL; - entry->addInfo = NULL; - entry->addInfoIsNull = NULL; entry->gdi = NULL; entry->nlist = 0; entry->matchBitmap = NULL; @@ -452,7 +704,8 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) entry->isFinished = TRUE; if (entry->isPartialMatch || - entry->queryCategory == RUM_CAT_EMPTY_QUERY) + (entry->queryCategory == RUM_CAT_EMPTY_QUERY && + entry->searchMode != GIN_SEARCH_MODE_EVERYTHING)) { /* * btreeEntry.findItem locates the first item >= given search key. 
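[Editorial aside, not part of the patch] The `entry->list` handling above uses the usual amortized growth pattern: start with a small allocation (`Max(count, 32)`) and double `nalloc` with `repalloc` when the collected posting items no longer fit. A standalone sketch of that append-with-doubling pattern using `malloc`/`realloc` (names illustrative, error handling omitted for brevity):

```c
/* Illustrative sketch: grow-by-doubling append buffer. */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int        *items;
    size_t      nitems;
    size_t      nalloc;
} IntList;

static void
list_append(IntList *list, int value)
{
    if (list->nalloc == 0)
    {
        list->nalloc = 32;      /* small initial allocation, as above */
        list->items = malloc(list->nalloc * sizeof(int));
    }
    else if (list->nitems == list->nalloc)
    {
        list->nalloc *= 2;      /* double when full */
        list->items = realloc(list->items, list->nalloc * sizeof(int));
    }
    list->items[list->nitems++] = value;
}

int
main(void)
{
    IntList     list = {NULL, 0, 0};
    int         i;

    for (i = 0; i < 100; i++)
        list_append(&list, i);

    printf("%zu items, %zu slots\n", list.nitems, list.nalloc);  /* 100 items, 128 slots */
    free(list.items);
    return 0;
}
```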
@@ -488,6 +741,13 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) entry->isFinished = FALSE; } } + else if (entry->queryCategory == RUM_CAT_EMPTY_QUERY && + entry->searchMode == GIN_SEARCH_MODE_EVERYTHING) + { + btreeEntry.findItem(&btreeEntry, stackEntry); + collectMatchRumKey(&btreeEntry, stackEntry, entry); + entry->nlist = sortAndUniqRumKeys(entry->list, entry->nlist); + } else if (btreeEntry.findItem(&btreeEntry, stackEntry)) { IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stackEntry->off)); @@ -497,9 +757,12 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) BlockNumber rootPostingTree = RumGetPostingTree(itup); RumPostingTreeScan *gdi; Page page; - OffsetNumber maxoff, i; - Pointer ptr; - ItemPointerData iptr = {{0,0},0}; + OffsetNumber maxoff, + i; + Pointer ptr; + RumKey item; + + ItemPointerSetMin(&item.iptr); /* * We should unlock entry page before touching posting tree to @@ -515,10 +778,10 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) entry->buffer = rumScanBeginPostingTree(gdi); entry->gdi = gdi; entry->context = AllocSetContextCreate(CurrentMemoryContext, - "GiST temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "GiST temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); /* * We keep buffer pinned because we need to prevent deletion of @@ -531,9 +794,7 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) /* * Keep page content in memory to prevent durable page locking */ - entry->list = (ItemPointerData *) palloc(BLCKSZ * sizeof(ItemPointerData)); - entry->addInfo = (Datum *) palloc(BLCKSZ * sizeof(Datum)); - entry->addInfoIsNull = (bool *) palloc(BLCKSZ * sizeof(bool)); + entry->list = (RumKey *) palloc(BLCKSZ * sizeof(RumKey)); maxoff = RumPageGetOpaque(page)->maxoff; entry->nlist = maxoff; @@ -541,10 +802,8 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { - ptr = rumDataPageLeafRead(ptr, entry->attnum, &iptr, - &entry->addInfo[i - FirstOffsetNumber], - &entry->addInfoIsNull[i - FirstOffsetNumber], rumstate); - entry->list[i - FirstOffsetNumber] = iptr; + ptr = rumDataPageLeafRead(ptr, entry->attnum, &item, rumstate); + entry->list[i - FirstOffsetNumber] = item; } LockBuffer(entry->buffer, RUM_UNLOCK); @@ -554,13 +813,9 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) { entry->nlist = RumGetNPosting(itup); entry->predictNumberResult = entry->nlist; - entry->list = (ItemPointerData *) palloc(sizeof(ItemPointerData) * entry->nlist); - entry->addInfo = (Datum *) palloc(sizeof(Datum) * entry->nlist); - entry->addInfoIsNull = (bool *) palloc(sizeof(bool) * entry->nlist); - - rumReadTuple(rumstate, entry->attnum, itup, - entry->list, entry->addInfo, entry->addInfoIsNull); + entry->list = (RumKey *) palloc(sizeof(RumKey) * entry->nlist); + rumReadTuple(rumstate, entry->attnum, itup, entry->list); entry->isFinished = FALSE; } } @@ -571,7 +826,7 @@ startScanEntry(RumState *rumstate, RumScanEntry entry) } static void -startScanKey(RumState *rumstate, RumScanKey key) +startScanKey(RumState * rumstate, RumScanKey key) { ItemPointerSetMin(&key->curItem); key->curItemMatches = false; @@ -579,6 +834,10 @@ startScanKey(RumState *rumstate, RumScanKey key) key->isFinished = false; } +/* + * Compare entries position. At first consider isFinished flag, then compare + * item pointers. 
+ */ static int cmpEntries(RumScanEntry e1, RumScanEntry e2) { @@ -591,18 +850,34 @@ cmpEntries(RumScanEntry e1, RumScanEntry e2) } if (e2->isFinished) return -1; - return rumCompareItemPointers(&e1->curItem, &e2->curItem); + return rumCompareItemPointers(&e1->curItem.iptr, &e2->curItem.iptr); } static int scan_entry_cmp(const void *p1, const void *p2) { - RumScanEntry e1 = *((RumScanEntry *)p1); - RumScanEntry e2 = *((RumScanEntry *)p2); + RumScanEntry e1 = *((RumScanEntry *) p1); + RumScanEntry e2 = *((RumScanEntry *) p2); return -cmpEntries(e1, e2); } +static int +rum_key_cmp_with_check(const void *p1, const void *p2, void *arg) +{ + const RumKey *k1 = (const RumKey *) p1; + const RumKey *k2 = (const RumKey *) p2; + bool *haveDups = (bool *) arg; + int res; + + res = rumCompareItemPointers(&k1->iptr, &k2->iptr); + + if (res == 0) + *haveDups = true; + + return res; +} + static void startScan(IndexScanDesc scan) { @@ -650,9 +925,13 @@ startScan(IndexScanDesc scan) for (i = 0; i < so->nkeys; i++) startScanKey(rumstate, so->keys + i); + /* + * Check if we can use a fast scan: should exists at least one + * preConsistent method. + */ for (i = 0; i < so->nkeys; i++) { - RumScanKey key = &so->keys[i]; + RumScanKey key = &so->keys[i]; if (so->rumstate.canPreConsistent[key->attnum - 1]) { @@ -666,6 +945,7 @@ startScan(IndexScanDesc scan) for (i = 0; i < so->totalentries; i++) { RumScanEntry entry = so->entries[i]; + if (entry->isPartialMatch) { useFastScan = false; @@ -678,17 +958,21 @@ startScan(IndexScanDesc scan) if (useFastScan) { - so->sortedEntries = (RumScanEntry *)palloc(sizeof(RumScanEntry) * - so->totalentries); + /* + * We are going to use fast scan. Do some preliminaries. Start scan of + * each entry and sort entries by descending item pointers. 
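+		 * With descending order the entry positioned on the smallest item
+		 * pointer ends up at so->sortedEntries[so->totalentries - 1], which is
+		 * where scanGetItemFast() reads candidate items from.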
+ */ + so->sortedEntries = (RumScanEntry *) palloc(sizeof(RumScanEntry) * + so->totalentries); memcpy(so->sortedEntries, so->entries, sizeof(RumScanEntry) * - so->totalentries); + so->totalentries); for (i = 0; i < so->totalentries; i++) { if (!so->sortedEntries[i]->isFinished) entryGetItem(&so->rumstate, so->sortedEntries[i]); } qsort(so->sortedEntries, so->totalentries, sizeof(RumScanEntry), - scan_entry_cmp); + scan_entry_cmp); } so->useFastScan = useFastScan; @@ -700,7 +984,7 @@ startScan(IndexScanDesc scan) * to prevent interference with vacuum */ static void -entryGetNextItem(RumState *rumstate, RumScanEntry entry) +entryGetNextItem(RumState * rumstate, RumScanEntry entry) { Page page; @@ -709,8 +993,6 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) if (entry->offset < entry->nlist) { entry->curItem = entry->list[entry->offset]; - entry->curAddInfo = entry->addInfo[entry->offset]; - entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset]; entry->offset++; return; } @@ -718,7 +1000,7 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) LockBuffer(entry->buffer, RUM_SHARE); page = BufferGetPage(entry->buffer); - if (scanPage(rumstate, entry, &entry->curItem, + if (scanPage(rumstate, entry, &entry->curItem.iptr, BufferGetPage(entry->buffer), false)) { @@ -735,7 +1017,7 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) if (RumPageRightMost(page)) { UnlockReleaseBuffer(entry->buffer); - ItemPointerSetInvalid(&entry->curItem); + ItemPointerSetInvalid(&entry->curItem.iptr); entry->buffer = InvalidBuffer; entry->isFinished = TRUE; @@ -751,13 +1033,16 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) page = BufferGetPage(entry->buffer); entry->offset = InvalidOffsetNumber; - if (!ItemPointerIsValid(&entry->curItem) || - findItemInPostingPage(page, &entry->curItem, &entry->offset, - entry->attnum, rumstate)) + if (!ItemPointerIsValid(&entry->curItem.iptr) || + findItemInPostingPage(page, &entry->curItem.iptr, &entry->offset, + entry->attnum, rumstate)) { - OffsetNumber maxoff, i; - Pointer ptr; - ItemPointerData iptr = {{0,0},0}; + OffsetNumber maxoff, + i; + Pointer ptr; + RumKey item; + + ItemPointerSetMin(&item.iptr); /* * Found position equal to or greater than stored @@ -769,16 +1054,16 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { - ptr = rumDataPageLeafRead(ptr, entry->attnum, &iptr, - &entry->addInfo[i - FirstOffsetNumber], &entry->addInfoIsNull[i - FirstOffsetNumber], rumstate); - entry->list[i - FirstOffsetNumber] = iptr; + ptr = rumDataPageLeafRead(ptr, entry->attnum, &item, + rumstate); + entry->list[i - FirstOffsetNumber] = item; } LockBuffer(entry->buffer, RUM_UNLOCK); - if (!ItemPointerIsValid(&entry->curItem) || - rumCompareItemPointers(&entry->curItem, - entry->list + entry->offset - 1) == 0) + if (!ItemPointerIsValid(&entry->curItem.iptr) || + rumCompareItemPointers(&entry->curItem.iptr, + &entry->list[entry->offset - 1].iptr) == 0) { /* * First pages are deleted or empty, or we found exact @@ -791,8 +1076,6 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) * Find greater than entry->curItem position, store it. 
*/ entry->curItem = entry->list[entry->offset - 1]; - entry->curAddInfo = entry->addInfo[entry->offset - 1]; - entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset - 1]; return; } @@ -817,7 +1100,7 @@ entryGetNextItem(RumState *rumstate, RumScanEntry entry) * current implementation this is guaranteed by the behavior of tidbitmaps. */ static void -entryGetItem(RumState *rumstate, RumScanEntry entry) +entryGetItem(RumState * rumstate, RumScanEntry entry) { Assert(!entry->isFinished); @@ -832,7 +1115,7 @@ entryGetItem(RumState *rumstate, RumScanEntry entry) if (entry->matchResult == NULL) { - ItemPointerSetInvalid(&entry->curItem); + ItemPointerSetInvalid(&entry->curItem.iptr); tbm_end_iterate(entry->matchIterator); entry->matchIterator = NULL; entry->isFinished = TRUE; @@ -853,7 +1136,7 @@ entryGetItem(RumState *rumstate, RumScanEntry entry) /* * lossy result, so we need to check the whole page */ - ItemPointerSetLossyPage(&entry->curItem, + ItemPointerSetLossyPage(&entry->curItem.iptr, entry->matchResult->blockno); /* @@ -864,7 +1147,7 @@ entryGetItem(RumState *rumstate, RumScanEntry entry) break; } - ItemPointerSet(&entry->curItem, + ItemPointerSet(&entry->curItem.iptr, entry->matchResult->blockno, entry->matchResult->offsets[entry->offset]); entry->offset++; @@ -876,12 +1159,10 @@ entryGetItem(RumState *rumstate, RumScanEntry entry) if (entry->offset <= entry->nlist) { entry->curItem = entry->list[entry->offset - 1]; - entry->curAddInfo = entry->addInfo[entry->offset - 1]; - entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset - 1]; } else { - ItemPointerSetInvalid(&entry->curItem); + ItemPointerSetInvalid(&entry->curItem.iptr); entry->isFinished = TRUE; } } @@ -917,7 +1198,7 @@ entryGetItem(RumState *rumstate, RumScanEntry entry) * logic in scanGetItem.) 
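+ * With the RumKey representation the additional information for each entry
+ * is taken directly from entry->curItem; there are no separate curAddInfo /
+ * curAddInfoIsNull fields any more.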
*/ static void -keyGetItem(RumState *rumstate, MemoryContext tempCtx, RumScanKey key) +keyGetItem(RumState * rumstate, MemoryContext tempCtx, RumScanKey key) { ItemPointerData minItem; ItemPointerData curPageLossy; @@ -944,8 +1225,8 @@ keyGetItem(RumState *rumstate, MemoryContext tempCtx, RumScanKey key) { entry = key->scanEntry[i]; if (entry->isFinished == FALSE && - rumCompareItemPointers(&entry->curItem, &minItem) < 0) - minItem = entry->curItem; + rumCompareItemPointers(&entry->curItem.iptr, &minItem) < 0) + minItem = entry->curItem.iptr; } if (ItemPointerIsMax(&minItem)) @@ -1001,7 +1282,7 @@ keyGetItem(RumState *rumstate, MemoryContext tempCtx, RumScanKey key) { entry = key->scanEntry[i]; if (entry->isFinished == FALSE && - rumCompareItemPointers(&entry->curItem, &curPageLossy) == 0) + rumCompareItemPointers(&entry->curItem.iptr, &curPageLossy) == 0) { if (haveLossyEntry) { @@ -1060,11 +1341,11 @@ keyGetItem(RumState *rumstate, MemoryContext tempCtx, RumScanKey key) { entry = key->scanEntry[i]; if (entry->isFinished == FALSE && - rumCompareItemPointers(&entry->curItem, &key->curItem) == 0) + rumCompareItemPointers(&entry->curItem.iptr, &key->curItem) == 0) { key->entryRes[i] = TRUE; - key->addInfo[i] = entry->curAddInfo; - key->addInfoIsNull[i] = entry->curAddInfoIsNull; + key->addInfo[i] = entry->curItem.addInfo; + key->addInfoIsNull[i] = entry->curItem.addInfoIsNull; } else { @@ -1113,7 +1394,7 @@ keyGetItem(RumState *rumstate, MemoryContext tempCtx, RumScanKey key) */ static bool scanGetItemRegular(IndexScanDesc scan, ItemPointer advancePast, - ItemPointerData *item, bool *recheck) + ItemPointerData *item, bool *recheck) { RumScanOpaque so = (RumScanOpaque) scan->opaque; RumState *rumstate = &so->rumstate; @@ -1136,7 +1417,7 @@ scanGetItemRegular(IndexScanDesc scan, ItemPointer advancePast, RumScanEntry entry = so->entries[i]; while (entry->isFinished == FALSE && - rumCompareItemPointers(&entry->curItem, + rumCompareItemPointers(&entry->curItem.iptr, &myAdvancePast) <= 0) entryGetItem(rumstate, entry); @@ -1249,15 +1530,24 @@ scanGetItemRegular(IndexScanDesc scan, ItemPointer advancePast, return TRUE; } +/* + * Finds part of page containing requested item using small index at the end + * of page. 
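+ * On success the items from that part are loaded into entry->list,
+ * entry->offset and entry->curItem are set to the first item greater than
+ * (or, with equalOk, equal to) the requested one, and true is returned;
+ * false means the page holds no such item.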
+ */ static bool -scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, Page page, bool equalOk) +scanPage(RumState * rumstate, RumScanEntry entry, ItemPointer item, Page page, + bool equalOk) { - int j; - ItemPointerData iptr = {{0,0},0}; - Pointer ptr; - OffsetNumber first = FirstOffsetNumber, i, maxoff; - bool found; - int cmp; + int j; + RumKey iter_item; + Pointer ptr; + OffsetNumber first = FirstOffsetNumber, + i, + maxoff; + bool found; + int cmp; + + ItemPointerSetMin(&iter_item.iptr); if (!RumPageRightMost(page)) { @@ -1271,6 +1561,7 @@ scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, Page page, bo for (j = 0; j < RumDataLeafIndexCount; j++) { RumDataLeafItemIndex *index = &RumPageGetIndexes(page)[j]; + if (index->offsetNumer == InvalidOffsetNumber) break; @@ -1279,7 +1570,7 @@ scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, Page page, bo { ptr = RumDataPageGetData(page) + index->pageOffset; first = index->offsetNumer; - iptr = index->iptr; + iter_item.iptr = index->iptr; } else { @@ -1293,13 +1584,11 @@ scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, Page page, bo found = false; for (i = first; i <= maxoff; i++) { - ptr = rumDataPageLeafRead(ptr, entry->attnum, &iptr, - &entry->addInfo[i - first], - &entry->addInfoIsNull[i - first], - rumstate); - entry->list[i - first] = iptr; - cmp = rumCompareItemPointers(item, &iptr); - if ((cmp < 0 || (cmp <= 0 && equalOk))&& entry->offset == InvalidOffsetNumber) + ptr = rumDataPageLeafRead(ptr, entry->attnum, &iter_item, rumstate); + entry->list[i - first] = iter_item; + + cmp = rumCompareItemPointers(item, &iter_item.iptr); + if ((cmp < 0 || (cmp <= 0 && equalOk)) && entry->offset == InvalidOffsetNumber) { found = true; entry->offset = i - first + 1; @@ -1309,15 +1598,16 @@ scanPage(RumState *rumstate, RumScanEntry entry, ItemPointer item, Page page, bo return false; entry->curItem = entry->list[entry->offset - 1]; - entry->curAddInfo = entry->addInfo[entry->offset - 1]; - entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset - 1]; return true; } +/* + * Find item of scan entry wich is greater or equal to the given item. 
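+ * The lookup is tried in increasingly expensive steps: the part of the page
+ * already loaded into entry->list, the remainder of the current page, a
+ * descent through the posting tree, and finally a walk along right links.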
+ */ static void -entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) +entryFindItem(RumState * rumstate, RumScanEntry entry, RumKey * item) { - Page page = NULL; + Page page = NULL; if (entry->nlist == 0) { @@ -1325,17 +1615,18 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) return; } - if (rumCompareItemPointers(&entry->list[entry->nlist - 1], item) >= 0) + /* Try to find in loaded part of page */ + if (rumCompareItemPointers(&entry->list[entry->nlist - 1].iptr, + &item->iptr) >= 0) { - if (rumCompareItemPointers(&entry->curItem, item) >= 0) + if (rumCompareItemPointers(&entry->curItem.iptr, &item->iptr) >= 0) return; while (entry->offset < entry->nlist) { - if (rumCompareItemPointers(&entry->list[entry->offset], item) >= 0) + if (rumCompareItemPointers(&entry->list[entry->offset].iptr, + &item->iptr) >= 0) { entry->curItem = entry->list[entry->offset]; - entry->curAddInfo = entry->addInfo[entry->offset]; - entry->curAddInfoIsNull = entry->addInfoIsNull[entry->offset]; entry->offset++; return; } @@ -1343,16 +1634,16 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) } } - if (!BufferIsValid(entry->buffer)) { entry->isFinished = TRUE; return; } + /* Check rest of page */ LockBuffer(entry->buffer, RUM_SHARE); - if (scanPage(rumstate, entry, item, + if (scanPage(rumstate, entry, &item->iptr, BufferGetPage(entry->buffer), true)) { @@ -1360,6 +1651,7 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) return; } + /* Try to traverse to another leaf page */ entry->gdi->btree.items = item; entry->gdi->btree.curitem = 0; @@ -1369,7 +1661,7 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) page = BufferGetPage(entry->buffer); - if (scanPage(rumstate, entry, item, + if (scanPage(rumstate, entry, &item->iptr, BufferGetPage(entry->buffer), true)) { @@ -1377,11 +1669,12 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) return; } + /* At last try to traverse by right links */ for (;;) { /* - * It's needed to go by right link. During that we should refind - * first ItemPointer greater that stored + * It's needed to go by right link. During that we should refind first + * ItemPointer greater that stored */ BlockNumber blkno; @@ -1391,7 +1684,7 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) if (blkno == InvalidBlockNumber) { ReleaseBuffer(entry->buffer); - ItemPointerSetInvalid(&entry->curItem); + ItemPointerSetInvalid(&entry->curItem.iptr); entry->buffer = InvalidBuffer; entry->gdi->stack->buffer = InvalidBuffer; entry->isFinished = TRUE; @@ -1406,7 +1699,7 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) LockBuffer(entry->buffer, RUM_SHARE); page = BufferGetPage(entry->buffer); - if (scanPage(rumstate, entry, item, + if (scanPage(rumstate, entry, &item->iptr, BufferGetPage(entry->buffer), true)) { @@ -1416,27 +1709,35 @@ entryFindItem(RumState *rumstate, RumScanEntry entry, ItemPointer item) } } +/* + * Do preConsistent check for all the key where applicable. 
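+ * Keys used only for ordering, EVERYTHING-mode keys and keys whose opclass
+ * has no preConsistent method are skipped; the check fails as soon as any
+ * remaining key rejects the current combination of entry states.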
+ */ static bool preConsistentCheck(RumScanOpaque so) { - RumState *rumstate = &so->rumstate; - int i, j; - bool recheck; + RumState *rumstate = &so->rumstate; + uint32 i, + j; + bool recheck; for (j = 0; j < so->nkeys; j++) { - RumScanKey key = &so->keys[j]; - bool hasFalse = false; + RumScanKey key = &so->keys[j]; + bool hasFalse = false; if (key->orderBy) continue; + if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING) + continue; + if (!so->rumstate.canPreConsistent[key->attnum - 1]) continue; for (i = 0; i < key->nentries; i++) { RumScanEntry entry = key->scanEntry[i]; + key->entryRes[i] = entry->preValue; if (!entry->preValue) hasFalse = true; @@ -1446,47 +1747,62 @@ preConsistentCheck(RumScanOpaque so) continue; if (!DatumGetBool(FunctionCall8Coll(&rumstate->preConsistentFn[key->attnum - 1], - rumstate->supportCollation[key->attnum - 1], - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), - PointerGetDatum(&recheck), - PointerGetDatum(key->queryValues), - PointerGetDatum(key->queryCategories) - - ))) + rumstate->supportCollation[key->attnum - 1], + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(&recheck), + PointerGetDatum(key->queryValues), + PointerGetDatum(key->queryCategories) + + ))) return false; } return true; } +/* + * Shift value of some entry which index in so->sortedEntries is equal or greater + * to i. + */ static void entryShift(int i, RumScanOpaque so, bool find) { - int minIndex = -1, j; - uint32 minPredictNumberResult = 0; - RumState *rumstate = &so->rumstate; + int minIndex = -1, + j; + uint32 minPredictNumberResult = 0; + RumState *rumstate = &so->rumstate; + /* + * It's more efficient to move entry with smallest posting list/tree. So + * find one. + */ for (j = i; j < so->totalentries; j++) { - if (minIndex < 0 || so->sortedEntries[j]->predictNumberResult < minPredictNumberResult) + if (minIndex < 0 || + so->sortedEntries[j]->predictNumberResult < minPredictNumberResult) { minIndex = j; minPredictNumberResult = so->sortedEntries[j]->predictNumberResult; } } + /* Do shift of required type */ if (find) - entryFindItem(rumstate, so->sortedEntries[minIndex], &so->sortedEntries[i - 1]->curItem); + entryFindItem(rumstate, so->sortedEntries[minIndex], + &so->sortedEntries[i - 1]->curItem); else if (!so->sortedEntries[minIndex]->isFinished) entryGetItem(rumstate, so->sortedEntries[minIndex]); + /* Restore order of so->sortedEntries */ while (minIndex > 0 && - cmpEntries(so->sortedEntries[minIndex], so->sortedEntries[minIndex - 1]) > 0) + cmpEntries(so->sortedEntries[minIndex], + so->sortedEntries[minIndex - 1]) > 0) { RumScanEntry tmp; + tmp = so->sortedEntries[minIndex]; so->sortedEntries[minIndex] = so->sortedEntries[minIndex - 1]; so->sortedEntries[minIndex - 1] = tmp; @@ -1494,13 +1810,19 @@ entryShift(int i, RumScanOpaque so, bool find) } } +/* + * Get next item pointer using fast scan. 
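+ * Entries stay sorted by descending item pointer.  Each iteration locates
+ * the border where preConsistent stops holding, advances the entry with the
+ * smallest predicted result set across it, and runs the full consistent
+ * check only when no such border remains.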
+ */ static bool scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, - ItemPointerData *item, bool *recheck) + ItemPointerData *item, bool *recheck) { RumScanOpaque so = (RumScanOpaque) scan->opaque; - int i, j, k; - bool preConsistentFalse, consistentFalse; + int i, + j, + k; + bool preConsistentResult, + consistentResult; if (so->entriesIncrIndex >= 0) { @@ -1510,7 +1832,11 @@ scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, for (;;) { - preConsistentFalse = false; + /* + * Our entries is ordered by descending of item pointers. The first + * goal is to find border where preConsistent becomes false. + */ + preConsistentResult = true; j = 0; k = 0; for (i = 0; i < so->totalentries; i++) @@ -1523,24 +1849,26 @@ scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, for (; j < i; j++) so->sortedEntries[j]->preValue = false; - if (!preConsistentCheck(so)) - { - preConsistentFalse = true; + if ((preConsistentResult = preConsistentCheck(so)) == false) break; - } } } + /* + * If we found false in preConsistent then we can safely move entries + * which was true in preConsistent argument. + */ if (so->sortedEntries[i - 1]->isFinished == TRUE) return false; - if (preConsistentFalse) + if (preConsistentResult == false) { entryShift(i, so, true); continue; } - consistentFalse = false; + /* Call consistent method */ + consistentResult = true; for (i = 0; i < so->nkeys; i++) { RumScanKey key = so->keys + i; @@ -1551,13 +1879,14 @@ scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, for (j = 0; j < key->nentries; j++) { RumScanEntry entry = key->scanEntry[j]; + if (entry->isFinished == FALSE && - rumCompareItemPointers(&entry->curItem, - &so->sortedEntries[so->totalentries - 1]->curItem) == 0) + rumCompareItemPointers(&entry->curItem.iptr, + &so->sortedEntries[so->totalentries - 1]->curItem.iptr) == 0) { key->entryRes[j] = TRUE; - key->addInfo[j] = entry->curAddInfo; - key->addInfoIsNull[j] = entry->curAddInfoIsNull; + key->addInfo[j] = entry->curItem.addInfo; + key->addInfoIsNull[j] = entry->curItem.addInfoIsNull; } else { @@ -1568,15 +1897,16 @@ scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, } if (!callConsistentFn(&so->rumstate, key)) { - consistentFalse = true; + consistentResult = false; entryShift(k, so, false); continue; } } - if (consistentFalse) + if (consistentResult == false) continue; + /* Calculate recheck from each key */ *recheck = false; for (i = 0; i < so->nkeys; i++) { @@ -1592,7 +1922,7 @@ scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, } } - *item = so->sortedEntries[so->totalentries - 1]->curItem; + *item = so->sortedEntries[so->totalentries - 1]->curItem.iptr; so->entriesIncrIndex = k; return true; @@ -1600,6 +1930,9 @@ scanGetItemFast(IndexScanDesc scan, ItemPointer advancePast, return false; } +/* + * Get next item whether using regular or fast scan. + */ static bool scanGetItem(IndexScanDesc scan, ItemPointer advancePast, ItemPointerData *item, bool *recheck) @@ -1716,10 +2049,10 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos) * of rumtuple_get_key() on the current page. 
*/ static bool -matchPartialInPendingList(RumState *rumstate, Page page, +matchPartialInPendingList(RumState * rumstate, Page page, OffsetNumber off, OffsetNumber maxoff, RumScanEntry entry, - Datum *datum, RumNullCategory *category, + Datum *datum, RumNullCategory * category, bool *datumExtracted) { IndexTuple itup; @@ -1790,7 +2123,7 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) OffsetNumber attrnum; Page page; IndexTuple itup; - int i, + uint32 i, j; /* @@ -1925,9 +2258,9 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos) key->entryRes[j] = true; if (OidIsValid(so->rumstate.addInfoTypeOid[i])) key->addInfo[j] = index_getattr(itup, - so->rumstate.oneCol ? 2 : 3, - so->rumstate.tupdesc[attrnum - 1], - &key->addInfoIsNull[j]); + so->rumstate.oneCol ? 2 : 3, + so->rumstate.tupdesc[attrnum - 1], + &key->addInfoIsNull[j]); } /* done with binary search */ @@ -2011,7 +2344,7 @@ scanPendingInsert(IndexScanDesc scan) MemoryContext oldCtx; bool recheck, match; - int i; + uint32 i; pendingPosition pos; Buffer metabuffer = ReadBuffer(scan->indexRelation, RUM_METAPAGE_BLKNO); BlockNumber blkno; @@ -2103,7 +2436,7 @@ scanPendingInsert(IndexScanDesc scan) int64 rumgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) { - RumScanOpaque so = (RumScanOpaque)scan->opaque; + RumScanOpaque so = (RumScanOpaque) scan->opaque; int64 ntids; bool recheck; @@ -2155,11 +2488,11 @@ rumgetbitmap(IndexScanDesc scan, TIDBitmap *tbm) } static float8 -keyGetOrdering(RumState *rumstate, MemoryContext tempCtx, RumScanKey key, +keyGetOrdering(RumState * rumstate, MemoryContext tempCtx, RumScanKey key, ItemPointer iptr) { RumScanEntry entry; - int i; + uint32 i; if (key->useAddToColumn) { @@ -2171,20 +2504,20 @@ keyGetOrdering(RumState *rumstate, MemoryContext tempCtx, RumScanKey key, return DatumGetFloat8(FunctionCall3( &rumstate->outerOrderingFn[rumstate->attrnOrderByColumn - 1], - key->outerAddInfo, - key->queryValues[0], - UInt16GetDatum(key->strategy) - )); + key->outerAddInfo, + key->queryValues[0], + UInt16GetDatum(key->strategy) + )); } for (i = 0; i < key->nentries; i++) { entry = key->scanEntry[i]; if (entry->isFinished == FALSE && - rumCompareItemPointers(&entry->curItem, iptr) == 0) + rumCompareItemPointers(&entry->curItem.iptr, iptr) == 0) { - key->addInfo[i] = entry->curAddInfo; - key->addInfoIsNull[i] = entry->curAddInfoIsNull; + key->addInfo[i] = entry->curItem.addInfo; + key->addInfoIsNull[i] = entry->curItem.addInfoIsNull; key->entryRes[i] = true; } else @@ -2196,25 +2529,26 @@ keyGetOrdering(RumState *rumstate, MemoryContext tempCtx, RumScanKey key, } return DatumGetFloat8(FunctionCall10Coll(&rumstate->orderingFn[key->attnum - 1], - rumstate->supportCollation[key->attnum - 1], - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), - PointerGetDatum(&key->recheckCurItem), - PointerGetDatum(key->queryValues), - PointerGetDatum(key->queryCategories), - PointerGetDatum(key->addInfo), + rumstate->supportCollation[key->attnum - 1], + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(&key->recheckCurItem), + PointerGetDatum(key->queryValues), + PointerGetDatum(key->queryCategories), + PointerGetDatum(key->addInfo), PointerGetDatum(key->addInfoIsNull) - )); + )); } static void insertScanItem(RumScanOpaque so, bool recheck) { RumSortItem *item; - int i, j; + uint32 
i, + j; item = (RumSortItem *) MemoryContextAlloc(rum_tuplesort_get_memorycontext(so->sortstate), @@ -2224,11 +2558,13 @@ insertScanItem(RumScanOpaque so, bool recheck) if (AttributeNumberIsValid(so->rumstate.attrnAddToColumn)) { - int nOrderByAnother = 0, count = 0; + int nOrderByAnother = 0, + count = 0; for (i = 0; i < so->nkeys; i++) { - if (so->keys[i].useAddToColumn) { + if (so->keys[i].useAddToColumn) + { so->keys[i].outerAddInfoIsNull = true; nOrderByAnother++; } @@ -2242,7 +2578,7 @@ insertScanItem(RumScanOpaque so, bool recheck) Assert(!so->keys[i].orderBy); Assert(!so->keys[i].useAddToColumn); - for(j = i; j < so->nkeys; j++) + for (j = i; j < so->nkeys; j++) { if (so->keys[j].useAddToColumn && so->keys[j].outerAddInfoIsNull == true) @@ -2263,6 +2599,12 @@ insertScanItem(RumScanOpaque so, bool recheck) continue; item->data[j] = keyGetOrdering(&so->rumstate, so->tempCtx, &so->keys[i], &so->iptr); + + /* + * elog(NOTICE, "%f %u:%u", item->data[j], + * ItemPointerGetBlockNumber(&item->iptr), + * ItemPointerGetOffsetNumber(&item->iptr)); + */ j++; } rum_tuplesort_putrum(so->sortstate, item); @@ -2272,9 +2614,9 @@ bool rumgettuple(IndexScanDesc scan, ScanDirection direction) { bool recheck; - RumScanOpaque so = (RumScanOpaque)scan->opaque; + RumScanOpaque so = (RumScanOpaque) scan->opaque; RumSortItem *item; - bool should_free; + bool should_free; if (so->firstCall) { @@ -2311,7 +2653,8 @@ rumgettuple(IndexScanDesc scan, ScanDirection direction) item = rum_tuplesort_getrum(so->sortstate, true, &should_free); if (item) { - int i, j = 0; + uint32 i, + j = 0; scan->xs_ctup.t_self = item->iptr; scan->xs_recheck = item->recheck; diff --git a/ruminsert.c b/ruminsert.c index 2fbaf0e39a..50d1606812 100644 --- a/ruminsert.c +++ b/ruminsert.c @@ -29,7 +29,7 @@ typedef struct MemoryContext tmpCtx; MemoryContext funcCtx; BuildAccumulator accum; -} RumBuildState; +} RumBuildState; /* * Creates new posting tree with one page, containing the given TIDs. @@ -38,8 +38,8 @@ typedef struct * items[] must be in sorted order with no duplicates. */ static BlockNumber -createPostingTree(RumState *rumstate, OffsetNumber attnum, Relation index, - ItemPointerData *items, Datum *addInfo, bool *addInfoIsNull, uint32 nitems) +createPostingTree(RumState * rumstate, OffsetNumber attnum, Relation index, + RumKey * items, uint32 nitems) { BlockNumber blkno; Buffer buffer = RumNewBuffer(index); @@ -47,7 +47,7 @@ createPostingTree(RumState *rumstate, OffsetNumber attnum, Relation index, int i, freespace; Pointer ptr; - ItemPointerData prev_iptr = {{0,0},0}; + ItemPointerData prev_iptr = {{0, 0}, 0}; GenericXLogState *state; state = GenericXLogStart(index); @@ -62,9 +62,9 @@ createPostingTree(RumState *rumstate, OffsetNumber attnum, Relation index, for (i = 0; i < nitems; i++) { if (i > 0) - prev_iptr = items[i - 1]; - ptr = rumPlaceToDataPageLeaf(ptr, attnum, &items[i], addInfo[i], - addInfoIsNull[i], &prev_iptr, rumstate); + prev_iptr = items[i - 1].iptr; + ptr = rumPlaceToDataPageLeaf(ptr, attnum, &items[i], + &prev_iptr, rumstate); } freespace = RumDataPageFreeSpacePre(page, ptr); Assert(freespace >= 0); @@ -93,20 +93,16 @@ createPostingTree(RumState *rumstate, OffsetNumber attnum, Relation index, * for filling the posting list afterwards, if ipd = NULL and nipd > 0. 
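+ * The posting list is now passed as an array of RumKey ("items"); the
+ * mention of ipd above refers to that same array.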
*/ static IndexTuple -RumFormTuple(RumState *rumstate, +RumFormTuple(RumState * rumstate, OffsetNumber attnum, Datum key, RumNullCategory category, - ItemPointerData *ipd, - Datum *addInfo, - bool *addInfoIsNull, - uint32 nipd, - bool errorTooBig) + RumKey * items, uint32 nipd, bool errorTooBig) { Datum datums[3]; bool isnull[3]; IndexTuple itup; uint32 newsize; int i; - ItemPointerData nullItemPointer = {{0,0},0}; + ItemPointerData nullItemPointer = {{0, 0}, 0}; /* Build the basic tuple: optional column number, plus key datum */ if (rumstate->oneCol) @@ -149,15 +145,18 @@ RumFormTuple(RumState *rumstate, if (nipd > 0) { - newsize = rumCheckPlaceToDataPageLeaf(attnum, &ipd[0], addInfo[0], - addInfoIsNull[0], &nullItemPointer, rumstate, newsize); + newsize = rumCheckPlaceToDataPageLeaf(attnum, &items[0], + &nullItemPointer, + rumstate, newsize); for (i = 1; i < nipd; i++) { - newsize = rumCheckPlaceToDataPageLeaf(attnum, &ipd[i], addInfo[i], - addInfoIsNull[i], &ipd[i - 1], rumstate, newsize); + newsize = rumCheckPlaceToDataPageLeaf(attnum, &items[i], + &items[i - 1].iptr, + rumstate, newsize); } } + if (category != RUM_CAT_NORM_KEY) { Assert(IndexTupleHasNulls(itup)); @@ -196,14 +195,19 @@ RumFormTuple(RumState *rumstate, */ if (nipd > 0) { - char *ptr = RumGetPosting(itup); - ptr = rumPlaceToDataPageLeaf(ptr, attnum, &ipd[0], addInfo[0], - addInfoIsNull[0], &nullItemPointer, rumstate); + char *ptr = RumGetPosting(itup); + + ptr = rumPlaceToDataPageLeaf(ptr, attnum, &items[0], + &nullItemPointer, rumstate); for (i = 1; i < nipd; i++) { - ptr = rumPlaceToDataPageLeaf(ptr, attnum, &ipd[i], addInfo[i], - addInfoIsNull[i], &ipd[i-1], rumstate); + ptr = rumPlaceToDataPageLeaf(ptr, attnum, &items[i], + &items[i - 1].iptr, rumstate); } + + Assert(MAXALIGN((ptr - ((char *) itup)) + + ((category == RUM_CAT_NORM_KEY) ? 0 : sizeof(RumNullCategory))) + == newsize); } /* @@ -214,6 +218,7 @@ RumFormTuple(RumState *rumstate, Assert(IndexTupleHasNulls(itup)); RumSetNullCategory(itup, rumstate, category); } + return itup; } @@ -225,20 +230,18 @@ RumFormTuple(RumState *rumstate, * items[] must be in sorted order with no duplicates. 
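+ * The old posting list is read back as RumKey items, merged with the new
+ * ones by rumMergeItemPointers(), and a new leaf tuple is formed; when the
+ * merged list no longer fits, the old items seed a posting tree and the new
+ * items are inserted into that tree.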
*/ static IndexTuple -addItemPointersToLeafTuple(RumState *rumstate, - IndexTuple old, - ItemPointerData *items, Datum *addInfo, - bool *addInfoIsNull, uint32 nitem, +addItemPointersToLeafTuple(RumState * rumstate, + IndexTuple old, RumKey * items, uint32 nitem, GinStatsData *buildStats) { OffsetNumber attnum; Datum key; RumNullCategory category; IndexTuple res; - Datum *oldAddInfo, *newAddInfo; - bool *oldAddInfoIsNull, *newAddInfoIsNull; - ItemPointerData *newItems, *oldItems; - int oldNPosting, newNPosting; + RumKey *newItems, + *oldItems; + int oldNPosting, + newNPosting; Assert(!RumIsPostingTree(old)); @@ -246,28 +249,20 @@ addItemPointersToLeafTuple(RumState *rumstate, key = rumtuple_get_key(rumstate, old, &category); oldNPosting = RumGetNPosting(old); - - oldItems = (ItemPointerData *)palloc(sizeof(ItemPointerData) * oldNPosting); - oldAddInfo = (Datum *)palloc(sizeof(Datum) * oldNPosting); - oldAddInfoIsNull = (bool *)palloc(sizeof(bool) * oldNPosting); + oldItems = (RumKey *) palloc(sizeof(RumKey) * oldNPosting); newNPosting = oldNPosting + nitem; + newItems = (RumKey *) palloc(sizeof(RumKey) * newNPosting); - newItems = (ItemPointerData *)palloc(sizeof(ItemPointerData) * newNPosting); - newAddInfo = (Datum *)palloc(sizeof(Datum) * newNPosting); - newAddInfoIsNull = (bool *)palloc(sizeof(bool) * newNPosting); + rumReadTuple(rumstate, attnum, old, oldItems); - rumReadTuple(rumstate, attnum, old, oldItems, oldAddInfo, oldAddInfoIsNull); - - newNPosting = rumMergeItemPointers(newItems, newAddInfo, newAddInfoIsNull, - items, addInfo, addInfoIsNull, nitem, - oldItems, oldAddInfo, oldAddInfoIsNull, oldNPosting); + newNPosting = rumMergeItemPointers(rumstate, newItems, + items, nitem, oldItems, oldNPosting); /* try to build tuple with room for all the items */ res = RumFormTuple(rumstate, attnum, key, category, - newItems, newAddInfo, newAddInfoIsNull, newNPosting, - false); + newItems, newNPosting, false); if (!res) { @@ -284,8 +279,6 @@ addItemPointersToLeafTuple(RumState *rumstate, attnum, rumstate->index, oldItems, - oldAddInfo, - oldAddInfoIsNull, oldNPosting); /* During index build, count the newly-added data page */ @@ -296,12 +289,12 @@ addItemPointersToLeafTuple(RumState *rumstate, gdi = rumPrepareScanPostingTree(rumstate->index, postingRoot, FALSE, attnum, rumstate); gdi->btree.isBuild = (buildStats != NULL); - rumInsertItemPointers(rumstate, attnum, gdi, items, addInfo, addInfoIsNull, nitem, buildStats); + rumInsertItemPointers(rumstate, attnum, gdi, items, nitem, buildStats); pfree(gdi); /* And build a new posting-tree-only result tuple */ - res = RumFormTuple(rumstate, attnum, key, category, NULL, NULL, NULL, 0, true); + res = RumFormTuple(rumstate, attnum, key, category, NULL, 0, true); RumSetPostingTree(res, postingRoot); } @@ -317,43 +310,41 @@ addItemPointersToLeafTuple(RumState *rumstate, * but working from slightly different input. 
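+ * If the items do not fit into a single posting list, a posting tree is
+ * built instead; the sizing loop below counts how many items fit on its
+ * first page and steps back one item only when the size probe overflows
+ * RumDataPageSize.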
*/ static IndexTuple -buildFreshLeafTuple(RumState *rumstate, +buildFreshLeafTuple(RumState * rumstate, OffsetNumber attnum, Datum key, RumNullCategory category, - ItemPointerData *items, Datum *addInfo, - bool *addInfoIsNull, uint32 nitem, - GinStatsData *buildStats) + RumKey * items, uint32 nitem, GinStatsData *buildStats) { IndexTuple res; /* try to build tuple with room for all the items */ - res = RumFormTuple(rumstate, attnum, key, category, - items, addInfo, addInfoIsNull, nitem, false); + res = RumFormTuple(rumstate, attnum, key, category, items, nitem, false); if (!res) { /* posting list would be too big, build posting tree */ BlockNumber postingRoot; - ItemPointerData prevIptr = {{0,0},0}; - Size size = 0; - int itemsCount = 0; + ItemPointerData prevIptr = {{0, 0}, 0}; + Size size = 0; + int itemsCount = 0; do { size = rumCheckPlaceToDataPageLeaf(attnum, &items[itemsCount], - addInfo[itemsCount], addInfoIsNull[itemsCount], &prevIptr, - rumstate, size); - prevIptr = items[itemsCount]; + &prevIptr, rumstate, size); + prevIptr = items[itemsCount].iptr; itemsCount++; } while (itemsCount < nitem && size < RumDataPageSize); - itemsCount--; + + if (size >= RumDataPageSize) + itemsCount--; /* * Build posting-tree-only result tuple. We do this first so as to * fail quickly if the key is too big. */ - res = RumFormTuple(rumstate, attnum, key, category, NULL, NULL, NULL, 0, true); + res = RumFormTuple(rumstate, attnum, key, category, NULL, 0, true); /* * Initialize posting tree with as many TIDs as will fit on the first @@ -363,8 +354,6 @@ buildFreshLeafTuple(RumState *rumstate, attnum, rumstate->index, items, - addInfo, - addInfoIsNull, itemsCount); /* During index build, count the newly-added data page */ @@ -376,15 +365,14 @@ buildFreshLeafTuple(RumState *rumstate, { RumPostingTreeScan *gdi; - gdi = rumPrepareScanPostingTree(rumstate->index, postingRoot, FALSE, attnum, rumstate); + gdi = rumPrepareScanPostingTree(rumstate->index, postingRoot, FALSE, + attnum, rumstate); gdi->btree.isBuild = (buildStats != NULL); rumInsertItemPointers(rumstate, attnum, gdi, items + itemsCount, - addInfo + itemsCount, - addInfoIsNull + itemsCount, nitem - itemsCount, buildStats); @@ -406,30 +394,15 @@ buildFreshLeafTuple(RumState *rumstate, * it contains should be incremented as needed. 
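+ * Additional information now travels inside the RumKey items, so the
+ * defaulted addInfo/addInfoIsNull arrays that used to be built here are
+ * gone.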
*/ void -rumEntryInsert(RumState *rumstate, +rumEntryInsert(RumState * rumstate, OffsetNumber attnum, Datum key, RumNullCategory category, - ItemPointerData *items, - Datum *addInfo, - bool *addInfoIsNull, - uint32 nitem, + RumKey * items, uint32 nitem, GinStatsData *buildStats) { RumBtreeData btree; RumBtreeStack *stack; IndexTuple itup; Page page; - int i; - - if (!addInfoIsNull || !addInfo) - { - addInfoIsNull = (bool *)palloc(sizeof(bool) * nitem); - addInfo = (Datum *)palloc(sizeof(Datum) * nitem); - for (i = 0; i < nitem; i++) - { - addInfoIsNull[i] = true; - addInfo[i] = (Datum) 0; - } - } /* During index build, count the to-be-inserted entry */ if (buildStats) @@ -456,9 +429,11 @@ rumEntryInsert(RumState *rumstate, freeRumBtreeStack(stack); /* insert into posting tree */ - gdi = rumPrepareScanPostingTree(rumstate->index, rootPostingTree, FALSE, attnum, rumstate); + gdi = rumPrepareScanPostingTree(rumstate->index, rootPostingTree, + FALSE, attnum, rumstate); gdi->btree.isBuild = (buildStats != NULL); - rumInsertItemPointers(rumstate, attnum, gdi, items, addInfo, addInfoIsNull, nitem, buildStats); + rumInsertItemPointers(rumstate, attnum, gdi, items, + nitem, buildStats); pfree(gdi); return; @@ -466,7 +441,7 @@ rumEntryInsert(RumState *rumstate, /* modify an existing leaf entry */ itup = addItemPointersToLeafTuple(rumstate, itup, - items, addInfo, addInfoIsNull, nitem, buildStats); + items, nitem, buildStats); btree.isDelete = TRUE; } @@ -474,7 +449,7 @@ rumEntryInsert(RumState *rumstate, { /* no match, so construct a new leaf entry */ itup = buildFreshLeafTuple(rumstate, attnum, key, category, - items, addInfo, addInfoIsNull, nitem, buildStats); + items, nitem, buildStats); } /* Insert the new or modified leaf tuple */ @@ -490,11 +465,11 @@ rumEntryInsert(RumState *rumstate, * This function is used only during initial index creation. 
*/ static void -rumHeapTupleBulkInsert(RumBuildState *buildstate, OffsetNumber attnum, +rumHeapTupleBulkInsert(RumBuildState * buildstate, OffsetNumber attnum, Datum value, bool isNull, ItemPointer heapptr, - Datum outerAddInfo, - bool outerAddInfoIsNull) + Datum outerAddInfo, + bool outerAddInfoIsNull) { Datum *entries; RumNullCategory *categories; @@ -516,7 +491,7 @@ rumHeapTupleBulkInsert(RumBuildState *buildstate, OffsetNumber attnum, addInfo = palloc(sizeof(*addInfo) * nentries); addInfoIsNull = palloc(sizeof(*addInfoIsNull) * nentries); - for(i=0; irumstate.attrnOrderByColumn)) @@ -567,37 +542,20 @@ rumBuildCallback(Relation index, HeapTuple htup, Datum *values, /* If we've maxed out our available memory, dump everything to the index */ if (buildstate->accum.allocatedMemory >= maintenance_work_mem * 1024L) { - RumEntryAccumulatorItem *list; + RumKey *items; Datum key; RumNullCategory category; uint32 nlist; OffsetNumber attnum; rumBeginBAScan(&buildstate->accum); - while ((list = rumGetBAEntry(&buildstate->accum, + while ((items = rumGetBAEntry(&buildstate->accum, &attnum, &key, &category, &nlist)) != NULL) { - ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(*iptrs) *nlist); - Datum *addInfo = (Datum *)palloc(sizeof(*addInfo) * nlist); - bool *addInfoIsNull = (bool *)palloc(sizeof(*addInfoIsNull) * nlist); - int i; - - for (i = 0; i < nlist; i++) - { - iptrs[i] = list[i].iptr; - addInfo[i] = list[i].addInfo; - addInfoIsNull[i] = list[i].addInfoIsNull; - } - - /* there could be many entries, so be willing to abort here */ CHECK_FOR_INTERRUPTS(); rumEntryInsert(&buildstate->rumstate, attnum, key, category, - iptrs, addInfo, addInfoIsNull, nlist, &buildstate->buildStats); - - pfree(addInfoIsNull); - pfree(addInfo); - pfree(iptrs); + items, nlist, &buildstate->buildStats); } MemoryContextReset(buildstate->tmpCtx); @@ -611,17 +569,17 @@ IndexBuildResult * rumbuild(Relation heap, Relation index, struct IndexInfo *indexInfo) { IndexBuildResult *result; - double reltuples; - RumBuildState buildstate; - Buffer RootBuffer, - MetaBuffer; - RumEntryAccumulatorItem *list; - Datum key; - RumNullCategory category; - uint32 nlist; - MemoryContext oldCtx; - OffsetNumber attnum; - GenericXLogState *state; + double reltuples; + RumBuildState buildstate; + Buffer RootBuffer, + MetaBuffer; + RumKey *items; + Datum key; + RumNullCategory category; + uint32 nlist; + MemoryContext oldCtx; + OffsetNumber attnum; + GenericXLogState *state; if (RelationGetNumberOfBlocks(index) != 0) elog(ERROR, "index \"%s\" already contains data", @@ -678,25 +636,13 @@ rumbuild(Relation heap, Relation index, struct IndexInfo *indexInfo) /* dump remaining entries to the index */ oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx); rumBeginBAScan(&buildstate.accum); - while ((list = rumGetBAEntry(&buildstate.accum, - &attnum, &key, &category, &nlist)) != NULL) + while ((items = rumGetBAEntry(&buildstate.accum, + &attnum, &key, &category, &nlist)) != NULL) { - ItemPointerData *iptrs = (ItemPointerData *)palloc(sizeof(ItemPointerData) *nlist); - Datum *addInfo = (Datum *)palloc(sizeof(Datum) * nlist); - bool *addInfoIsNull = (bool *)palloc(sizeof(bool) * nlist); - int i; - - for (i = 0; i < nlist; i++) - { - iptrs[i] = list[i].iptr; - addInfo[i] = list[i].addInfo; - addInfoIsNull[i] = list[i].addInfoIsNull; - } - /* there could be many entries, so be willing to abort here */ CHECK_FOR_INTERRUPTS(); rumEntryInsert(&buildstate.rumstate, attnum, key, category, - iptrs, addInfo, addInfoIsNull, nlist, &buildstate.buildStats); 
+ items, nlist, &buildstate.buildStats); } MemoryContextSwitchTo(oldCtx); @@ -728,7 +674,7 @@ rumbuildempty(Relation index) { Buffer RootBuffer, MetaBuffer; - GenericXLogState *state; + GenericXLogState *state; state = GenericXLogStart(index); @@ -750,7 +696,7 @@ rumbuildempty(Relation index) UnlockReleaseBuffer(MetaBuffer); UnlockReleaseBuffer(RootBuffer); - return ; + return; } /* @@ -758,11 +704,11 @@ rumbuildempty(Relation index) * (non-fast-update) insertion */ static void -rumHeapTupleInsert(RumState *rumstate, OffsetNumber attnum, +rumHeapTupleInsert(RumState * rumstate, OffsetNumber attnum, Datum value, bool isNull, ItemPointer item, - Datum outerAddInfo, - bool outerAddInfoIsNull) + Datum outerAddInfo, + bool outerAddInfoIsNull) { Datum *entries; RumNullCategory *categories; @@ -772,14 +718,14 @@ rumHeapTupleInsert(RumState *rumstate, OffsetNumber attnum, bool *addInfoIsNull; entries = rumExtractEntries(rumstate, attnum, value, isNull, - &nentries, &categories, &addInfo, &addInfoIsNull); + &nentries, &categories, &addInfo, &addInfoIsNull); if (attnum == rumstate->attrnAddToColumn) { addInfo = palloc(sizeof(*addInfo) * nentries); addInfoIsNull = palloc(sizeof(*addInfoIsNull) * nentries); - for(i=0; ikeyCtx = AllocSetContextCreate(CurrentMemoryContext, - "Gin scan key context", + "Rum scan key context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); @@ -107,12 +107,13 @@ rumFillScanEntry(RumScanOpaque so, OffsetNumber attnum, scanEntry->attnum = attnum; scanEntry->buffer = InvalidBuffer; - ItemPointerSetMin(&scanEntry->curItem); + RumItemSetMin(&scanEntry->curItem); scanEntry->matchBitmap = NULL; scanEntry->matchIterator = NULL; scanEntry->matchResult = NULL; scanEntry->list = NULL; scanEntry->nlist = 0; + scanEntry->nalloc = 0; scanEntry->offset = InvalidOffsetNumber; scanEntry->isFinished = false; scanEntry->reduceResult = false; @@ -136,7 +137,7 @@ static void rumFillScanKey(RumScanOpaque so, OffsetNumber attnum, StrategyNumber strategy, int32 searchMode, Datum query, uint32 nQueryValues, - Datum *queryValues, RumNullCategory *queryCategories, + Datum *queryValues, RumNullCategory * queryCategories, bool *partial_matches, Pointer *extra_data, bool orderBy) { @@ -169,7 +170,7 @@ rumFillScanKey(RumScanOpaque so, OffsetNumber attnum, if (nQueryValues != 1) elog(ERROR, "extractQuery should return only one value"); if (rumstate->canOuterOrdering[attnum - 1] == false) - elog(ERROR,"doesn't support ordering as additional info"); + elog(ERROR, "doesn't support ordering as additional info"); key->useAddToColumn = true; key->attnum = rumstate->attrnAddToColumn; @@ -273,10 +274,6 @@ freeScanKeys(RumScanOpaque so) } if (entry->list) pfree(entry->list); - if (entry->addInfo) - pfree(entry->addInfo); - if (entry->addInfoIsNull) - pfree(entry->addInfoIsNull); if (entry->matchIterator) tbm_end_iterate(entry->matchIterator); if (entry->matchBitmap) @@ -301,7 +298,7 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) Datum *queryValues; int32 nQueryValues = 0; bool *partial_matches = NULL; - Pointer *extra_data = NULL; + Pointer *extra_data = NULL; bool *nullFlags = NULL; int32 searchMode = GIN_SEARCH_MODE_DEFAULT; @@ -318,20 +315,20 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) /* OK to call the extractQueryFn */ queryValues = (Datum *) DatumGetPointer(FunctionCall7Coll(&so->rumstate.extractQueryFn[skey->sk_attno - 1], - so->rumstate.supportCollation[skey->sk_attno - 1], - skey->sk_argument, - PointerGetDatum(&nQueryValues), - 
UInt16GetDatum(skey->sk_strategy), - PointerGetDatum(&partial_matches), - PointerGetDatum(&extra_data), - PointerGetDatum(&nullFlags), - PointerGetDatum(&searchMode))); + so->rumstate.supportCollation[skey->sk_attno - 1], + skey->sk_argument, + PointerGetDatum(&nQueryValues), + UInt16GetDatum(skey->sk_strategy), + PointerGetDatum(&partial_matches), + PointerGetDatum(&extra_data), + PointerGetDatum(&nullFlags), + PointerGetDatum(&searchMode))); /* - * If bogus searchMode is returned, treat as RUM_SEARCH_MODE_ALL; note - * in particular we don't allow extractQueryFn to select - * RUM_SEARCH_MODE_EVERYTHING. - */ + * If bogus searchMode is returned, treat as RUM_SEARCH_MODE_ALL; note in + * particular we don't allow extractQueryFn to select + * RUM_SEARCH_MODE_EVERYTHING. + */ if (searchMode < GIN_SEARCH_MODE_DEFAULT || searchMode > GIN_SEARCH_MODE_ALL) searchMode = GIN_SEARCH_MODE_ALL; @@ -341,8 +338,8 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) *hasNullQuery = true; /* - * In default mode, no keys means an unsatisfiable query. - */ + * In default mode, no keys means an unsatisfiable query. + */ if (queryValues == NULL || nQueryValues <= 0) { if (searchMode == GIN_SEARCH_MODE_DEFAULT) @@ -350,16 +347,16 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) so->isVoidRes = true; return; } - nQueryValues = 0; /* ensure sane value */ + nQueryValues = 0; /* ensure sane value */ } /* - * If the extractQueryFn didn't create a nullFlags array, create one, - * assuming that everything's non-null. Otherwise, run through the - * array and make sure each value is exactly 0 or 1; this ensures - * binary compatibility with the RumNullCategory representation. While - * at it, detect whether any null keys are present. - */ + * If the extractQueryFn didn't create a nullFlags array, create one, + * assuming that everything's non-null. Otherwise, run through the array + * and make sure each value is exactly 0 or 1; this ensures binary + * compatibility with the RumNullCategory representation. While at it, + * detect whether any null keys are present. + */ if (nullFlags == NULL) nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool)); else @@ -370,7 +367,7 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) { if (nullFlags[j]) { - nullFlags[j] = true; /* not any other nonzero value */ + nullFlags[j] = true; /* not any other nonzero value */ *hasNullQuery = true; } } @@ -378,11 +375,11 @@ initScanKey(RumScanOpaque so, ScanKey skey, bool *hasNullQuery) /* now we can use the nullFlags as category codes */ rumFillScanKey(so, skey->sk_attno, - skey->sk_strategy, searchMode, - skey->sk_argument, nQueryValues, - queryValues, (RumNullCategory *) nullFlags, - partial_matches, extra_data, - (skey->sk_flags & SK_ORDER_BY) ? true: false); + skey->sk_strategy, searchMode, + skey->sk_argument, nQueryValues, + queryValues, (RumNullCategory *) nullFlags, + partial_matches, extra_data, + (skey->sk_flags & SK_ORDER_BY) ? 
true : false); } void @@ -402,8 +399,8 @@ rumNewScanKey(IndexScanDesc scan) /* if no scan keys provided, allocate extra EVERYTHING RumScanKey */ so->keys = (RumScanKey) - palloc(Max(scan->numberOfKeys + scan->numberOfOrderBys, 1) * - sizeof(RumScanKeyData)); + palloc((Max(scan->numberOfKeys, 1) + scan->numberOfOrderBys) * + sizeof(RumScanKeyData)); so->nkeys = 0; /* initialize expansible array of RumScanEntry pointers */ @@ -422,6 +419,20 @@ rumNewScanKey(IndexScanDesc scan) break; } + /* + * If there are no regular scan keys, generate an EVERYTHING scankey to + * drive a full-index scan. + */ + if (so->nkeys == 0 && !so->isVoidRes) + { + hasNullQuery = true; + rumFillScanKey(so, FirstOffsetNumber, + InvalidStrategy, + GIN_SEARCH_MODE_EVERYTHING, + (Datum) 0, 0, + NULL, NULL, NULL, NULL, false); + } + for (i = 0; i < scan->numberOfOrderBys; i++) { initScanKey(so, &scan->orderByData[i], &hasNullQuery); @@ -437,19 +448,6 @@ rumNewScanKey(IndexScanDesc scan) scan->numberOfOrderBys); } - /* - * If there are no regular scan keys, generate an EVERYTHING scankey to - * drive a full-index scan. - */ - if (so->nkeys == 0 && !so->isVoidRes) - { - hasNullQuery = true; - rumFillScanKey(so, FirstOffsetNumber, - InvalidStrategy, GIN_SEARCH_MODE_EVERYTHING, - (Datum) 0, 0, - NULL, NULL, NULL, NULL, false); - } - MemoryContextSwitchTo(oldCtx); pgstat_count_index_scan(scan->indexRelation); @@ -467,12 +465,11 @@ rumrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys, freeScanKeys(so); if (scankey && scan->numberOfKeys > 0) - { memmove(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData)); + if (orderbys && scan->numberOfOrderBys > 0) memmove(scan->orderByData, orderbys, scan->numberOfOrderBys * sizeof(ScanKeyData)); - } if (so->sortstate) { diff --git a/rumsort.c b/rumsort.c index 3a7298398c..a2c71d1aba 100644 --- a/rumsort.c +++ b/rumsort.c @@ -484,7 +484,7 @@ static void dumptuples(Tuplesortstate *state, bool alltuples); static void make_bounded_heap(Tuplesortstate *state); static void sort_bounded_heap(Tuplesortstate *state); static void rum_tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple, - int tupleindex, bool checkIndex); + int tupleindex, bool checkIndex); static void rum_tuplesort_heap_siftup(Tuplesortstate *state, bool checkIndex); static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK); static void markrunend(Tuplesortstate *state, int tapenum); @@ -524,12 +524,12 @@ static void readtup_datum(Tuplesortstate *state, SortTuple *stup, static void reversedirection_datum(Tuplesortstate *state); static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup); static int comparetup_rum(const SortTuple *a, const SortTuple *b, - Tuplesortstate *state); + Tuplesortstate *state); static void copytup_rum(Tuplesortstate *state, SortTuple *stup, void *tup); static void writetup_rum(Tuplesortstate *state, int tapenum, - SortTuple *stup); + SortTuple *stup); static void readtup_rum(Tuplesortstate *state, SortTuple *stup, - int tapenum, unsigned int len); + int tapenum, unsigned int len); static void reversedirection_rum(Tuplesortstate *state); /* @@ -539,14 +539,15 @@ static void reversedirection_rum(Tuplesortstate *state); * reduces to ApplySortComparator(), that is single-key MinimalTuple sorts * and Datum sorts. 
*/ -//#include "qsort_tuple.c" +/* #include "qsort_tuple.c" */ static void swapfunc(SortTuple *a, SortTuple *b, size_t n) { do { - SortTuple t = *a; + SortTuple t = *a; + *a++ = *b; *b++ = t; } while (--n > 0); @@ -557,7 +558,7 @@ swapfunc(SortTuple *a, SortTuple *b, size_t n) (b)->datum1, (b)->isnull1, ssup) #define swap(a, b) \ - do { \ + do { \ SortTuple t = *(a); \ *(a) = *(b); \ *(b) = t; \ @@ -570,9 +571,9 @@ med3_tuple(SortTuple *a, SortTuple *b, SortTuple *c, SortTupleComparator cmp_tup { return cmp_tuple(a, b, state) < 0 ? (cmp_tuple(b, c, state) < 0 ? b : - (cmp_tuple(a, c, state) < 0 ? c : a)) + (cmp_tuple(a, c, state) < 0 ? c : a)) : (cmp_tuple(b, c, state) > 0 ? b : - (cmp_tuple(a, c, state) < 0 ? a : c)); + (cmp_tuple(a, c, state) < 0 ? a : c)); } static SortTuple * @@ -580,9 +581,9 @@ med3_ssup(SortTuple *a, SortTuple *b, SortTuple *c, SortSupport ssup) { return cmp_ssup(a, b, ssup) < 0 ? (cmp_ssup(b, c, ssup) < 0 ? b : - (cmp_ssup(a, c, ssup) < 0 ? c : a)) + (cmp_ssup(a, c, ssup) < 0 ? c : a)) : (cmp_ssup(b, c, ssup) > 0 ? b : - (cmp_ssup(a, c, ssup) < 0 ? a : c)); + (cmp_ssup(a, c, ssup) < 0 ? a : c)); } static void @@ -919,10 +920,10 @@ rum_tuplesort_get_memorycontext(Tuplesortstate *state) Tuplesortstate * rum_tuplesort_begin_heap(TupleDesc tupDesc, - int nkeys, AttrNumber *attNums, - Oid *sortOperators, Oid *sortCollations, - bool *nullsFirstFlags, - int workMem, bool randomAccess) + int nkeys, AttrNumber *attNums, + Oid *sortOperators, Oid *sortCollations, + bool *nullsFirstFlags, + int workMem, bool randomAccess) { Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; @@ -983,8 +984,8 @@ rum_tuplesort_begin_heap(TupleDesc tupDesc, Tuplesortstate * rum_tuplesort_begin_cluster(TupleDesc tupDesc, - Relation indexRel, - int workMem, bool randomAccess) + Relation indexRel, + int workMem, bool randomAccess) { Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; @@ -1044,9 +1045,9 @@ rum_tuplesort_begin_cluster(TupleDesc tupDesc, Tuplesortstate * rum_tuplesort_begin_index_btree(Relation heapRel, - Relation indexRel, - bool enforceUnique, - int workMem, bool randomAccess) + Relation indexRel, + bool enforceUnique, + int workMem, bool randomAccess) { Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; @@ -1087,9 +1088,9 @@ rum_tuplesort_begin_index_btree(Relation heapRel, Tuplesortstate * rum_tuplesort_begin_index_hash(Relation heapRel, - Relation indexRel, - uint32 hash_mask, - int workMem, bool randomAccess) + Relation indexRel, + uint32 hash_mask, + int workMem, bool randomAccess) { Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; @@ -1159,8 +1160,8 @@ rum_tuplesort_begin_rum(int workMem, int nKeys, bool randomAccess) Tuplesortstate * rum_tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, - bool nullsFirstFlag, - int workMem, bool randomAccess) + bool nullsFirstFlag, + int workMem, bool randomAccess) { Tuplesortstate *state = rum_tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; @@ -1538,7 +1539,7 @@ rum_tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull) } void -rum_tuplesort_putrum(Tuplesortstate *state, RumSortItem *item) +rum_tuplesort_putrum(Tuplesortstate *state, RumSortItem * item) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; @@ -1779,7 +1780,7 @@ 
rum_tuplesort_performsort(Tuplesortstate *state) */ static bool rum_tuplesort_gettuple_common(Tuplesortstate *state, bool forward, - SortTuple *stup, bool *should_free) + SortTuple *stup, bool *should_free) { unsigned int tuplen; @@ -1980,7 +1981,7 @@ rum_tuplesort_gettuple_common(Tuplesortstate *state, bool forward, */ bool rum_tuplesort_gettupleslot(Tuplesortstate *state, bool forward, - TupleTableSlot *slot) + TupleTableSlot *slot) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; @@ -2029,7 +2030,7 @@ rum_tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_fre */ IndexTuple rum_tuplesort_getindextuple(Tuplesortstate *state, bool forward, - bool *should_free) + bool *should_free) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; @@ -2051,7 +2052,7 @@ rum_tuplesort_getindextuple(Tuplesortstate *state, bool forward, */ bool rum_tuplesort_getdatum(Tuplesortstate *state, bool forward, - Datum *val, bool *isNull) + Datum *val, bool *isNull) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; @@ -2168,7 +2169,8 @@ inittapes(Tuplesortstate *state) * account for tuple space, so we don't care if LACKMEM becomes * inaccurate.) */ - tapeSpace = (long) maxTapes * TAPE_BUFFER_OVERHEAD; + tapeSpace = (long) maxTapes *TAPE_BUFFER_OVERHEAD; + if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem) USEMEM(state, tapeSpace); @@ -2824,9 +2826,9 @@ rum_tuplesort_restorepos(Tuplesortstate *state) */ void rum_tuplesort_get_stats(Tuplesortstate *state, - const char **sortMethod, - const char **spaceType, - long *spaceUsed) + const char **sortMethod, + const char **spaceType, + long *spaceUsed) { /* * Note: it might seem we should provide both memory and disk usage for a @@ -2987,7 +2989,7 @@ sort_bounded_heap(Tuplesortstate *state) */ static void rum_tuplesort_heap_insert(Tuplesortstate *state, SortTuple *tuple, - int tupleindex, bool checkIndex) + int tupleindex, bool checkIndex) { SortTuple *memtuples; int j; @@ -3568,7 +3570,7 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b, errmsg("could not create unique index \"%s\"", RelationGetRelationName(state->indexRel)), key_desc ? errdetail("Key %s is duplicated.", key_desc) : - errdetail("Duplicate keys exist."), + errdetail("Duplicate keys exist."), errtableconstraint(state->heapRel, RelationGetRelationName(state->indexRel)))); } @@ -3842,18 +3844,19 @@ free_sort_tuple(Tuplesortstate *state, SortTuple *stup) static int comparetup_rum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state) { - RumSortItem *i1, *i2; - float8 v1 = DatumGetFloat8(a->datum1); - float8 v2 = DatumGetFloat8(b->datum1); - int i; + RumSortItem *i1, + *i2; + float8 v1 = DatumGetFloat8(a->datum1); + float8 v2 = DatumGetFloat8(b->datum1); + int i; if (v1 < v2) return -1; else if (v1 > v2) return 1; - i1 = (RumSortItem *)a; - i2 = (RumSortItem *)b; + i1 = (RumSortItem *) a; + i2 = (RumSortItem *) b; for (i = 1; i < state->nKeys; i++) { if (i1->data[i] < i2->data[i]) @@ -3867,7 +3870,7 @@ comparetup_rum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state) static void copytup_rum(Tuplesortstate *state, SortTuple *stup, void *tup) { - RumSortItem *item = (RumSortItem *)tup; + RumSortItem *item = (RumSortItem *) tup; stup->datum1 = Float8GetDatum(state->nKeys > 0 ? 
item->data[0] : 0); stup->isnull1 = false; @@ -3878,12 +3881,12 @@ copytup_rum(Tuplesortstate *state, SortTuple *stup, void *tup) static void writetup_rum(Tuplesortstate *state, int tapenum, SortTuple *stup) { - RumSortItem *item = (RumSortItem *)stup->tuple; + RumSortItem *item = (RumSortItem *) stup->tuple; unsigned int writtenlen = RumSortItemSize(state->nKeys) + sizeof(unsigned int); LogicalTapeWrite(state->tapeset, tapenum, - (void *) &writtenlen, sizeof(writtenlen)); + (void *) &writtenlen, sizeof(writtenlen)); LogicalTapeWrite(state->tapeset, tapenum, (void *) item, RumSortItemSize(state->nKeys)); if (state->randomAccess) /* need trailing length word? */ @@ -3899,13 +3902,13 @@ readtup_rum(Tuplesortstate *state, SortTuple *stup, int tapenum, unsigned int len) { unsigned int tuplen = len - sizeof(unsigned int); - RumSortItem *item = (RumSortItem *)palloc(RumSortItemSize(state->nKeys)); + RumSortItem *item = (RumSortItem *) palloc(RumSortItemSize(state->nKeys)); Assert(tuplen == RumSortItemSize(state->nKeys)); USEMEM(state, GetMemoryChunkSpace(item)); LogicalTapeReadExact(state->tapeset, tapenum, - (void *)item, RumSortItemSize(state->nKeys)); + (void *) item, RumSortItemSize(state->nKeys)); stup->datum1 = Float8GetDatum(state->nKeys > 0 ? item->data[0] : 0); stup->isnull1 = false; stup->tuple = item; diff --git a/rumsort.h b/rumsort.h index 95afa82251..aa01108146 100644 --- a/rumsort.h +++ b/rumsort.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * rumsort.h - * Generalized tuple sorting routines. + * Generalized tuple sorting routines. * * This module handles sorting of heap tuples, index tuples, or single * Datums (and could easily support other kinds of sortable objects, @@ -61,65 +61,65 @@ typedef struct Tuplesortstate Tuplesortstate; typedef struct { ItemPointerData iptr; - bool recheck; - float8 data[FLEXIBLE_ARRAY_MEMBER]; -} RumSortItem; + bool recheck; + float8 data[FLEXIBLE_ARRAY_MEMBER]; +} RumSortItem; #define RumSortItemSize(nKeys) (offsetof(RumSortItem,data)+(nKeys)*sizeof(float8)) extern MemoryContext rum_tuplesort_get_memorycontext(Tuplesortstate *state); extern Tuplesortstate *rum_tuplesort_begin_heap(TupleDesc tupDesc, - int nkeys, AttrNumber *attNums, - Oid *sortOperators, Oid *sortCollations, - bool *nullsFirstFlags, - int workMem, bool randomAccess); + int nkeys, AttrNumber *attNums, + Oid *sortOperators, Oid *sortCollations, + bool *nullsFirstFlags, + int workMem, bool randomAccess); extern Tuplesortstate *rum_tuplesort_begin_cluster(TupleDesc tupDesc, - Relation indexRel, - int workMem, bool randomAccess); -extern Tuplesortstate *rum_tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, - bool enforceUnique, int workMem, bool randomAccess); +extern Tuplesortstate *rum_tuplesort_begin_index_btree(Relation heapRel, + Relation indexRel, + bool enforceUnique, + int workMem, bool randomAccess); extern Tuplesortstate *rum_tuplesort_begin_index_hash(Relation heapRel, - Relation indexRel, - uint32 hash_mask, - int workMem, bool randomAccess); + Relation indexRel, + uint32 hash_mask, + int workMem, bool randomAccess); extern Tuplesortstate *rum_tuplesort_begin_datum(Oid datumType, - Oid sortOperator, Oid sortCollation, - bool nullsFirstFlag, - int workMem, bool randomAccess); + Oid sortOperator, Oid sortCollation, + bool nullsFirstFlag, + int workMem, bool randomAccess); extern Tuplesortstate *rum_tuplesort_begin_rum(int workMem, int nKeys, bool randomAccess); extern void rum_tuplesort_set_bound(Tuplesortstate 
*state, int64 bound); extern void rum_tuplesort_puttupleslot(Tuplesortstate *state, - TupleTableSlot *slot); + TupleTableSlot *slot); extern void rum_tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup); extern void rum_tuplesort_putindextuple(Tuplesortstate *state, IndexTuple tuple); extern void rum_tuplesort_putdatum(Tuplesortstate *state, Datum val, - bool isNull); -extern void rum_tuplesort_putrum(Tuplesortstate *state, RumSortItem *item); + bool isNull); +extern void rum_tuplesort_putrum(Tuplesortstate *state, RumSortItem * item); extern void rum_tuplesort_performsort(Tuplesortstate *state); extern bool rum_tuplesort_gettupleslot(Tuplesortstate *state, bool forward, - TupleTableSlot *slot); + TupleTableSlot *slot); extern HeapTuple rum_tuplesort_getheaptuple(Tuplesortstate *state, bool forward, - bool *should_free); + bool *should_free); extern IndexTuple rum_tuplesort_getindextuple(Tuplesortstate *state, bool forward, - bool *should_free); + bool *should_free); extern bool rum_tuplesort_getdatum(Tuplesortstate *state, bool forward, - Datum *val, bool *isNull); + Datum *val, bool *isNull); extern RumSortItem *rum_tuplesort_getrum(Tuplesortstate *state, bool forward, - bool *should_free); + bool *should_free); extern void rum_tuplesort_end(Tuplesortstate *state); extern void rum_tuplesort_get_stats(Tuplesortstate *state, - const char **sortMethod, - const char **spaceType, - long *spaceUsed); + const char **sortMethod, + const char **spaceType, + long *spaceUsed); extern int rum_tuplesort_merge_order(long allowedMem); diff --git a/rumtsquery.c b/rumtsquery.c index 9ecb01598c..57a9c5652d 100644 --- a/rumtsquery.c +++ b/rumtsquery.c @@ -22,23 +22,24 @@ typedef struct QueryItemWrap { - QueryItemType type; - int8 oper; - bool not; - int operandsCount, - operandsAllocated; - struct QueryItemWrap *operands; - struct QueryItemWrap *parent; - int distance, - length; - int sum; - int num; -} QueryItemWrap; + QueryItemType type; + int8 oper; + bool not; + int operandsCount, + operandsAllocated; + struct QueryItemWrap *operands; + struct QueryItemWrap *parent; + int distance, + length; + int sum; + int num; +} QueryItemWrap; static QueryItemWrap * -add_child(QueryItemWrap *parent) +add_child(QueryItemWrap * parent) { QueryItemWrap *result; + if (!parent) { result = (QueryItemWrap *) palloc0(sizeof(QueryItemWrap)); @@ -67,12 +68,12 @@ add_child(QueryItemWrap *parent) } static QueryItemWrap * -make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) +make_query_item_wrap(QueryItem *item, QueryItemWrap * parent, bool not) { if (item->type == QI_VAL) { - QueryOperand *operand = (QueryOperand *) item; - QueryItemWrap *wrap = add_child(parent); + QueryOperand *operand = (QueryOperand *) item; + QueryItemWrap *wrap = add_child(parent); if (operand->prefix) elog(ERROR, "Indexing of prefix tsqueries isn't supported yet"); @@ -91,29 +92,30 @@ make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) case OP_AND: case OP_OR: - { - uint8 oper = item->qoperator.oper; - if (not) - oper = (oper == OP_AND) ? OP_OR : OP_AND; - - if (!parent || oper != parent->oper) { - QueryItemWrap *wrap = add_child(parent); + uint8 oper = item->qoperator.oper; - wrap->type = QI_OPR; - wrap->oper = oper; + if (not) + oper = (oper == OP_AND) ? 
OP_OR : OP_AND; - make_query_item_wrap(item + item->qoperator.left, wrap, not); - make_query_item_wrap(item + 1, wrap, not); - return wrap; - } - else - { - make_query_item_wrap(item + item->qoperator.left, parent, not); - make_query_item_wrap(item + 1, parent, not); - return NULL; + if (!parent || oper != parent->oper) + { + QueryItemWrap *wrap = add_child(parent); + + wrap->type = QI_OPR; + wrap->oper = oper; + + make_query_item_wrap(item + item->qoperator.left, wrap, not); + make_query_item_wrap(item + 1, wrap, not); + return wrap; + } + else + { + make_query_item_wrap(item + item->qoperator.left, parent, not); + make_query_item_wrap(item + 1, parent, not); + return NULL; + } } - } case OP_PHRASE: elog(ERROR, "Indexing of phrase tsqueries isn't supported yet"); default: @@ -125,9 +127,11 @@ make_query_item_wrap(QueryItem *item, QueryItemWrap *parent, bool not) } static int -calc_wraps(QueryItemWrap *wrap, int *num) +calc_wraps(QueryItemWrap * wrap, int *num) { - int i, notCount = 0, result; + int i, + notCount = 0, + result; for (i = 0; i < wrap->operandsCount; i++) { @@ -155,7 +159,7 @@ calc_wraps(QueryItemWrap *wrap, int *num) } static bool -check_allnegative(QueryItemWrap *wrap) +check_allnegative(QueryItemWrap * wrap) { if (wrap->type == QI_VAL) { @@ -163,7 +167,8 @@ check_allnegative(QueryItemWrap *wrap) } else if (wrap->oper == OP_AND) { - int i; + int i; + for (i = 0; i < wrap->operandsCount; i++) { if (!check_allnegative(&wrap->operands[i])) @@ -173,7 +178,8 @@ check_allnegative(QueryItemWrap *wrap) } else if (wrap->oper == OP_OR) { - int i; + int i; + for (i = 0; i < wrap->operandsCount; i++) { if (check_allnegative(&wrap->operands[i])) @@ -249,30 +255,31 @@ decode_varbyte(unsigned char **ptr) typedef struct { - Datum *addInfo; - bool *addInfoIsNull; - Datum *entries; - int index; - char *operand; -} ExtractContext; + Datum *addInfo; + bool *addInfoIsNull; + Datum *entries; + int index; + char *operand; +} ExtractContext; static void -extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) +extract_wraps(QueryItemWrap * wrap, ExtractContext * context, int level) { if (wrap->type == QI_VAL) { - bytea *addinfo; - unsigned char *ptr; - int index = context->index; + bytea *addinfo; + unsigned char *ptr; + int index = context->index; for (index = 0; index < context->index; index++) { - text *entry; + text *entry; + entry = DatumGetByteaP(context->entries[index]); if (VARSIZE_ANY_EXHDR(entry) == wrap->length && !memcmp(context->operand + wrap->distance, VARDATA_ANY(entry), wrap->length)) - break; + break; } if (index >= context->index) @@ -284,26 +291,40 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) context->addInfo[index] = PointerGetDatum(addinfo); context->addInfoIsNull[index] = false; context->index++; - /*ptrEnd = (unsigned char *) VARDATA(addinfo) + VARHDRSZ + 2 * Max(level, 1) * MAX_ENCODED_LEN;*/ + + /* + * ptrEnd = (unsigned char *) VARDATA(addinfo) + VARHDRSZ + 2 * + * Max(level, 1) * MAX_ENCODED_LEN; + */ } else { addinfo = DatumGetByteaP(context->addInfo[index]); addinfo = (bytea *) repalloc(addinfo, - VARSIZE(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN); + VARSIZE(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN); context->addInfo[index] = PointerGetDatum(addinfo); ptr = (unsigned char *) VARDATA(addinfo) + VARSIZE_ANY_EXHDR(addinfo); - /*ptrEnd = (unsigned char *) VARDATA(addinfo) + VARSIZE_ANY_EXHDR(addinfo) + 2 * Max(level, 1) * MAX_ENCODED_LEN;*/ + + /* + * ptrEnd = (unsigned char *) VARDATA(addinfo) + + * 
VARSIZE_ANY_EXHDR(addinfo) + 2 * Max(level, 1) * + * MAX_ENCODED_LEN; + */ } - /*elog(NOTICE, "%s", text_to_cstring(DatumGetTextP(context->entries[index])));*/ + + /* + * elog(NOTICE, "%s", + * text_to_cstring(DatumGetTextP(context->entries[index]))); + */ while (wrap->parent) { - QueryItemWrap *parent = wrap->parent; - uint32 sum; - /*elog(NOTICE, "%d %d %d", parent->num, parent->sum, wrap->not);*/ + QueryItemWrap *parent = wrap->parent; + uint32 sum; + + /* elog(NOTICE, "%d %d %d", parent->num, parent->sum, wrap->not); */ encode_varbyte((uint32) parent->num, &ptr); - sum = (uint32)abs(parent->sum); + sum = (uint32) abs(parent->sum); sum <<= 2; if (parent->sum < 0) sum |= 2; @@ -317,13 +338,18 @@ extract_wraps(QueryItemWrap *wrap, ExtractContext *context, int level) encode_varbyte(1, &ptr); encode_varbyte(4 | 1, &ptr); } - /*Assert(ptr <= ptrEnd);*/ - SET_VARSIZE(addinfo, ptr - (unsigned char *)addinfo); - /*elog(NOTICE, "%s", DatumGetPointer(DirectFunctionCall1(byteaout, PointerGetDatum(addinfo))));*/ + /* Assert(ptr <= ptrEnd); */ + SET_VARSIZE(addinfo, ptr - (unsigned char *) addinfo); + + /* + * elog(NOTICE, "%s", DatumGetPointer(DirectFunctionCall1(byteaout, + * PointerGetDatum(addinfo)))); + */ } else if (wrap->type == QI_OPR) { - int i; + int i; + for (i = 0; i < wrap->operandsCount; i++) extract_wraps(&wrap->operands[i], context, level + 1); } @@ -351,8 +377,8 @@ ruminv_extract_tsquery(PG_FUNCTION_ARGS) TSQuery query = PG_GETARG_TSQUERY(0); int32 *nentries = (int32 *) PG_GETARG_POINTER(1); bool **nullFlags = (bool **) PG_GETARG_POINTER(2); - Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); - bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); + Datum **addInfo = (Datum **) PG_GETARG_POINTER(3); + bool **addInfoIsNull = (bool **) PG_GETARG_POINTER(4); Datum *entries = NULL; QueryItem *item = GETQUERY(query); QueryItemWrap *wrap; @@ -382,7 +408,7 @@ ruminv_extract_tsquery(PG_FUNCTION_ARGS) count = context.index; if (extractNull) { - int i; + int i; count++; *nullFlags = (bool *) palloc(sizeof(bool) * count); @@ -414,7 +440,7 @@ ruminv_extract_tsvector(PG_FUNCTION_ARGS) bool **ptr_partialmatch = (bool **) PG_GETARG_POINTER(3); Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); - bool **nullFlags = (bool **) PG_GETARG_POINTER(5); + bool **nullFlags = (bool **) PG_GETARG_POINTER(5); int32 *searchMode = (int32 *) PG_GETARG_POINTER(6); Datum *entries = NULL; @@ -452,19 +478,21 @@ ruminv_extract_tsvector(PG_FUNCTION_ARGS) typedef struct { - int sum; - int parent; - bool not; -} TmpNode; + int sum; + int parent; + bool not; +} TmpNode; PG_FUNCTION_INFO_V1(ruminv_tsvector_consistent); Datum ruminv_tsvector_consistent(PG_FUNCTION_ARGS) { bool *check = (bool *) PG_GETARG_POINTER(0); + /* StrategyNumber strategy = PG_GETARG_UINT16(1); */ - /* TSVector vector = PG_GETARG_TSVECTOR(2); */ + /* TSVector vector = PG_GETARG_TSVECTOR(2); */ int32 nkeys = PG_GETARG_INT32(3); + /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */ bool *recheck = (bool *) PG_GETARG_POINTER(5); Datum *addInfo = (Datum *) PG_GETARG_POINTER(8); @@ -480,9 +508,9 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) for (i = 0; i < nkeys - 1; i++) { unsigned char *ptr, - *ptrEnd; - int size; - TmpNode *child = NULL; + *ptrEnd; + int size; + TmpNode *child = NULL; if (!check[i]) continue; @@ -492,10 +520,13 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) if (addInfoIsNull[i]) elog(ERROR, "Unexpected addInfoIsNull"); - ptr = (unsigned char *)VARDATA_ANY(DatumGetPointer(addInfo[i])); + ptr = (unsigned char *) 
VARDATA_ANY(DatumGetPointer(addInfo[i])); size = VARSIZE_ANY_EXHDR(DatumGetPointer(addInfo[i])); - /*elog(NOTICE, "%d %s", i, DatumGetPointer(DirectFunctionCall1(byteaout, addInfo[i])));*/ + /* + * elog(NOTICE, "%d %s", i, + * DatumGetPointer(DirectFunctionCall1(byteaout, addInfo[i]))); + */ if (size == 0) { @@ -506,10 +537,11 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) ptrEnd = ptr + size; while (ptr < ptrEnd) { - uint32 num = decode_varbyte(&ptr), - sumVal = decode_varbyte(&ptr); - int sum, index; - bool not; + uint32 num = decode_varbyte(&ptr), + sumVal = decode_varbyte(&ptr); + int sum, + index; + bool not; not = (sumVal & 1) ? true : false; sum = sumVal >> 2; @@ -517,7 +549,7 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) index = num - 1; - /*elog(NOTICE, "a %d %d %d %d", i, index, sum, not);*/ + /* elog(NOTICE, "a %d %d %d %d", i, index, sum, not); */ if (child) { @@ -558,10 +590,10 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) } else { - /*for (i = 0; i < lastIndex; i++) - { - elog(NOTICE, "s %d %d %d %d", i, nodes[i].sum, nodes[i].parent, nodes[i].not); - }*/ + /* + * for (i = 0; i < lastIndex; i++) { elog(NOTICE, "s %d %d %d %d", i, + * nodes[i].sum, nodes[i].parent, nodes[i].not); } + */ for (i = lastIndex - 1; i >= 0; i--) { @@ -576,7 +608,8 @@ ruminv_tsvector_consistent(PG_FUNCTION_ARGS) } else { - int parent = nodes[i].parent; + int parent = nodes[i].parent; + nodes[parent].sum += nodes[i].not ? -1 : 1; } } @@ -593,8 +626,8 @@ PG_FUNCTION_INFO_V1(ruminv_tsquery_config); Datum ruminv_tsquery_config(PG_FUNCTION_ARGS) { - RumConfig *config = (RumConfig *)PG_GETARG_POINTER(0); + RumConfig *config = (RumConfig *) PG_GETARG_POINTER(0); + config->addInfoTypeOid = BYTEAOID; PG_RETURN_VOID(); } - diff --git a/rumutil.c b/rumutil.c index 43c99594f3..ad71160320 100644 --- a/rumutil.c +++ b/rumutil.c @@ -32,6 +32,7 @@ PG_FUNCTION_INFO_V1(rumhandler); /* Kind of relation optioms for rum index */ static relopt_kind rum_relopt_kind; + /* * Module load callback */ @@ -40,12 +41,12 @@ _PG_init(void) { /* Define custom GUC variables. */ DefineCustomIntVariable("rum_fuzzy_search_limit", - "Sets the maximum allowed result for exact search by RUM.", - NULL, - &RumFuzzySearchLimit, - 0, 0, INT_MAX, - PGC_USERSET, 0, - NULL, NULL, NULL); + "Sets the maximum allowed result for exact search by RUM.", + NULL, + &RumFuzzySearchLimit, + 0, 0, INT_MAX, + PGC_USERSET, 0, + NULL, NULL, NULL); rum_relopt_kind = add_reloption_kind(); @@ -56,7 +57,7 @@ _PG_init(void) "Column name to add a order by column", NULL, NULL); add_bool_reloption(rum_relopt_kind, "use_alternative_order", - "Use (addinfo, itempointer) order instead of just itempointer", + "Use (addinfo, itempointer) order instead of just itempointer", false); } @@ -110,7 +111,7 @@ rumhandler(PG_FUNCTION_ARGS) * Note: assorted subsidiary data is allocated in the CurrentMemoryContext. 
*/ void -initRumState(RumState *state, Relation index) +initRumState(RumState * state, Relation index) { TupleDesc origTupdesc = RelationGetDescr(index); int i; @@ -125,11 +126,11 @@ initRumState(RumState *state, Relation index) state->attrnAddToColumn = InvalidAttrNumber; if (index->rd_options) { - RumOptions *options = (RumOptions*) index->rd_options; + RumOptions *options = (RumOptions *) index->rd_options; if (options->orderByColumn > 0) { - char *colname = (char *) options + options->orderByColumn; + char *colname = (char *) options + options->orderByColumn; AttrNumber attrnOrderByHeapColumn; attrnOrderByHeapColumn = get_attnum(index->rd_index->indrelid, colname); @@ -145,7 +146,7 @@ initRumState(RumState *state, Relation index) if (options->addToColumn > 0) { - char *colname = (char *) options + options->addToColumn; + char *colname = (char *) options + options->addToColumn; AttrNumber attrnAddToHeapColumn; attrnAddToHeapColumn = get_attnum(index->rd_index->indrelid, colname); @@ -175,7 +176,7 @@ initRumState(RumState *state, Relation index) for (i = 0; i < origTupdesc->natts; i++) { - RumConfig rumConfig; + RumConfig rumConfig; rumConfig.addInfoTypeOid = InvalidOid; @@ -188,13 +189,13 @@ initRumState(RumState *state, Relation index) FunctionCall1(&state->configFn[i], PointerGetDatum(&rumConfig)); } - if (state->attrnAddToColumn == i+1) + if (state->attrnAddToColumn == i + 1) { - if (OidIsValid(rumConfig.addInfoTypeOid)) + if (OidIsValid(rumConfig.addInfoTypeOid)) elog(ERROR, "AddTo could should not have AddInfo"); state->addInfoTypeOid[i] = origTupdesc->attrs[ - state->attrnOrderByColumn - 1]->atttypid; + state->attrnOrderByColumn - 1]->atttypid; } else { @@ -204,7 +205,7 @@ initRumState(RumState *state, Relation index) if (state->oneCol) { state->tupdesc[i] = CreateTemplateTupleDesc( - OidIsValid(state->addInfoTypeOid[i]) ? 2 : 1, false); + OidIsValid(state->addInfoTypeOid[i]) ? 2 : 1, false); TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL, origTupdesc->attrs[i]->atttypid, origTupdesc->attrs[i]->atttypmod, @@ -214,7 +215,7 @@ initRumState(RumState *state, Relation index) if (OidIsValid(state->addInfoTypeOid[i])) { TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL, - state->addInfoTypeOid[i], -1, 0); + state->addInfoTypeOid[i], -1, 0); state->addAttrs[i] = state->tupdesc[i]->attrs[1]; } else @@ -225,7 +226,7 @@ initRumState(RumState *state, Relation index) else { state->tupdesc[i] = CreateTemplateTupleDesc( - OidIsValid(state->addInfoTypeOid[i]) ? 3 : 2, false); + OidIsValid(state->addInfoTypeOid[i]) ? 
3 : 2, false); TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL, INT2OID, -1, 0); TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL, @@ -237,7 +238,7 @@ initRumState(RumState *state, Relation index) if (OidIsValid(state->addInfoTypeOid[i])) { TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 3, NULL, - state->addInfoTypeOid[i], -1, 0); + state->addInfoTypeOid[i], -1, 0); state->addAttrs[i] = state->tupdesc[i]->attrs[2]; } else @@ -280,7 +281,7 @@ initRumState(RumState *state, Relation index) if (index_getprocid(index, i + 1, RUM_PRE_CONSISTENT_PROC) != InvalidOid) { fmgr_info_copy(&(state->preConsistentFn[i]), - index_getprocinfo(index, i + 1, RUM_PRE_CONSISTENT_PROC), + index_getprocinfo(index, i + 1, RUM_PRE_CONSISTENT_PROC), CurrentMemoryContext); state->canPreConsistent[i] = true; } @@ -295,7 +296,7 @@ initRumState(RumState *state, Relation index) if (index_getprocid(index, i + 1, RUM_ORDERING_PROC) != InvalidOid) { fmgr_info_copy(&(state->orderingFn[i]), - index_getprocinfo(index, i + 1, RUM_ORDERING_PROC), + index_getprocinfo(index, i + 1, RUM_ORDERING_PROC), CurrentMemoryContext); state->canOrdering[i] = true; } @@ -307,8 +308,8 @@ initRumState(RumState *state, Relation index) if (index_getprocid(index, i + 1, RUM_OUTER_ORDERING_PROC) != InvalidOid) { fmgr_info_copy(&(state->outerOrderingFn[i]), - index_getprocinfo(index, i + 1, RUM_OUTER_ORDERING_PROC), - CurrentMemoryContext); + index_getprocinfo(index, i + 1, RUM_OUTER_ORDERING_PROC), + CurrentMemoryContext); state->canOuterOrdering[i] = true; } else @@ -346,7 +347,7 @@ initRumState(RumState *state, Relation index) * Extract attribute (column) number of stored entry from RUM tuple */ OffsetNumber -rumtuple_get_attrnum(RumState *rumstate, IndexTuple tuple) +rumtuple_get_attrnum(RumState * rumstate, IndexTuple tuple) { OffsetNumber colN; @@ -379,8 +380,8 @@ rumtuple_get_attrnum(RumState *rumstate, IndexTuple tuple) * Extract stored datum (and possible null category) from RUM tuple */ Datum -rumtuple_get_key(RumState *rumstate, IndexTuple tuple, - RumNullCategory *category) +rumtuple_get_key(RumState * rumstate, IndexTuple tuple, + RumNullCategory * category) { Datum res; bool isnull; @@ -497,7 +498,7 @@ void RumInitMetabuffer(GenericXLogState *state, Buffer metaBuffer) { Page metaPage; - RumMetaPageData *metadata; + RumMetaPageData *metadata; /* Initialize contents of meta page */ metaPage = GenericXLogRegisterBuffer(state, metaBuffer, @@ -524,7 +525,7 @@ RumInitMetabuffer(GenericXLogState *state, Buffer metaBuffer) * Compare two keys of the same index column */ int -rumCompareEntries(RumState *rumstate, OffsetNumber attnum, +rumCompareEntries(RumState * rumstate, OffsetNumber attnum, Datum a, RumNullCategory categorya, Datum b, RumNullCategory categoryb) { @@ -546,7 +547,7 @@ rumCompareEntries(RumState *rumstate, OffsetNumber attnum, * Compare two keys of possibly different index columns */ int -rumCompareAttEntries(RumState *rumstate, +rumCompareAttEntries(RumState * rumstate, OffsetNumber attnuma, Datum a, RumNullCategory categorya, OffsetNumber attnumb, Datum b, RumNullCategory categoryb) { @@ -621,9 +622,9 @@ cmpEntries(const void *a, const void *b, void *arg) * This avoids generating redundant index entries. 
*/ Datum * -rumExtractEntries(RumState *rumstate, OffsetNumber attnum, +rumExtractEntries(RumState * rumstate, OffsetNumber attnum, Datum value, bool isNull, - int32 *nentries, RumNullCategory **categories, + int32 *nentries, RumNullCategory ** categories, Datum **addInfo, bool **addInfoIsNull) { Datum *entries; @@ -660,7 +661,7 @@ rumExtractEntries(RumState *rumstate, OffsetNumber attnum, PointerGetDatum(&nullFlags), PointerGetDatum(addInfo), PointerGetDatum(addInfoIsNull) - )); + )); /* * Generate a placeholder if the item contained no keys. @@ -681,13 +682,13 @@ rumExtractEntries(RumState *rumstate, OffsetNumber attnum, if (!(*addInfo)) { - (*addInfo) = (Datum *)palloc(sizeof(Datum) * *nentries); + (*addInfo) = (Datum *) palloc(sizeof(Datum) * *nentries); for (i = 0; i < *nentries; i++) (*addInfo)[i] = (Datum) 0; } if (!(*addInfoIsNull)) { - (*addInfoIsNull) = (bool *)palloc(sizeof(bool) * *nentries); + (*addInfoIsNull) = (bool *) palloc(sizeof(bool) * *nentries); for (i = 0; i < *nentries; i++) (*addInfoIsNull)[i] = true; } @@ -847,8 +848,8 @@ rumUpdateStats(Relation index, const GinStatsData *stats) { Buffer metabuffer; Page metapage; - RumMetaPageData *metadata; - GenericXLogState *state; + RumMetaPageData *metadata; + GenericXLogState *state; state = GenericXLogStart(index); @@ -869,9 +870,9 @@ rumUpdateStats(Relation index, const GinStatsData *stats) Datum FunctionCall10Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, - Datum arg3, Datum arg4, Datum arg5, - Datum arg6, Datum arg7, Datum arg8, - Datum arg9, Datum arg10) + Datum arg3, Datum arg4, Datum arg5, + Datum arg6, Datum arg7, Datum arg8, + Datum arg9, Datum arg10) { FunctionCallInfoData fcinfo; Datum result; diff --git a/rumvacuum.c b/rumvacuum.c index bce208563e..77810f2fd4 100644 --- a/rumvacuum.c +++ b/rumvacuum.c @@ -28,7 +28,7 @@ typedef struct void *callback_state; RumState rumstate; BufferAccessStrategy strategy; -} RumVacuumState; +} RumVacuumState; /* @@ -40,27 +40,29 @@ typedef struct */ static uint32 -rumVacuumPostingList(RumVacuumState *gvs, OffsetNumber attnum, Pointer src, +rumVacuumPostingList(RumVacuumState * gvs, OffsetNumber attnum, Pointer src, uint32 nitem, Pointer *cleaned, Size size, Size *newSize) { uint32 i, j = 0; - ItemPointerData iptr = {{0,0},0}, prevIptr; - Datum addInfo = 0; - bool addInfoIsNull; - Pointer dst = NULL, prev, ptr = src; + RumKey item; + ItemPointerData prevIptr; + Pointer dst = NULL, + prev, + ptr = src; + + ItemPointerSetMin(&item.iptr); /* * just scan over ItemPointer array */ - prevIptr = iptr; + prevIptr = item.iptr; for (i = 0; i < nitem; i++) { prev = ptr; - ptr = rumDataPageLeafRead(ptr, attnum, &iptr, &addInfo, &addInfoIsNull, - &gvs->rumstate); - if (gvs->callback(&iptr, gvs->callback_state)) + ptr = rumDataPageLeafRead(ptr, attnum, &item, &gvs->rumstate); + if (gvs->callback(&item.iptr, gvs->callback_state)) { gvs->result->tuples_removed += 1; if (!dst) @@ -78,12 +80,10 @@ rumVacuumPostingList(RumVacuumState *gvs, OffsetNumber attnum, Pointer src, { gvs->result->num_index_tuples += 1; if (i != j) - dst = rumPlaceToDataPageLeaf(dst, attnum, &iptr, - addInfo, - addInfoIsNull, + dst = rumPlaceToDataPageLeaf(dst, attnum, &item, &prevIptr, &gvs->rumstate); j++; - prevIptr = iptr; + prevIptr = item.iptr; } } @@ -97,7 +97,7 @@ rumVacuumPostingList(RumVacuumState *gvs, OffsetNumber attnum, Pointer src, * with additional information. 
*/ static IndexTuple -RumFormTuple(RumState *rumstate, +RumFormTuple(RumState * rumstate, OffsetNumber attnum, Datum key, RumNullCategory category, Pointer data, Size dataSize, @@ -191,7 +191,8 @@ RumFormTuple(RumState *rumstate, */ if (nipd > 0) { - char *ptr = RumGetPosting(itup); + char *ptr = RumGetPosting(itup); + memcpy(ptr, data, dataSize); } @@ -207,7 +208,7 @@ RumFormTuple(RumState *rumstate, } static bool -rumVacuumPostingTreeLeaves(RumVacuumState *gvs, OffsetNumber attnum, +rumVacuumPostingTreeLeaves(RumVacuumState * gvs, OffsetNumber attnum, BlockNumber blkno, bool isRoot, Buffer *rootBuffer) { Buffer buffer; @@ -244,8 +245,8 @@ rumVacuumPostingTreeLeaves(RumVacuumState *gvs, OffsetNumber attnum, page = GenericXLogRegisterBuffer(state, buffer, 0); newMaxOff = rumVacuumPostingList(gvs, attnum, - RumDataPageGetData(page), oldMaxOff, &cleaned, - RumDataPageSize - RumPageGetOpaque(page)->freespace, &newSize); + RumDataPageGetData(page), oldMaxOff, &cleaned, + RumDataPageSize - RumPageGetOpaque(page)->freespace, &newSize); /* saves changes about deleted tuple ... */ if (oldMaxOff != newMaxOff) @@ -276,7 +277,7 @@ rumVacuumPostingTreeLeaves(RumVacuumState *gvs, OffsetNumber attnum, PostingItem *pitem = (PostingItem *) RumDataPageGetItem(page, i); if (rumVacuumPostingTreeLeaves(gvs, attnum, - PostingItemGetBlockNumber(pitem), FALSE, NULL)) + PostingItemGetBlockNumber(pitem), FALSE, NULL)) isChildHasVoid = TRUE; } @@ -305,7 +306,7 @@ rumVacuumPostingTreeLeaves(RumVacuumState *gvs, OffsetNumber attnum, * Delete a posting tree page. */ static void -rumDeletePage(RumVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno, +rumDeletePage(RumVacuumState * gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno, BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot) { Buffer dBuffer; @@ -314,7 +315,7 @@ rumDeletePage(RumVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn Page lPage, dPage, parentPage; - BlockNumber rightlink; + BlockNumber rightlink; GenericXLogState *state; state = GenericXLogStart(gvs->index); @@ -386,7 +387,7 @@ typedef struct DataPageDeleteStack * scans posting tree and deletes empty pages */ static bool -rumScanToDelete(RumVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff) +rumScanToDelete(RumVacuumState * gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff) { DataPageDeleteStack *me; Buffer buffer; @@ -450,7 +451,7 @@ rumScanToDelete(RumVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDel } static void -rumVacuumPostingTree(RumVacuumState *gvs, OffsetNumber attnum, BlockNumber rootBlkno) +rumVacuumPostingTree(RumVacuumState * gvs, OffsetNumber attnum, BlockNumber rootBlkno) { Buffer rootBuffer = InvalidBuffer; DataPageDeleteStack root, @@ -488,7 +489,7 @@ rumVacuumPostingTree(RumVacuumState *gvs, OffsetNumber attnum, BlockNumber rootB * then page is copied into temporary one. 
*/ static Page -rumVacuumEntryPage(RumVacuumState *gvs, Buffer buffer, BlockNumber *roots, OffsetNumber *attnums, uint32 *nroot) +rumVacuumEntryPage(RumVacuumState * gvs, Buffer buffer, BlockNumber *roots, OffsetNumber *attnums, uint32 *nroot) { Page origpage = BufferGetPage(buffer), tmppage; @@ -519,13 +520,13 @@ rumVacuumEntryPage(RumVacuumState *gvs, Buffer buffer, BlockNumber *roots, Offse * if we already create temporary page, we will make changes in * place */ - Size cleanedSize; - Pointer cleaned = NULL; + Size cleanedSize; + Pointer cleaned = NULL; uint32 newN = - rumVacuumPostingList(gvs, rumtuple_get_attrnum(&gvs->rumstate, itup), - RumGetPosting(itup), RumGetNPosting(itup), &cleaned, - IndexTupleSize(itup) - RumGetPostingOffset(itup), - &cleanedSize); + rumVacuumPostingList(gvs, rumtuple_get_attrnum(&gvs->rumstate, itup), + RumGetPosting(itup), RumGetNPosting(itup), &cleaned, + IndexTupleSize(itup) - RumGetPostingOffset(itup), + &cleanedSize); if (RumGetNPosting(itup) != newN) { diff --git a/rumvalidate.c b/rumvalidate.c index befc8f694b..92a57dc201 100644 --- a/rumvalidate.c +++ b/rumvalidate.c @@ -267,7 +267,7 @@ rumvalidate(Oid opclassoid) result = false; } if (!opclassgroup || - (opclassgroup->functionset & (1 << GIN_CONSISTENT_PROC)) == 0) + (opclassgroup->functionset & (1 << GIN_CONSISTENT_PROC)) == 0) { ereport(INFO, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/sql/orderby.sql b/sql/orderby.sql index cf4d90e3d4..389caa0fdb 100644 --- a/sql/orderby.sql +++ b/sql/orderby.sql @@ -49,8 +49,6 @@ EXPLAIN (costs off) SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; SELECT id, d, d |-> '2016-05-16 14:21:25' FROM tsts WHERE t @@ 'wr&qh' ORDER BY d |-> '2016-05-16 14:21:25' LIMIT 5; ---to be fixed ---EXPLAIN (costs off) ---SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; ---SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; - +EXPLAIN (costs off) +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; +SELECT id, d, d <-> '2016-05-16 14:21:25' FROM tsts ORDER BY d <-> '2016-05-16 14:21:25' LIMIT 5; diff --git a/sql/rum.sql b/sql/rum.sql index a80a133465..f60724852e 100644 --- a/sql/rum.sql +++ b/sql/rum.sql @@ -39,6 +39,12 @@ SELECT rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'way & (go | half)') ORDER BY a <-> to_tsquery('pg_catalog.english', 'way & (go | half)'); +SELECT + a <-> to_tsquery('pg_catalog.english', 'way & (go | half)'), + rum_ts_distance(a, to_tsquery('pg_catalog.english', 'way & (go | half)')), + * + FROM test_rum + ORDER BY a <-> to_tsquery('pg_catalog.english', 'way & (go | half)') limit 3; INSERT INTO test_rum (t) VALUES ('foo bar foo the over foo qq bar'); INSERT INTO test_rum (t) VALUES ('345 qwerty copyright'); @@ -52,10 +58,6 @@ SELECT count(*) FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'rat') SELECT a FROM test_rum WHERE a @@ to_tsquery('pg_catalog.english', 'bar') ORDER BY a; -DELETE FROM test_rum; - -SELECT count(*) from test_rum; - CREATE TABLE tst (i int4, t tsvector); INSERT INTO tst SELECT i%10, to_tsvector('simple', substr(md5(i::text), 1, 1)) FROM generate_series(1,100000) i; CREATE INDEX tstidx ON tst USING rum (t rum_tsvector_ops);
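
The queries added to sql/rum.sql above order rows by the <-> distance with a LIMIT, and that ordering is what the rum_tuplesort_* interface reindented in rumsort.h earlier in this patch exists to support: items carrying float8 distance keys are fed in, sorted on data[0] and then the remaining keys (see comparetup_rum), and read back in ascending order. Below is a minimal caller-side sketch of that lifecycle, not part of the patch; the helper name, the pre-built items array, and the assumption that rum_tuplesort_getrum returns NULL once the sort is exhausted (by analogy with the other getters) are illustrative.

/*
 * Illustrative sketch of driving the rum_tuplesort_* interface declared in
 * rumsort.h.  Assumes the items were already palloc'd with
 * RumSortItemSize(nKeys) and filled in by the caller.
 */
#include "postgres.h"
#include "rumsort.h"

static void
sort_items_by_distance(RumSortItem **items, int nitems, int nKeys, int workMem)
{
	Tuplesortstate *state;
	RumSortItem *item;
	bool		should_free;
	int			i;

	/* nKeys float8 ordering keys per item; no random access needed */
	state = rum_tuplesort_begin_rum(workMem, nKeys, false);

	/* feed items; data[0] becomes the primary sort key (comparetup_rum) */
	for (i = 0; i < nitems; i++)
		rum_tuplesort_putrum(state, items[i]);

	rum_tuplesort_performsort(state);

	/* read items back in ascending distance order */
	while ((item = rum_tuplesort_getrum(state, true, &should_free)) != NULL)
	{
		/* consume item->iptr and item->data[] here */
		if (should_free)
			pfree(item);
	}

	rum_tuplesort_end(state);
}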