
Commit 7e7aa13

Fix collations for in-memory filtering
Scan keys created for in-memory filtering were using the collation from the hypertable tuple descriptor. Since the scan keys are meant to be run on chunk tuples, switching to the collation from the chunk tuple descriptor fixes the issue.
1 parent: d2706a2

4 files changed: 75 additions, 3 deletions

.unreleased/pr_7345

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+Fixes: #7342 Fix collation for in-memory tuple filtering
+Thanks: @hackbnw for reporting an issue with collation during tuple filtering

tsl/src/compression/compression_scankey.c

Lines changed: 2 additions & 3 deletions
@@ -35,7 +35,6 @@ build_mem_scankeys_from_slot(Oid ht_relid, CompressionSettings *settings, Relati
 	ScanKeyData *scankeys = NULL;
 	int key_index = 0;
 	TupleDesc out_desc = RelationGetDescr(out_rel);
-	TupleDesc in_desc = slot->tts_tupleDescriptor;
 
 	if (bms_is_empty(constraints->key_columns))
 	{
@@ -99,8 +98,8 @@ build_mem_scankeys_from_slot(Oid ht_relid, CompressionSettings *settings, Relati
 							   isnull ? SK_ISNULL : 0,
 							   attno,
 							   BTEqualStrategyNumber,
-							   in_desc->attrs[AttrNumberGetAttrOffset(ht_attno)].atttypid,
-							   in_desc->attrs[AttrNumberGetAttrOffset(ht_attno)].attcollation,
+							   atttypid,
+							   TupleDescAttr(out_desc, AttrNumberGetAttrOffset(attno))->attcollation,
 							   get_opcode(opr),
 							   isnull ? 0 : value);
 }
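
To make the intent of the change concrete, here is a minimal, hypothetical sketch of building such an equality scan key with the chunk relation's collation. It uses only PostgreSQL APIs that exist (ScanKeyEntryInitialize, TupleDescAttr, RelationGetDescr, get_opcode); the helper name and its parameters are assumptions modeled on the variables visible in the diff above, not the actual build_mem_scankeys_from_slot body.

/*
 * Hypothetical helper (illustration only): build an equality scan key for
 * filtering decompressed tuples in memory. The point of the fix: take the
 * collation from the chunk relation's tuple descriptor (the tuples the key
 * is applied to), not from the hypertable slot's descriptor.
 */
#include "postgres.h"

#include "access/attnum.h"
#include "access/skey.h"
#include "access/stratnum.h"
#include "access/tupdesc.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"

static void
init_chunk_equality_key(ScanKeyData *key, Relation chunk_rel, AttrNumber attno,
						Oid atttypid, Oid opr, Datum value, bool isnull)
{
	/* Descriptor of the chunk relation the key will be evaluated against */
	TupleDesc out_desc = RelationGetDescr(chunk_rel);
	Form_pg_attribute chunk_att =
		TupleDescAttr(out_desc, AttrNumberGetAttrOffset(attno));

	ScanKeyEntryInitialize(key,
						   isnull ? SK_ISNULL : 0,
						   attno,
						   BTEqualStrategyNumber,
						   atttypid,
						   chunk_att->attcollation, /* chunk collation, not the hypertable's */
						   get_opcode(opr),
						   isnull ? (Datum) 0 : value);
}

In the actual commit, this corresponds to dropping in_desc (the hypertable slot's descriptor) and reading the collation from out_desc (the chunk's descriptor) instead.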

tsl/test/expected/compression_conflicts.out

Lines changed: 40 additions & 0 deletions
@@ -708,3 +708,43 @@ ERROR: inserting into compressed chunk with unique constraints disabled
 INSERT INTO compressed_ht VALUES ('2022-01-24 01:10:28.192199+05:30', '7', 0.876, 4.123, 'new insert row');
 ERROR: inserting into compressed chunk with unique constraints disabled
 \set ON_ERROR_STOP 1
+RESET timescaledb.enable_dml_decompression;
+-- gh issue #7342
+CREATE TABLE test_collation (
+    time int8 NOT NULL,
+    device_id int4 NOT NULL,
+    name TEXT NOT NULL,
+    CONSTRAINT test_collation_pkey PRIMARY KEY (time, device_id, name)
+);
+SELECT create_hypertable('test_collation', 'time', chunk_time_interval => 2419200000);
+      create_hypertable
+------------------------------
+ (11,public,test_collation,t)
+(1 row)
+
+ALTER TABLE test_collation
+SET (
+    timescaledb.compress,
+    timescaledb.compress_segmentby = 'device_id',
+    timescaledb.compress_orderby = 'time DESC, name'
+);
+INSERT INTO "test_collation"
+    ("time", "device_id", "name")
+VALUES
+    (1609477200000, 41, 'val1'),
+    (1609478100000, 41, 'val1')
+ON CONFLICT DO NOTHING;
+SELECT compress_chunk(ch) FROM show_chunks('test_collation') ch;
+INFO: using tuplesort to scan rows from "_hyper_11_15_chunk" for compression
+              compress_chunk
+------------------------------------------
+ _timescaledb_internal._hyper_11_15_chunk
+(1 row)
+
+INSERT INTO "test_collation"
+    ("device_id", "time", "name")
+VALUES
+    (41, 1609477200000, 'val1'),
+    (41, 1609478100000, 'val1')
+ON CONFLICT DO NOTHING;
+INFO: Using index scan with scan keys: index 1, heap 4, memory 2.

tsl/test/sql/compression_conflicts.sql

Lines changed: 31 additions & 0 deletions
@@ -487,3 +487,34 @@ DO NOTHING;
 -- Even a regular insert will fail due to unique constrant checks for dml decompression
 INSERT INTO compressed_ht VALUES ('2022-01-24 01:10:28.192199+05:30', '7', 0.876, 4.123, 'new insert row');
 \set ON_ERROR_STOP 1
+
+RESET timescaledb.enable_dml_decompression;
+
+-- gh issue #7342
+CREATE TABLE test_collation (
+    time int8 NOT NULL,
+    device_id int4 NOT NULL,
+    name TEXT NOT NULL,
+    CONSTRAINT test_collation_pkey PRIMARY KEY (time, device_id, name)
+);
+SELECT create_hypertable('test_collation', 'time', chunk_time_interval => 2419200000);
+ALTER TABLE test_collation
+SET (
+    timescaledb.compress,
+    timescaledb.compress_segmentby = 'device_id',
+    timescaledb.compress_orderby = 'time DESC, name'
+);
+INSERT INTO "test_collation"
+    ("time", "device_id", "name")
+VALUES
+    (1609477200000, 41, 'val1'),
+    (1609478100000, 41, 'val1')
+ON CONFLICT DO NOTHING;
+SELECT compress_chunk(ch) FROM show_chunks('test_collation') ch;
+INSERT INTO "test_collation"
+    ("device_id", "time", "name")
+VALUES
+    (41, 1609477200000, 'val1'),
+    (41, 1609478100000, 'val1')
+ON CONFLICT DO NOTHING;
+
