@@ -39,10 +39,6 @@ select compress_chunk(show_chunks('readings'), hypercore_use_access_method => tr
_timescaledb_internal._hyper_1_4_chunk
(4 rows)

- -- Insert some extra data to get some non-compressed data as well.
- insert into readings (time, location, device, temp, humidity, jdata)
- select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
- from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
chunk | amname
----------------------------------------+-----------
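The chunk_info view queried above is a helper defined in the test setup, outside this diff; it is not part of the TimescaleDB API. A minimal sketch of a view with the same shape, assuming it only needs to map each chunk of a hypertable to the access method of its relation (only the view name and columns come from the query above; the body is a guess):

-- Hypothetical stand-in for the chunk_info test helper; the real
-- definition lives in the test setup files, outside this diff.
create view chunk_info as
select format('%I.%I', ch.hypertable_schema, ch.hypertable_name)::regclass as hypertable,
       format('%I.%I', ch.chunk_schema, ch.chunk_name)::regclass as chunk,
       am.amname
  from timescaledb_information.chunks ch
  join pg_class cl on cl.oid = format('%I.%I', ch.chunk_schema, ch.chunk_name)::regclass
  join pg_am am on am.oid = cl.relam;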
@@ -52,10 +48,163 @@ select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
_timescaledb_internal._hyper_1_4_chunk | hypercore
(4 rows)

- -- Pick a chunk to truncate that is not the first chunk. This is
+ -- Pick a chunk to play with that is not the first chunk. This is
-- mostly a precaution to make sure that there is no bias towards the
-- first chunk; we could just as well have picked the first chunk.
select chunk from show_chunks('readings') x(chunk) limit 1 offset 3 \gset
+ ----------------------------------------------------------------
+ -- Test ALTER TABLE .... ALTER COLUMN commands
+ -- This should fail since "location" is NOT NULL
+ \set ON_ERROR_STOP 0
+ insert into readings(time,device,temp,humidity,jdata)
+ values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+ ERROR: null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
+ \set ON_ERROR_STOP 1
+ -- Test altering column definitions to drop NOT NULL and check that
+ -- the change propagates to the chunks. We just pick one chunk here
+ -- to verify the propagation.
+ alter table readings alter column location drop not null;
+ \d readings
+ Table "public.readings"
+ Column | Type | Collation | Nullable | Default
+ ----------+--------------------------+-----------+----------+---------
+ time | timestamp with time zone | | not null |
+ location | integer | | |
+ device | integer | | not null |
+ temp | numeric(4,1) | | |
+ humidity | double precision | | |
+ jdata | jsonb | | |
+ Indexes:
+ "readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+ Triggers:
+ ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
+ Number of child tables: 4 (Use \d+ to list them.)
+
+ \d :chunk
+ Table "_timescaledb_internal._hyper_1_4_chunk"
+ Column | Type | Collation | Nullable | Default
+ ----------+--------------------------+-----------+----------+---------
+ time | timestamp with time zone | | not null |
+ location | integer | | |
+ device | integer | | not null |
+ temp | numeric(4,1) | | |
+ humidity | double precision | | |
+ jdata | jsonb | | |
+ Indexes:
+ "4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+ Check constraints:
+ "constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
+ Inherits: readings
+
+ -- This should now work since we allow NULL values
+ insert into readings(time,device,temp,humidity,jdata)
+ values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+ select count(*) from readings where location is null;
+ count
+ -------
+ 1
+ (1 row)
+
+ select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+ NOTICE: chunk "_hyper_1_1_chunk" is already compressed
+ NOTICE: chunk "_hyper_1_2_chunk" is already compressed
+ NOTICE: chunk "_hyper_1_3_chunk" is already compressed
+ NOTICE: chunk "_hyper_1_4_chunk" is already compressed
+ compress_chunk
+ -----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_10_chunk
+ (5 rows)
+
+ select count(*) from readings where location is null;
+ count
+ -------
+ 1
+ (1 row)
+
+ -- We insert another row with NULLs that will end up in the
+ -- non-compressed region.
+ insert into readings(time,device,temp,humidity,jdata)
+ values ('2024-01-02 00:00:10', 1, 66.0, 66.0, '{"magic": "more"}'::jsonb);
+ -- We should not be able to set NOT NULL before we have removed the
+ -- NULL rows from the table. This works for hypercore-compressed
+ -- chunks but not for heap-compressed chunks.
+ \set ON_ERROR_STOP 0
+ alter table readings alter column location set not null;
+ ERROR: column "location" of relation "_hyper_1_10_chunk" contains null values
+ \set ON_ERROR_STOP 1
+ delete from readings where location is null;
+ -- Compress the data to make sure that we are not working on
+ -- non-compressed data.
+ select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+ compress_chunk
+ -----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_10_chunk
+ _timescaledb_internal._hyper_1_12_chunk
+ (6 rows)
+
+ select count(*) from readings where location is null;
+ count
+ -------
+ 0
+ (1 row)
+
+ alter table readings alter column location set not null;
+ \d readings
+ Table "public.readings"
+ Column | Type | Collation | Nullable | Default
+ ----------+--------------------------+-----------+----------+---------
+ time | timestamp with time zone | | not null |
+ location | integer | | not null |
+ device | integer | | not null |
+ temp | numeric(4,1) | | |
+ humidity | double precision | | |
+ jdata | jsonb | | |
+ Indexes:
+ "readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+ Triggers:
+ ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
+ Number of child tables: 6 (Use \d+ to list them.)
+
+ \d :chunk
+ Table "_timescaledb_internal._hyper_1_4_chunk"
+ Column | Type | Collation | Nullable | Default
+ ----------+--------------------------+-----------+----------+---------
+ time | timestamp with time zone | | not null |
+ location | integer | | not null |
+ device | integer | | not null |
+ temp | numeric(4,1) | | |
+ humidity | double precision | | |
+ jdata | jsonb | | |
+ Indexes:
+ "4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+ Check constraints:
+ "constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
+ Inherits: readings
+
+ select count(*) from readings where location is null;
+ count
+ -------
+ 0
+ (1 row)
+
+ ----------------------------------------------------------------
+ -- TRUNCATE test
+ -- We keep the truncate test last in the file to avoid having to
+ -- re-populate it.
+ -- Insert some extra data to get some non-compressed data as
+ -- well. This checks that truncate deals with both the write-store
+ -- (WS) and the read-store (RS).
+ insert into readings (time, location, device, temp, humidity, jdata)
+ select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
+ from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
-- Check the number of bytes in the table before and after the
-- truncate.
--
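Condensed, the NOT NULL round-trip exercised in the hunk above amounts to the following sketch (it reuses the test's readings hypertable and values; it is not additional test code):

-- DROP NOT NULL propagates from the hypertable to every chunk, so a row
-- with a NULL location is accepted afterwards.
alter table readings alter column location drop not null;
insert into readings(time, device, temp, humidity, jdata)
  values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-- SET NOT NULL is validated against every chunk, compressed or not, so
-- the NULL rows must be deleted first or the ALTER fails.
delete from readings where location is null;
alter table readings alter column location set not null;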
@@ -68,7 +217,7 @@ select pg_table_size(chunk) as chunk_size,
where chunk = :'chunk'::regclass;
chunk_size | compressed_chunk_size
------------+-----------------------
- 40960 | 57344
+ 49152 | 73728
(1 row)

truncate :chunk;
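A natural follow-up, given here only as a hypothetical check that is not part of the test: the same psql variable can be used to confirm that the chunk shrank after the TRUNCATE.

-- Hypothetical check (not in the test): the chunk relation should be
-- (near) empty after the TRUNCATE above.
select pg_table_size(:'chunk'::regclass) as chunk_size_after;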
@@ -88,7 +237,7 @@ select (select count(*) from readings) tuples,
(select count(*) from show_chunks('readings')) chunks;
tuples | chunks
--------+--------
- 1560 | 4
+ 1560 | 6
(1 row)

truncate readings;
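Unlike truncating a single chunk, TRUNCATE on the hypertable drops the chunks themselves, so the probe used above reports zero for both counts afterwards, as the next hunk shows. Restated as a standalone query:

-- After TRUNCATE on the hypertable, both the rows and the chunks are gone.
select (select count(*) from readings) as tuples,
       (select count(*) from show_chunks('readings')) as chunks;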
@@ -99,32 +248,3 @@ select (select count(*) from readings) tuples,
0 | 0
(1 row)

- \set ON_ERROR_STOP 0
- insert into readings(time,device,temp,humidity,jdata)
- values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
- ERROR: null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
- \set ON_ERROR_STOP 1
- -- Test altering column definitions
- alter table readings
- alter column location drop not null;
- -- This should now work.
- insert into readings(time,device,temp,humidity,jdata)
- values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
- select count(*) from readings where location is null;
- count
- -------
- 1
- (1 row)
-
- select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
- compress_chunk
- -----------------------------------------
- _timescaledb_internal._hyper_1_10_chunk
- (1 row)
-
- select count(*) from readings where location is null;
- count
- -------
- 1
- (1 row)
-