
Commit 0b52822 (parent: 4f1e2d8)

Add support for ALTER COLUMN SET NOT NULL
Since `SET NOT NULL` only does a full scan of the table to verify that there are no nulls, it can be supported without modifying any data in the table. However, this does not work for compressed chunks that use the `heap` table access method, since the verification relies on a normal table scan, so a check is added that rejects `SET NOT NULL` in that case.
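
A minimal sketch of the resulting behavior, assuming a hypothetical hypertable `metrics` with a nullable column `device` (names are illustrative; the committed tests exercise the same flow on `test2` and `readings`):

-- Succeeds on uncompressed and hypercore-compressed chunks: the column is
-- verified with a scan and no data in the table is modified.
ALTER TABLE metrics ALTER COLUMN device SET NOT NULL;
-- Rejected if any chunk still holds compressed data under the heap access method:
-- ERROR:  operation not supported on compressed chunks not using the "hypercore" table access method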

7 files changed: +299 additions, −70 deletions

src/process_utility.c

Lines changed: 49 additions & 7 deletions
@@ -281,6 +281,7 @@ check_alter_table_allowed_on_ht_with_compression(Hypertable *ht, AlterTableStmt
         case AT_ReAddStatistics:
         case AT_SetCompression:
         case AT_DropNotNull:
+        case AT_SetNotNull:
 #if PG15_GE
         case AT_SetAccessMethod:
 #endif
@@ -2563,11 +2564,50 @@ process_altertable_validate_constraint_end(Hypertable *ht, AlterTableCmd *cmd)
     foreach_chunk(ht, validate_hypertable_constraint, cmd);
 }
 
+/*
+ * Validate that SET NOT NULL is ok for this chunk.
+ *
+ * Throws an error if SET NOT NULL is not allowed on this chunk; right now it
+ * is allowed only for fully decompressed chunks and for chunks using the
+ * Hypercore table access method.
+ */
+static void
+validate_set_not_null(Hypertable *ht, Oid chunk_relid, void *arg)
+{
+    Chunk *chunk = ts_chunk_get_by_relid(chunk_relid, true);
+    if (ts_chunk_is_compressed(chunk) && !ts_is_hypercore_am(chunk->amoid))
+    {
+        ereport(ERROR,
+                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                 errmsg("operation not supported on compressed chunks not using the "
+                        "\"hypercore\" table access method"),
+                 errdetail("Chunk %s.%s is using the heap table access method and has compressed "
+                           "data.",
+                           NameStr(chunk->fd.schema_name),
+                           NameStr(chunk->fd.table_name)),
+                 errhint("Either decompress all chunks of the hypertable or use \"ALTER TABLE "
+                         "%s.%s SET ACCESS METHOD hypercore\" on all chunks to change access "
+                         "method.",
+                         NameStr(chunk->fd.schema_name),
+                         NameStr(chunk->fd.table_name))));
+    }
+}
+
+/*
+ * This function checks that we are not dropping NOT NULL from bad columns and
+ * that all chunks support the modification.
+ */
 static void
-process_altertable_drop_not_null(Hypertable *ht, AlterTableCmd *cmd)
+process_altertable_alter_not_null_start(Hypertable *ht, AlterTableCmd *cmd)
 {
     int i;
 
+    if (cmd->subtype == AT_SetNotNull)
+        foreach_chunk(ht, validate_set_not_null, cmd);
+
+    if (cmd->subtype != AT_DropNotNull)
+        return;
+
     for (i = 0; i < ht->space->num_dimensions; i++)
     {
         Dimension *dim = &ht->space->dimensions[i];
@@ -3803,9 +3843,10 @@ process_altertable_start_table(ProcessUtilityArgs *args)
                 verify_constraint_hypertable(ht, cmd->def);
             }
             break;
+        case AT_SetNotNull:
         case AT_DropNotNull:
             if (ht != NULL)
-                process_altertable_drop_not_null(ht, cmd);
+                process_altertable_alter_not_null_start(ht, cmd);
             break;
         case AT_AddColumn:
 #if PG16_LT
@@ -4187,6 +4228,8 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
         case AT_DropCluster:
             foreach_chunk(ht, process_altertable_chunk, cmd);
             break;
+        case AT_SetNotNull:
+        case AT_DropNotNull:
         case AT_SetRelOptions:
         case AT_ResetRelOptions:
         case AT_ReplaceRelOptions:
@@ -4213,8 +4256,6 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
         case AT_SetStorage:
         case AT_ColumnDefault:
         case AT_CookedColumnDefault:
-        case AT_SetNotNull:
-        case AT_DropNotNull:
         case AT_AddOf:
         case AT_DropOf:
         case AT_AddIdentity:
@@ -4494,8 +4535,8 @@ process_reassign_owned_start(ProcessUtilityArgs *args)
         Oid newrole_oid = get_rolespec_oid(stmt->newrole, false);
         HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free);
 
-        /* We do not need to check privileges here since ReassignOwnedObjects() will check the
-         * privileges and error out if they are not correct. */
+        /* We do not need to check privileges here since ReassignOwnedObjects() will check
+         * the privileges and error out if they are not correct. */
         ts_bgw_job_update_owner(ti->scanrel, tuple, ts_scanner_get_tupledesc(ti), newrole_oid);
 
         if (should_free)
@@ -4631,7 +4672,8 @@ process_create_stmt(ProcessUtilityArgs *args)
              errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
              errmsg("hypercore access method not supported on \"%s\"", stmt->relation->relname),
              errdetail("The hypercore access method is only supported for hypertables."),
-             errhint("It does not make sense to set the default access method for all tables "
+             errhint("It does not make sense to set the default access method for all "
+                     "tables "
                      "to \"%s\" since it is only supported for hypertables.",
                      TS_HYPERCORE_TAM_NAME));
 
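The errhint in validate_set_not_null above names two ways out when a chunk is heap-compressed. A hedged sketch of both, again on a hypothetical hypertable `metrics` (the chunk name is illustrative):

-- Option 1: decompress all chunks, then set the constraint.
SELECT decompress_chunk(ch) FROM show_chunks('metrics') ch;
ALTER TABLE metrics ALTER COLUMN device SET NOT NULL;
-- Option 2 (as the hint suggests): convert the chunks to the hypercore
-- access method so the verifying scan can run against compressed data.
ALTER TABLE _timescaledb_internal._hyper_2_1_chunk SET ACCESS METHOD hypercore;
ALTER TABLE metrics ALTER COLUMN device SET NOT NULL;
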
tsl/src/hypercore/utils.c

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@
 #include <catalog/pg_class.h>
 #include <commands/defrem.h>
 #include <nodes/makefuncs.h>
+#include <postgres_ext.h>
 #include <storage/lmgr.h>
 #include <storage/lockdefs.h>
 #include <utils/builtins.h>

tsl/test/expected/compression_ddl.out

Lines changed: 25 additions & 1 deletion
@@ -2591,4 +2591,28 @@ SELECT count(*) FROM test2 WHERE i IS NULL;
      1
 (1 row)
 
-SET client_min_messages = NOTICE;
+SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
+ count
+-------
+    28
+(1 row)
+
+SELECT count(*) FROM test2 WHERE i IS NULL;
+ count
+-------
+     1
+(1 row)
+
+\set ON_ERROR_STOP 0
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+ERROR:  operation not supported on compressed chunks not using the "hypercore" table access method
+DELETE FROM test2 WHERE i IS NULL;
+SELECT count(*) FROM test2 WHERE i IS NULL;
+ count
+-------
+     0
+(1 row)
+
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+ERROR:  operation not supported on compressed chunks not using the "hypercore" table access method
+\set ON_ERROR_STOP 1
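
Note that the second ALTER above still fails after the DELETE: removing the NULL rows does not leave the chunks decompressed, so they still hold compressed data under the heap access method and the new check still fires. Following the error hint, a hedged sketch of bringing the test table into a state where the command succeeds:

SELECT count(decompress_chunk(ch)) FROM show_chunks('test2') ch;
ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;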

tsl/test/expected/compression_errors-16.out

Lines changed: 1 addition & 1 deletion
@@ -239,7 +239,7 @@ DETAIL:  Cannot drop column that is a hypertable partitioning (space or time) di
 ALTER TABLE foo DROP COLUMN b;
 ERROR:  cannot drop orderby or segmentby column from a hypertable with compression enabled
 ALTER TABLE foo ALTER COLUMN t SET NOT NULL;
-ERROR:  operation not supported on hypertables that have compression enabled
+ERROR:  column "t" of relation "_hyper_10_2_chunk" contains null values
 ALTER TABLE foo RESET (timescaledb.compress);
 ERROR:  compression options cannot be reset
 --can add constraints as long as no data is compressed

tsl/test/expected/hypercore_ddl.out

Lines changed: 156 additions & 36 deletions
@@ -39,10 +39,6 @@ select compress_chunk(show_chunks('readings'), hypercore_use_access_method => tr
  _timescaledb_internal._hyper_1_4_chunk
 (4 rows)
 
--- Insert some extra data to get some non-compressed data as well.
-insert into readings (time, location, device, temp, humidity, jdata)
-select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
-from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
                  chunk                  |  amname
 ----------------------------------------+-----------
@@ -52,10 +48,163 @@ select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
  _timescaledb_internal._hyper_1_4_chunk | hypercore
 (4 rows)
 
--- Pick a chunk to truncate that is not the first chunk. This is
+-- Pick a chunk to play with that is not the first chunk. This is
 -- mostly a precaution to make sure that there is no bias towards the
 -- first chunk and we could just as well pick the first chunk.
 select chunk from show_chunks('readings') x(chunk) limit 1 offset 3 \gset
+----------------------------------------------------------------
+-- Test ALTER TABLE .... ALTER COLUMN commands
+-- This should fail since "location" is NOT NULL
+\set ON_ERROR_STOP 0
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+ERROR:  null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
+\set ON_ERROR_STOP 1
+-- Test altering column definitions to drop NOT NULL and check that it
+-- propagates to the chunks. We just pick one chunk here and check
+-- that the setting propagates.
+alter table readings alter column location drop not null;
+\d readings
+                       Table "public.readings"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           |          |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Triggers:
+    ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
+Number of child tables: 4 (Use \d+ to list them.)
+
+\d :chunk
+          Table "_timescaledb_internal._hyper_1_4_chunk"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           |          |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Check constraints:
+    "constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
+Inherits: readings
+
+-- This should now work since we allow NULL values
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+select count(*) from readings where location is null;
+ count
+-------
+     1
+(1 row)
+
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+NOTICE:  chunk "_hyper_1_1_chunk" is already compressed
+NOTICE:  chunk "_hyper_1_2_chunk" is already compressed
+NOTICE:  chunk "_hyper_1_3_chunk" is already compressed
+NOTICE:  chunk "_hyper_1_4_chunk" is already compressed
+             compress_chunk
+-----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_10_chunk
+(5 rows)
+
+select count(*) from readings where location is null;
+ count
+-------
+     1
+(1 row)
+
+-- We insert another row with nulls, that will end up in the
+-- non-compressed region.
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-02 00:00:10', 1, 66.0, 66.0, '{"magic": "more"}'::jsonb);
+-- We should not be able to set the not null before we have removed
+-- the null rows in the table. This works for hypercore-compressed
+-- chunks but not for heap-compressed chunks.
+\set ON_ERROR_STOP 0
+alter table readings alter column location set not null;
+ERROR:  column "location" of relation "_hyper_1_10_chunk" contains null values
+\set ON_ERROR_STOP 1
+delete from readings where location is null;
+-- Compress the data to make sure that we are not working on
+-- non-compressed data.
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+             compress_chunk
+-----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_10_chunk
+ _timescaledb_internal._hyper_1_12_chunk
+(6 rows)
+
+select count(*) from readings where location is null;
+ count
+-------
+     0
+(1 row)
+
+alter table readings alter column location set not null;
+\d readings
+                       Table "public.readings"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           | not null |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Triggers:
+    ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
+Number of child tables: 6 (Use \d+ to list them.)
+
+\d :chunk
+          Table "_timescaledb_internal._hyper_1_4_chunk"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           | not null |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Check constraints:
+    "constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
+Inherits: readings
+
+select count(*) from readings where location is null;
+ count
+-------
+     0
+(1 row)
+
+----------------------------------------------------------------
+-- TRUNCATE test
+-- We keep the truncate test last in the file to avoid having to
+-- re-populate it.
+-- Insert some extra data to get some non-compressed data as
+-- well. This checks that truncate will deal with write-store
+-- (WS) and read-store (RS)
+insert into readings (time, location, device, temp, humidity, jdata)
+select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
+from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 -- Check that the number of bytes in the table before and after the
 -- truncate.
 --
@@ -68,7 +217,7 @@ select pg_table_size(chunk) as chunk_size,
 where chunk = :'chunk'::regclass;
  chunk_size | compressed_chunk_size
 ------------+-----------------------
-      40960 |                 57344
+      49152 |                 73728
 (1 row)
 
 truncate :chunk;
@@ -88,7 +237,7 @@ select (select count(*) from readings) tuples,
        (select count(*) from show_chunks('readings')) chunks;
  tuples | chunks
 --------+--------
-   1560 |      4
+   1560 |      6
 (1 row)
 
 truncate readings;
@@ -99,32 +248,3 @@ select (select count(*) from readings) tuples,
       0 |      0
 (1 row)
 
-\set ON_ERROR_STOP 0
-insert into readings(time,device,temp,humidity,jdata)
-values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-ERROR:  null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
-\set ON_ERROR_STOP 1
--- Test altering column definitions
-alter table readings
-      alter column location drop not null;
--- This should now work.
-insert into readings(time,device,temp,humidity,jdata)
-values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-select count(*) from readings where location is null;
- count
--------
-     1
-(1 row)
-
-select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
-             compress_chunk
------------------------------------------
- _timescaledb_internal._hyper_1_10_chunk
-(1 row)
-
-select count(*) from readings where location is null;
- count
--------
-     1
-(1 row)
-
tsl/test/sql/compression_ddl.sql

Lines changed: 9 additions & 1 deletion
@@ -1069,4 +1069,12 @@ SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
 SELECT count(*) FROM test2 WHERE i IS NULL;
 SELECT count(decompress_chunk(ch)) FROM show_chunks('test2') ch;
 SELECT count(*) FROM test2 WHERE i IS NULL;
-SET client_min_messages = NOTICE;
+SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
+SELECT count(*) FROM test2 WHERE i IS NULL;
+
+\set ON_ERROR_STOP 0
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+DELETE FROM test2 WHERE i IS NULL;
+SELECT count(*) FROM test2 WHERE i IS NULL;
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+\set ON_ERROR_STOP 1
