Skip to content

Commit 8eb4f4e

Browse files
committed
Add support for ALTER COLUMN SET NOT NULL
Since `SET NOT NULL` only does a full table scan to verify that there are no nulls, this should work without having to modify any data in the table. However, it does not work for compressed chunks that use the `heap` table access method, since `SET NOT NULL` relies on a normal table scan; a check is added to reject that case.
1 parent 601c367 commit 8eb4f4e

File tree

8 files changed

+273
-84
lines changed

8 files changed

+273
-84
lines changed

.unreleased/pr_7463

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Implements: #7463: Support SET NOT NULL on compressed hypertables

src/process_utility.c

Lines changed: 51 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -258,7 +258,8 @@ check_alter_table_allowed_on_ht_with_compression(Hypertable *ht, AlterTableStmt
258258
/*
259259
* ALLOWED:
260260
*
261-
* This is a whitelist of allowed commands.
261+
* This is a whitelist of allowed commands. Note that it is still possible that the
262+
* operation is aborted inside tsl_ddl_command_start().
262263
*/
263264
case AT_AddIndex:
264265
case AT_AddConstraint:
@@ -281,6 +282,7 @@ check_alter_table_allowed_on_ht_with_compression(Hypertable *ht, AlterTableStmt
281282
case AT_ReAddStatistics:
282283
case AT_SetCompression:
283284
case AT_DropNotNull:
285+
case AT_SetNotNull:
284286
#if PG15_GE
285287
case AT_SetAccessMethod:
286288
#endif
@@ -2564,19 +2566,50 @@ process_altertable_validate_constraint_end(Hypertable *ht, AlterTableCmd *cmd)
25642566
}
25652567

25662568
static void
2567-
process_altertable_drop_not_null(Hypertable *ht, AlterTableCmd *cmd)
2569+
validate_uses_hypercore(Hypertable *ht, Oid chunk_relid, void *arg)
25682570
{
2569-
int i;
2571+
AlterTableCmd *cmd = (AlterTableCmd *) arg;
2572+
Chunk *chunk = ts_chunk_get_by_relid(chunk_relid, true);
2573+
if (cmd->subtype == AT_SetNotNull)
2574+
{
2575+
if (ts_chunk_is_compressed(chunk) && !ts_is_hypercore_am(chunk->amoid))
2576+
ereport(ERROR,
2577+
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2578+
errmsg("operation not supported on compressed hypertables not using table "
2579+
"access method"),
2580+
errdetail("Operation is not supported on compressed hypertables or chunks "
2581+
"that are not using the table access method. "),
2582+
errhint("Use ALTER TABLE %s.%s SET ACCESS METHOD hypercore to change access "
2583+
"method.",
2584+
NameStr(chunk->fd.schema_name),
2585+
NameStr(chunk->fd.table_name))));
2586+
}
2587+
}
25702588

2571-
for (i = 0; i < ht->space->num_dimensions; i++)
2589+
/*
2590+
* This function checks that we are not dropping NOT NULL from bad columns and
2591+
* that all chunks support the modification.
2592+
*/
2593+
static void
2594+
process_altertable_alter_not_null_start(Hypertable *ht, AlterTableCmd *cmd)
2595+
{
2596+
if (cmd->subtype == AT_SetNotNull)
25722597
{
2573-
Dimension *dim = &ht->space->dimensions[i];
2598+
foreach_chunk(ht, validate_uses_hypercore, cmd);
2599+
}
25742600

2575-
if (IS_OPEN_DIMENSION(dim) &&
2576-
strncmp(NameStr(dim->fd.column_name), cmd->name, NAMEDATALEN) == 0)
2577-
ereport(ERROR,
2578-
(errcode(ERRCODE_TS_OPERATION_NOT_SUPPORTED),
2579-
errmsg("cannot drop not-null constraint from a time-partitioned column")));
2601+
if (cmd->subtype == AT_DropNotNull)
2602+
{
2603+
for (int i = 0; i < ht->space->num_dimensions; i++)
2604+
{
2605+
Dimension *dim = &ht->space->dimensions[i];
2606+
2607+
if (IS_OPEN_DIMENSION(dim) &&
2608+
strncmp(NameStr(dim->fd.column_name), cmd->name, NAMEDATALEN) == 0)
2609+
ereport(ERROR,
2610+
(errcode(ERRCODE_TS_OPERATION_NOT_SUPPORTED),
2611+
errmsg("cannot drop not-null constraint from a time-partitioned column")));
2612+
}
25802613
}
25812614
}
25822615

@@ -3803,9 +3836,10 @@ process_altertable_start_table(ProcessUtilityArgs *args)
38033836
verify_constraint_hypertable(ht, cmd->def);
38043837
}
38053838
break;
3839+
case AT_SetNotNull:
38063840
case AT_DropNotNull:
38073841
if (ht != NULL)
3808-
process_altertable_drop_not_null(ht, cmd);
3842+
process_altertable_alter_not_null_start(ht, cmd);
38093843
break;
38103844
case AT_AddColumn:
38113845
#if PG16_LT
@@ -4185,8 +4219,8 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
41854219
process_altertable_validate_constraint_end(ht, cmd);
41864220
break;
41874221
case AT_DropCluster:
4188-
foreach_chunk(ht, process_altertable_chunk, cmd);
4189-
break;
4222+
case AT_SetNotNull:
4223+
case AT_DropNotNull:
41904224
case AT_SetRelOptions:
41914225
case AT_ResetRelOptions:
41924226
case AT_ReplaceRelOptions:
@@ -4195,9 +4229,6 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
41954229
case AT_ResetOptions:
41964230
case AT_ReAddStatistics:
41974231
case AT_SetCompression:
4198-
/* Avoid running this command for distributed hypertable chunks
4199-
* since PostgreSQL currently does not allow to alter
4200-
* storage options for a foreign table. */
42014232
foreach_chunk(ht, process_altertable_chunk, cmd);
42024233
break;
42034234
case AT_SetTableSpace:
@@ -4213,8 +4244,6 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
42134244
case AT_SetStorage:
42144245
case AT_ColumnDefault:
42154246
case AT_CookedColumnDefault:
4216-
case AT_SetNotNull:
4217-
case AT_DropNotNull:
42184247
case AT_AddOf:
42194248
case AT_DropOf:
42204249
case AT_AddIdentity:
@@ -4494,8 +4523,8 @@ process_reassign_owned_start(ProcessUtilityArgs *args)
44944523
Oid newrole_oid = get_rolespec_oid(stmt->newrole, false);
44954524
HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free);
44964525

4497-
/* We do not need to check privileges here since ReassignOwnedObjects() will check the
4498-
* privileges and error out if they are not correct. */
4526+
/* We do not need to check privileges here since ReassignOwnedObjects() will check
4527+
* the privileges and error out if they are not correct. */
44994528
ts_bgw_job_update_owner(ti->scanrel, tuple, ts_scanner_get_tupledesc(ti), newrole_oid);
45004529

45014530
if (should_free)
@@ -4631,7 +4660,8 @@ process_create_stmt(ProcessUtilityArgs *args)
46314660
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
46324661
errmsg("hypercore access method not supported on \"%s\"", stmt->relation->relname),
46334662
errdetail("The hypercore access method is only supported for hypertables."),
4634-
errhint("It does not make sense to set the default access method for all tables "
4663+
errhint("It does not make sense to set the default access method for all "
4664+
"tables "
46354665
"to \"%s\" since it is only supported for hypertables.",
46364666
TS_HYPERCORE_TAM_NAME));
46374667

tsl/src/hypercore/utils.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
#include <catalog/pg_class.h>
1313
#include <commands/defrem.h>
1414
#include <nodes/makefuncs.h>
15+
#include <postgres_ext.h>
1516
#include <storage/lmgr.h>
1617
#include <storage/lockdefs.h>
1718
#include <utils/builtins.h>

tsl/test/expected/compression_ddl.out

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2591,4 +2591,28 @@ SELECT count(*) FROM test2 WHERE i IS NULL;
25912591
1
25922592
(1 row)
25932593

2594-
SET client_min_messages = NOTICE;
2594+
SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
2595+
count
2596+
-------
2597+
28
2598+
(1 row)
2599+
2600+
SELECT count(*) FROM test2 WHERE i IS NULL;
2601+
count
2602+
-------
2603+
1
2604+
(1 row)
2605+
2606+
\set ON_ERROR_STOP 0
2607+
ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
2608+
ERROR: operation not supported on compressed hypertables not using table access method
2609+
DELETE FROM test2 WHERE i IS NULL;
2610+
SELECT count(*) FROM test2 WHERE i IS NULL;
2611+
count
2612+
-------
2613+
0
2614+
(1 row)
2615+
2616+
ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
2617+
ERROR: operation not supported on compressed hypertables not using table access method
2618+
\set ON_ERROR_STOP 1

tsl/test/expected/compression_errors-16.out

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,7 @@ DETAIL: Cannot drop column that is a hypertable partitioning (space or time) di
239239
ALTER TABLE foo DROP COLUMN b;
240240
ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
241241
ALTER TABLE foo ALTER COLUMN t SET NOT NULL;
242-
ERROR: operation not supported on hypertables that have compression enabled
242+
ERROR: column "t" of relation "_hyper_10_2_chunk" contains null values
243243
ALTER TABLE foo RESET (timescaledb.compress);
244244
ERROR: compression options cannot be reset
245245
--can add constraints as long as no data is compressed

tsl/test/expected/hypercore_ddl.out

Lines changed: 137 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -39,10 +39,6 @@ select compress_chunk(show_chunks('readings'), hypercore_use_access_method => tr
3939
_timescaledb_internal._hyper_1_4_chunk
4040
(4 rows)
4141

42-
-- Insert some extra data to get some non-compressed data as well.
43-
insert into readings (time, location, device, temp, humidity, jdata)
44-
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
45-
from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
4642
select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
4743
chunk | amname
4844
----------------------------------------+-----------
@@ -52,10 +48,144 @@ select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
5248
_timescaledb_internal._hyper_1_4_chunk | hypercore
5349
(4 rows)
5450

55-
-- Pick a chunk to truncate that is not the first chunk. This is
51+
----------------------------------------------------------------
52+
-- Test ALTER TABLE .... ALTER COLUMN commands
53+
-- This should fail since "location" is NOT NULL
54+
\set ON_ERROR_STOP 0
55+
insert into readings(time,device,temp,humidity,jdata)
56+
values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
57+
ERROR: null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
58+
\set ON_ERROR_STOP 1
59+
-- Test altering column definitions to drop NOT NULL
60+
alter table readings alter column location drop not null;
61+
-- This should now work since we allow NULL values
62+
insert into readings(time,device,temp,humidity,jdata)
63+
values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
64+
select count(*) from readings where location is null;
65+
count
66+
-------
67+
1
68+
(1 row)
69+
70+
select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
71+
NOTICE: chunk "_hyper_1_1_chunk" is already compressed
72+
NOTICE: chunk "_hyper_1_2_chunk" is already compressed
73+
NOTICE: chunk "_hyper_1_3_chunk" is already compressed
74+
NOTICE: chunk "_hyper_1_4_chunk" is already compressed
75+
compress_chunk
76+
-----------------------------------------
77+
_timescaledb_internal._hyper_1_1_chunk
78+
_timescaledb_internal._hyper_1_2_chunk
79+
_timescaledb_internal._hyper_1_3_chunk
80+
_timescaledb_internal._hyper_1_4_chunk
81+
_timescaledb_internal._hyper_1_10_chunk
82+
(5 rows)
83+
84+
select count(*) from readings where location is null;
85+
count
86+
-------
87+
1
88+
(1 row)
89+
90+
-- Pick a chunk to play with that is not the first chunk. This is
5691
-- mostly a precaution to make sure that there is no bias towards the
5792
-- first chunk and we could just as well pick the first chunk.
5893
select chunk from show_chunks('readings') x(chunk) limit 1 offset 3 \gset
94+
-- We should not be able to set the not null before we have removed
95+
-- the null rows in the table. This works for hypercore-compressed
96+
-- chunks but not for heap-compressed chunks.
97+
\set ON_ERROR_STOP 0
98+
alter table readings alter column location set not null;
99+
ERROR: column "location" of relation "_hyper_1_10_chunk" contains null values
100+
\set ON_ERROR_STOP 1
101+
delete from readings where location is null;
102+
select count(*) from readings where location is null;
103+
count
104+
-------
105+
0
106+
(1 row)
107+
108+
\d readings
109+
Table "public.readings"
110+
Column | Type | Collation | Nullable | Default
111+
----------+--------------------------+-----------+----------+---------
112+
time | timestamp with time zone | | not null |
113+
location | integer | | |
114+
device | integer | | not null |
115+
temp | numeric(4,1) | | |
116+
humidity | double precision | | |
117+
jdata | jsonb | | |
118+
Indexes:
119+
"readings_time_key" UNIQUE CONSTRAINT, btree ("time")
120+
Triggers:
121+
ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
122+
Number of child tables: 5 (Use \d+ to list them.)
123+
124+
\d :chunk
125+
Table "_timescaledb_internal._hyper_1_4_chunk"
126+
Column | Type | Collation | Nullable | Default
127+
----------+--------------------------+-----------+----------+---------
128+
time | timestamp with time zone | | not null |
129+
location | integer | | |
130+
device | integer | | not null |
131+
temp | numeric(4,1) | | |
132+
humidity | double precision | | |
133+
jdata | jsonb | | |
134+
Indexes:
135+
"4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
136+
Check constraints:
137+
"constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
138+
Inherits: readings
139+
140+
alter table readings alter column location set not null;
141+
\d readings
142+
Table "public.readings"
143+
Column | Type | Collation | Nullable | Default
144+
----------+--------------------------+-----------+----------+---------
145+
time | timestamp with time zone | | not null |
146+
location | integer | | not null |
147+
device | integer | | not null |
148+
temp | numeric(4,1) | | |
149+
humidity | double precision | | |
150+
jdata | jsonb | | |
151+
Indexes:
152+
"readings_time_key" UNIQUE CONSTRAINT, btree ("time")
153+
Triggers:
154+
ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
155+
Number of child tables: 5 (Use \d+ to list them.)
156+
157+
\d :chunk
158+
Table "_timescaledb_internal._hyper_1_4_chunk"
159+
Column | Type | Collation | Nullable | Default
160+
----------+--------------------------+-----------+----------+---------
161+
time | timestamp with time zone | | not null |
162+
location | integer | | not null |
163+
device | integer | | not null |
164+
temp | numeric(4,1) | | |
165+
humidity | double precision | | |
166+
jdata | jsonb | | |
167+
Indexes:
168+
"4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
169+
Check constraints:
170+
"constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
171+
Inherits: readings
172+
173+
select count(*) from readings where location is null;
174+
count
175+
-------
176+
0
177+
(1 row)
178+
179+
----------------------------------------------------------------
180+
-- TRUNCATE test
181+
-- We keep the truncate test last in the file to avoid having to
182+
-- re-populate it.
183+
-- Insert some extra data to get some non-compressed data as
184+
-- well. This checks that truncate will deal with write-store
185+
-- (WS) and read-store (RS)
186+
insert into readings (time, location, device, temp, humidity, jdata)
187+
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
188+
from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
59189
-- Check that the number of bytes in the table before and after the
60190
-- truncate.
61191
--
@@ -68,7 +198,7 @@ select pg_table_size(chunk) as chunk_size,
68198
where chunk = :'chunk'::regclass;
69199
chunk_size | compressed_chunk_size
70200
------------+-----------------------
71-
40960 | 57344
201+
49152 | 57344
72202
(1 row)
73203

74204
truncate :chunk;
@@ -88,7 +218,7 @@ select (select count(*) from readings) tuples,
88218
(select count(*) from show_chunks('readings')) chunks;
89219
tuples | chunks
90220
--------+--------
91-
1560 | 4
221+
1560 | 5
92222
(1 row)
93223

94224
truncate readings;
@@ -99,32 +229,3 @@ select (select count(*) from readings) tuples,
99229
0 | 0
100230
(1 row)
101231

102-
\set ON_ERROR_STOP 0
103-
insert into readings(time,device,temp,humidity,jdata)
104-
values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
105-
ERROR: null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
106-
\set ON_ERROR_STOP 1
107-
-- Test altering column definitions
108-
alter table readings
109-
alter column location drop not null;
110-
-- This should now work.
111-
insert into readings(time,device,temp,humidity,jdata)
112-
values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
113-
select count(*) from readings where location is null;
114-
count
115-
-------
116-
1
117-
(1 row)
118-
119-
select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
120-
compress_chunk
121-
-----------------------------------------
122-
_timescaledb_internal._hyper_1_10_chunk
123-
(1 row)
124-
125-
select count(*) from readings where location is null;
126-
count
127-
-------
128-
1
129-
(1 row)
130-

0 commit comments

Comments
 (0)