Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add hypercore chunk tests for triggers #7483

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
252 changes: 252 additions & 0 deletions tsl/test/expected/hypercore_trigger.out
Original file line number Diff line number Diff line change
@@ -0,0 +1,252 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Function to run an explain analyze with and do replacements on the
-- emitted plan. This is intended to be used when the structure of the
-- plan is important, but not the specific chunks scanned nor the
-- number of heap fetches, rows, loops, etc.
create function explain_analyze_anonymize(text) returns setof text
language plpgsql as
$$
declare
ln text;
begin
for ln in
execute format('explain (analyze, costs off, summary off, timing off, decompress_cache_stats) %s', $1)
loop
if trim(both from ln) like 'Group Key:%' then
continue;
end if;
ln := regexp_replace(ln, 'Array Cache Hits: \d+', 'Array Cache Hits: N');
ln := regexp_replace(ln, 'Array Cache Misses: \d+', 'Array Cache Misses: N');
ln := regexp_replace(ln, 'Array Cache Evictions: \d+', 'Array Cache Evictions: N');
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
ln := regexp_replace(ln, '_hyper_\d+_\d+_chunk', '_hyper_I_N_chunk', 1, 0);
return next ln;
end loop;
end;
$$;
create function explain_anonymize(text) returns setof text
language plpgsql as
$$
declare
ln text;
begin
for ln in
execute format('explain (costs off, summary off, timing off) %s', $1)
loop
ln := regexp_replace(ln, 'Array Cache Hits: \d+', 'Array Cache Hits: N');
ln := regexp_replace(ln, 'Array Cache Misses: \d+', 'Array Cache Misses: N');
ln := regexp_replace(ln, 'Array Cache Evictions: \d+', 'Array Cache Evictions: N');
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
ln := regexp_replace(ln, '_hyper_\d+_\d+_chunk', '_hyper_I_N_chunk', 1, 0);
return next ln;
end loop;
end;
$$;
create table :hypertable(
metric_id serial,
created_at timestamptz not null unique,
location_id smallint, --segmentby attribute with index
owner_id bigint, --segmentby attribute without index
device_id bigint, --non-segmentby attribute
temp float8,
humidity float4
);
create index hypertable_location_id_idx on :hypertable (location_id);
create index hypertable_device_id_idx on :hypertable (device_id);
select create_hypertable(:'hypertable', by_range('created_at'));
create_hypertable
-------------------
(1,t)
(1 row)

-- Disable incremental sort to make tests stable
set enable_incremental_sort = false;
select setseed(1);
setseed
---------

(1 row)

-- Insert rows into the tables.
--
-- The timestamps for the original rows will have timestamps every 10
-- seconds. Any other timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table :hypertable set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id, owner_id'
);
-- Get some test chunks as global variables (first and last chunk here)
select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk1
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk1 asc
limit 1 \gset
select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk2 asc
limit 1 offset 1 \gset
create table saved_rows (like :chunk1, new_row bool not null, kind text);
create table count_stmt (inserts int, updates int, deletes int, truncates int);
create function save_insert_row() returns trigger as $$
begin
insert into saved_rows select new.*, true, 'insert';
return new;
end;
$$ language plpgsql;
create function save_update_row() returns trigger as $$
begin
insert into saved_rows select new.*, true, 'update';
insert into saved_rows select old.*, false, 'update';
return new;
end;
$$ language plpgsql;
-- Row-level BEFORE DELETE trigger: saves the deleted row into
-- saved_rows (new_row = false, kind = 'delete').
--
-- Must return OLD: in a DELETE trigger NEW is NULL, and a BEFORE row
-- trigger returning NULL silently skips the operation, so the original
-- "return new" cancelled every delete.
create function save_delete_row() returns trigger as $$
begin
insert into saved_rows select old.*, false, 'delete';
return old;
end;
$$ language plpgsql;
create function count_inserts() returns trigger as $$
begin
insert into count_stmt(inserts) values (1);
return null;
end;
$$ language plpgsql;
create function count_updates() returns trigger as $$
begin
insert into count_stmt(updates) values (1);
return null;
end;
$$ language plpgsql;
create function count_deletes() returns trigger as $$
begin
insert into count_stmt(deletes) values (1);
return null;
end;
$$ language plpgsql;
create function notify_action() returns trigger as $$
begin
raise notice 'table was truncated';
return null;
end;
$$ language plpgsql;
-- Compress all the chunks and make sure that they are compressed
select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)

select chunk_name, compression_status from chunk_compression_stats(:'hypertable');
chunk_name | compression_status
------------------+--------------------
_hyper_1_1_chunk | Compressed
_hyper_1_2_chunk | Compressed
_hyper_1_3_chunk | Compressed
_hyper_1_4_chunk | Compressed
_hyper_1_5_chunk | Compressed
_hyper_1_6_chunk | Compressed
(6 rows)

with the_info as (
select min(created_at) min_created_at,
max(created_at) max_created_at
from :hypertable
)
select min_created_at, max_created_at,
'1m'::interval + min_created_at + (max_created_at - min_created_at) as mid_created_at,
'1m'::interval + max_created_at + (max_created_at - min_created_at) as post_created_at
from the_info \gset
-- Insert a bunch of rows to make sure that we have a mix of
-- uncompressed, partially compressed, and fully compressed chunks.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series(:'mid_created_at'::timestamptz, :'post_created_at', '15m') t;
-- Start by testing insert triggers for both statements and rows. In
-- this case, the trigger will just save away the rows into a separate
-- table and check that we get the same number of rows with the same
-- values.
create trigger save_insert_row_trg before insert on :chunk1 for each row execute function save_insert_row();
create trigger count_inserts_trg before insert on :chunk1 for each statement execute function count_inserts();
insert into :chunk1(created_at, location_id, device_id, owner_id, temp, humidity)
values
('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14),
('2022-06-01 00:02:23', 999, 666, 111, 3.14, 3.14);
select * from saved_rows where kind = 'insert';
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
11523 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | insert
11524 | Wed Jun 01 00:02:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | t | insert
(2 rows)

select count(inserts), count(updates), count(deletes) from count_stmt;
count | count | count
-------+-------+-------
1 | 0 | 0
(1 row)

-- Run update tests
create trigger save_update_row_trg before update on :chunk1 for each row execute function save_update_row();
create trigger count_update_trg before update on :chunk1 for each statement execute function count_updates();
update :chunk1 set temp = 9.99 where device_id = 666;
select * from saved_rows where kind = 'update';
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
11523 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 9.99 | 3.14 | t | update
11523 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | update
11524 | Wed Jun 01 00:02:23 2022 PDT | 999 | 111 | 666 | 9.99 | 3.14 | t | update
11524 | Wed Jun 01 00:02:23 2022 PDT | 999 | 111 | 666 | 3.14 | 3.14 | f | update
(4 rows)

select count(inserts), count(updates), count(deletes) from count_stmt;
count | count | count
-------+-------+-------
1 | 1 | 0
(1 row)

-- Run delete tests
create trigger save_delete_row_trg before delete on :chunk1 for each row execute function save_delete_row();
create trigger count_delete_trg before delete on :chunk1 for each statement execute function count_deletes();
delete from :chunk1 where device_id = 666;
select * from saved_rows where kind = 'delete';
metric_id | created_at | location_id | owner_id | device_id | temp | humidity | new_row | kind
-----------+------------------------------+-------------+----------+-----------+------+----------+---------+--------
11523 | Wed Jun 01 00:01:23 2022 PDT | 999 | 111 | 666 | 9.99 | 3.14 | f | delete
11524 | Wed Jun 01 00:02:23 2022 PDT | 999 | 111 | 666 | 9.99 | 3.14 | f | delete
(2 rows)

select count(inserts), count(updates), count(deletes) from count_stmt;
count | count | count
-------+-------+-------
1 | 1 | 1
(1 row)

-- Check truncate trigger
create trigger notify_truncate after truncate on :chunk1 for each statement execute function notify_action();
truncate :chunk1;
NOTICE: table was truncated
3 changes: 2 additions & 1 deletion tsl/test/sql/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,6 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "15"))
list(
APPEND
TEST_FILES
merge_compress.sql
cagg_refresh_using_merge.sql
hypercore_columnar.sql
hypercore_copy.sql
Expand All @@ -157,9 +156,11 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "15"))
hypercore_policy.sql
hypercore_scans.sql
hypercore_stats.sql
hypercore_trigger.sql
hypercore_types.sql
hypercore_update.sql
hypercore_vacuum.sql
hypercore_vacuum_full.sql
merge_compress.sql)
endif()

Expand Down
118 changes: 118 additions & 0 deletions tsl/test/sql/hypercore_trigger.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hypercore.sql

create table saved_rows (like :chunk1, new_row bool not null, kind text);
create table count_stmt (inserts int, updates int, deletes int, truncates int);

-- Row-level BEFORE INSERT trigger (attached to chunk1 below): copies
-- each inserted row into saved_rows (new_row = true, kind = 'insert')
-- and returns NEW so the insert proceeds unchanged.
create function save_insert_row() returns trigger as $$
begin
insert into saved_rows select new.*, true, 'insert';
return new;
end;
$$ language plpgsql;

-- Row-level BEFORE UPDATE trigger: records both versions of each
-- updated row in saved_rows — the new image (new_row = true) and the
-- old image (new_row = false), both with kind = 'update' — then
-- returns NEW so the update proceeds unchanged.
create function save_update_row() returns trigger as $$
begin
insert into saved_rows select new.*, true, 'update';
insert into saved_rows select old.*, false, 'update';
return new;
end;
$$ language plpgsql;

-- Row-level BEFORE DELETE trigger: saves the deleted row into
-- saved_rows (new_row = false, kind = 'delete').
--
-- Must return OLD: in a DELETE trigger NEW is NULL, and a BEFORE row
-- trigger returning NULL silently skips the operation, so the original
-- "return new" cancelled every delete. The bug was masked because the
-- test never verifies the rows are gone and the later TRUNCATE removes
-- them regardless.
create function save_delete_row() returns trigger as $$
begin
insert into saved_rows select old.*, false, 'delete';
return old;
end;
$$ language plpgsql;
Comment on lines +10 to +30
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it can be simplified in only one trigger function and also track TG_WHEN and use TG_OP to save the proper operation on the saved_rows table.

Another thing is that you're testing only BEFORE triggers and what about AFTER and INSTEAD OF ??


-- Statement-level INSERT trigger: logs one row in count_stmt per
-- INSERT statement (not per row). The return value of a statement
-- trigger is ignored, so NULL is fine here.
create function count_inserts() returns trigger as $$
begin
insert into count_stmt(inserts) values (1);
return null;
end;
$$ language plpgsql;

-- Statement-level UPDATE trigger: logs one row in count_stmt per
-- UPDATE statement (not per row). The return value of a statement
-- trigger is ignored, so NULL is fine here.
create function count_updates() returns trigger as $$
begin
insert into count_stmt(updates) values (1);
return null;
end;
$$ language plpgsql;

-- Statement-level DELETE trigger: logs one row in count_stmt per
-- DELETE statement (not per row). The return value of a statement
-- trigger is ignored, so NULL is fine here.
create function count_deletes() returns trigger as $$
begin
insert into count_stmt(deletes) values (1);
return null;
end;
$$ language plpgsql;

-- Statement-level AFTER TRUNCATE trigger (attached below): raises a
-- NOTICE so the expected-output file can confirm the trigger fired.
create function notify_action() returns trigger as $$
begin
raise notice 'table was truncated';
return null;
end;
$$ language plpgsql;



-- Compress all the chunks and make sure that they are compressed
select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
select chunk_name, compression_status from chunk_compression_stats(:'hypertable');

with the_info as (
select min(created_at) min_created_at,
max(created_at) max_created_at
from :hypertable
)
select min_created_at, max_created_at,
'1m'::interval + min_created_at + (max_created_at - min_created_at) as mid_created_at,
'1m'::interval + max_created_at + (max_created_at - min_created_at) as post_created_at
from the_info \gset

-- Insert a bunch of rows to make sure that we have a mix of
-- uncompressed, partially compressed, and fully compressed chunks.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series(:'mid_created_at'::timestamptz, :'post_created_at', '15m') t;

-- Start by testing insert triggers for both statements and rows. In
-- this case, the trigger will just save away the rows into a separate
-- table and check that we get the same number of rows with the same
-- values.
create trigger save_insert_row_trg before insert on :chunk1 for each row execute function save_insert_row();
create trigger count_inserts_trg before insert on :chunk1 for each statement execute function count_inserts();

insert into :chunk1(created_at, location_id, device_id, owner_id, temp, humidity)
values
('2022-06-01 00:01:23', 999, 666, 111, 3.14, 3.14),
('2022-06-01 00:02:23', 999, 666, 111, 3.14, 3.14);

select * from saved_rows where kind = 'insert';
select count(inserts), count(updates), count(deletes) from count_stmt;

-- Run update tests
create trigger save_update_row_trg before update on :chunk1 for each row execute function save_update_row();
create trigger count_update_trg before update on :chunk1 for each statement execute function count_updates();

update :chunk1 set temp = 9.99 where device_id = 666;

select * from saved_rows where kind = 'update';
select count(inserts), count(updates), count(deletes) from count_stmt;

-- Run delete tests
create trigger save_delete_row_trg before delete on :chunk1 for each row execute function save_delete_row();
create trigger count_delete_trg before delete on :chunk1 for each statement execute function count_deletes();

delete from :chunk1 where device_id = 666;

select * from saved_rows where kind = 'delete';
select count(inserts), count(updates), count(deletes) from count_stmt;

-- Check truncate trigger
create trigger notify_truncate after truncate on :chunk1 for each statement execute function notify_action();

truncate :chunk1;
Loading