
[test](mtmv)Add mtmv up and down with drop table test case #48661

Open: wants to merge 60 commits into master.
Commits (60)
ca80561
[test](mtmv)Add mtmv up and down with drop table test case
zfr9527 Mar 12, 2025
c6b5647
fix bug
zfr9527 Mar 13, 2025
6bc059c
fix bug
zfr9527 Mar 13, 2025
8e66c65
fix bug
zfr9527 Mar 13, 2025
1747ceb
fix bug
zfr9527 Mar 18, 2025
69f3f52
fix bug
zfr9527 Apr 3, 2025
c458a95
fix bug
zfr9527 Apr 3, 2025
f99811f
fix bug
zfr9527 Apr 3, 2025
110636b
fix bug
zfr9527 Apr 3, 2025
553943e
fix bug
zfr9527 Apr 7, 2025
cb5044e
fix bug
zfr9527 Apr 7, 2025
0dc1963
fix bug
zfr9527 Apr 7, 2025
b3f93fd
fix bug
zfr9527 May 27, 2025
ef59480
fix bug
zfr9527 May 27, 2025
fd77af4
fix bug
zfr9527 May 27, 2025
aa259c0
fix bug
zfr9527 May 27, 2025
05163c8
fix bug
zfr9527 May 27, 2025
172dc90
fix bug
zfr9527 May 27, 2025
b497037
fix bug
zfr9527 May 27, 2025
0e6655f
fix bug
zfr9527 May 27, 2025
5588f86
fix bug
zfr9527 May 27, 2025
7839f4b
fix bug
zfr9527 May 27, 2025
74f3239
fix bug
zfr9527 May 27, 2025
034de12
fix bug
zfr9527 May 27, 2025
10b6e5d
fix bug
zfr9527 May 27, 2025
5aeb0ef
fix bug
zfr9527 May 27, 2025
d3f2751
fix bug
zfr9527 May 27, 2025
4037374
fix bug
zfr9527 May 27, 2025
8cef539
fix bug
zfr9527 May 27, 2025
d374cd5
fix bug
zfr9527 May 27, 2025
3d213a5
fix bug
zfr9527 May 27, 2025
4181cc9
fix bug
zfr9527 May 28, 2025
a7762cc
fix bug
zfr9527 May 30, 2025
65a0ea8
fix bug
zfr9527 May 30, 2025
a271b0c
fix bug
zfr9527 Jun 3, 2025
965e704
fix bug
zfr9527 Jun 5, 2025
f0d1f89
fix bug
zfr9527 Jun 5, 2025
708643b
fix bug
zfr9527 Jun 6, 2025
bddcb3e
fix bug
zfr9527 Jun 6, 2025
0cb9fcd
fix bug
zfr9527 Jun 6, 2025
bedb8c3
fix bug
zfr9527 Jun 6, 2025
6125796
fix bug
zfr9527 Jun 6, 2025
4ae15aa
fix bug
zfr9527 Jun 6, 2025
b1b6ecc
fix bug
zfr9527 Jun 6, 2025
a84a046
fix bug
zfr9527 Jun 6, 2025
de16da0
fix bug
zfr9527 Jun 6, 2025
14c6692
fix bug
zfr9527 Jun 6, 2025
336d40f
fix bug
zfr9527 Jun 6, 2025
2c030a9
fix bug
zfr9527 Jun 6, 2025
f64198f
fix bug
zfr9527 Jun 6, 2025
00d1f83
fix bug
zfr9527 Jun 6, 2025
188e8ec
fix bug
zfr9527 Jun 9, 2025
e41aaef
fix bug
zfr9527 Jun 9, 2025
548aa5e
fix bug
zfr9527 Jun 9, 2025
acd88d8
fix bug
zfr9527 Jun 9, 2025
25080d0
fix bug
zfr9527 Jun 9, 2025
99ea860
fix bug
zfr9527 Jun 9, 2025
6fffb70
fix bug
zfr9527 Jun 9, 2025
4334acf
fix bug
zfr9527 Jun 9, 2025
14700ce
fix bug
zfr9527 Jun 9, 2025
Binary file not shown.
180 changes: 180 additions & 0 deletions regression-test/suites/mtmv_up_down_olap_p0/load.groovy
@@ -15,8 +15,26 @@
// specific language governing permissions and limitations
// under the License.

/*
Build the baseline objects for the upgrade and downgrade tests. When designing upgrade/downgrade test cases,
focus on how the feature under test is represented in the FE metadata.
Taking MTMV as an example, the main feature points are creation, refresh, and rewriting, and the entities
involved are the base tables and the MTMV.
1. When creating an MTMV, check that rewriting meets expectations.
2. When refreshing an MTMV, check that rewriting meets expectations.
3. When deleting an MTMV, check that rewriting meets expectations.
4. When deleting a base table, check that rewriting meets expectations; then trigger a refresh and check that
rewriting still meets expectations.
5. When deleting a partition of a base table, check that rewriting meets expectations; then trigger a refresh and
check that rewriting still meets expectations.
6. Design a slightly more complex scenario. For example: build an MTMV on two base tables. When deleting one of
the base tables, check that the MTMV refresh and the rewriting meet expectations; then create an MTMV on the
remaining base table and check that it can be created and refreshed normally and that the corresponding
rewriting meets expectations.
A minimal sketch of this check pattern appears after this suite.
*/
suite("test_upgrade_downgrade_prepare_olap_mtmv","p0,mtmv,restart_fe") {
String suiteName = "mtmv_up_down_olap"
String dbName = context.config.getDbNameByFile(context.file)
String mvName = "${suiteName}_mtmv"
String tableName = "${suiteName}_table"
String tableName2 = "${suiteName}_table2"
@@ -68,4 +86,166 @@ suite("test_upgrade_downgrade_prepare_olap_mtmv","p0,mtmv,restart_fe") {
SELECT a.* FROM ${tableName} a inner join ${tableName2} b on a.user_id=b.user_id;
"""
waitingMTMVTaskFinishedByMvName(mvName)


String dropTableName1 = """${suiteName}_DropTableName1"""
String dropTableName2 = """${suiteName}_DropTableName2"""
String dropTableName3 = """${suiteName}_DropTableName3"""
String dropTableName4 = """${suiteName}_DropTableName4"""
String dropMtmvName1 = """${suiteName}_dropMtmvName1"""
String dropMtmvName2 = """${suiteName}_dropMtmvName2"""
String dropMtmvName3 = """${suiteName}_dropMtmvName3"""
String dropMtmvName4 = """${suiteName}_dropMtmvName4"""

sql """drop table if exists `${dropTableName1}`"""
sql """drop table if exists `${dropTableName2}`"""
sql """drop table if exists `${dropTableName3}`"""
sql """drop table if exists `${dropTableName4}`"""
sql """drop materialized view if exists ${dropMtmvName1};"""
sql """drop materialized view if exists ${dropMtmvName2};"""
sql """drop materialized view if exists ${dropMtmvName3};"""
sql """drop materialized view if exists ${dropMtmvName4};"""


sql """
CREATE TABLE `${dropTableName1}` (
`user_id` LARGEINT NOT NULL COMMENT '\"用户id\"',
`date` DATE NOT NULL COMMENT '\"数据灌入日期时间\"',
`num` SMALLINT NOT NULL COMMENT '\"数量\"'
) ENGINE=OLAP
DUPLICATE KEY(`user_id`, `date`, `num`)
COMMENT 'OLAP'
PARTITION BY RANGE(`date`)
(PARTITION p201701_1000 VALUES [('0000-01-01'), ('2017-02-01')),
PARTITION p201702_2000 VALUES [('2017-02-01'), ('2017-03-01')),
PARTITION p201703_3000 VALUES [('2017-03-01'), ('2017-04-01')),
PARTITION p201704_4000 VALUES [('2017-04-01'), ('2017-05-01')),
PARTITION p201705_5000 VALUES [('2017-05-01'), ('2017-06-01')),
PARTITION p201706_6000 VALUES [('2017-06-01'), ('2017-07-01')),
PARTITION p201707_7000 VALUES [('2017-07-01'), ('2017-08-01')),
PARTITION p201708_8000 VALUES [('2017-08-01'), ('2017-09-01')),
PARTITION p201709_9000 VALUES [('2017-09-01'), ('2017-10-01')),
PARTITION p201710_1000 VALUES [('2017-10-01'), ('2017-11-01')),
PARTITION p201711_1100 VALUES [('2017-11-01'), ('2017-12-01')),
PARTITION p201712_1200 VALUES [('2017-12-01'), ('2018-01-01')))
DISTRIBUTED BY HASH(`user_id`) BUCKETS 2
PROPERTIES ('replication_num' = '1') ;
"""
sql """
insert into ${dropTableName1} values(1,"2017-01-15",1),(2,"2017-02-15",2),(3,"2017-03-15",3),(4,"2017-04-15",4),(5,"2017-05-15",5),(6,"2017-06-15",6),(7,"2017-07-15",7),(8,"2017-08-15",8),(9,"2017-09-15",9),(10,"2017-10-15",10),(11,"2017-11-15",11),(12,"2017-12-15",12);
"""

sql """
CREATE TABLE `${dropTableName2}` (
`user_id` LARGEINT NOT NULL COMMENT '\"用户id\"',
`date` DATE NOT NULL COMMENT '\"数据灌入日期时间\"',
`num` SMALLINT NOT NULL COMMENT '\"数量\"'
) ENGINE=OLAP
DUPLICATE KEY(`user_id`, `date`, `num`)
COMMENT 'OLAP'
PARTITION BY RANGE(`date`)
(PARTITION p201701_1000 VALUES [('0000-01-01'), ('2017-02-01')),
PARTITION p201702_2000 VALUES [('2017-02-01'), ('2017-03-01')),
PARTITION p201703_3000 VALUES [('2017-03-01'), ('2017-04-01')),
PARTITION p201704_4000 VALUES [('2017-04-01'), ('2017-05-01')),
PARTITION p201705_5000 VALUES [('2017-05-01'), ('2017-06-01')),
PARTITION p201706_6000 VALUES [('2017-06-01'), ('2017-07-01')),
PARTITION p201707_7000 VALUES [('2017-07-01'), ('2017-08-01')),
PARTITION p201708_8000 VALUES [('2017-08-01'), ('2017-09-01')),
PARTITION p201709_9000 VALUES [('2017-09-01'), ('2017-10-01')),
PARTITION p201710_1000 VALUES [('2017-10-01'), ('2017-11-01')),
PARTITION p201711_1100 VALUES [('2017-11-01'), ('2017-12-01')),
PARTITION p201712_1200 VALUES [('2017-12-01'), ('2018-01-01')))
DISTRIBUTED BY HASH(`user_id`) BUCKETS 2
PROPERTIES ('replication_num' = '1') ;
"""
sql """
insert into ${dropTableName2} values(1,"2017-01-15",1),(2,"2017-02-15",2),(3,"2017-03-15",3),(4,"2017-04-15",4),(5,"2017-05-15",5),(6,"2017-06-15",6),(7,"2017-07-15",7),(8,"2017-08-15",8),(9,"2017-09-15",9),(10,"2017-10-15",10),(11,"2017-11-15",11),(12,"2017-12-15",12);
"""

sql """
CREATE TABLE `${dropTableName3}` (
`user_id` LARGEINT NOT NULL COMMENT '\"用户id\"',
`date` DATE NOT NULL COMMENT '\"数据灌入日期时间\"',
`num` SMALLINT NOT NULL COMMENT '\"数量\"'
) ENGINE=OLAP
DUPLICATE KEY(`user_id`, `date`, `num`)
COMMENT 'OLAP'
PARTITION BY RANGE(`date`)
(PARTITION p201701_1000 VALUES [('0000-01-01'), ('2017-02-01')),
PARTITION p201702_2000 VALUES [('2017-02-01'), ('2017-03-01')),
PARTITION p201703_3000 VALUES [('2017-03-01'), ('2017-04-01')),
PARTITION p201704_4000 VALUES [('2017-04-01'), ('2017-05-01')),
PARTITION p201705_5000 VALUES [('2017-05-01'), ('2017-06-01')),
PARTITION p201706_6000 VALUES [('2017-06-01'), ('2017-07-01')),
PARTITION p201707_7000 VALUES [('2017-07-01'), ('2017-08-01')),
PARTITION p201708_8000 VALUES [('2017-08-01'), ('2017-09-01')),
PARTITION p201709_9000 VALUES [('2017-09-01'), ('2017-10-01')),
PARTITION p201710_1000 VALUES [('2017-10-01'), ('2017-11-01')),
PARTITION p201711_1100 VALUES [('2017-11-01'), ('2017-12-01')),
PARTITION p201712_1200 VALUES [('2017-12-01'), ('2018-01-01')))
DISTRIBUTED BY HASH(`user_id`) BUCKETS 2
PROPERTIES ('replication_num' = '1') ;
"""
sql """
insert into ${dropTableName3} values(1,"2017-01-15",1),(2,"2017-02-15",2),(3,"2017-03-15",3),(4,"2017-04-15",4),(5,"2017-05-15",5),(6,"2017-06-15",6),(7,"2017-07-15",7),(8,"2017-08-15",8),(9,"2017-09-15",9),(10,"2017-10-15",10),(11,"2017-11-15",11),(12,"2017-12-15",12);
"""

sql """
CREATE TABLE `${dropTableName4}` (
`user_id` LARGEINT NOT NULL COMMENT '\"用户id\"',
`age` SMALLINT NOT NULL COMMENT '\"年龄\"'
) ENGINE=OLAP
DUPLICATE KEY(`user_id`, `age`)
COMMENT 'OLAP'
DISTRIBUTED BY HASH(`user_id`) BUCKETS 2
PROPERTIES ('replication_num' = '1') ;
"""
sql """
insert into ${dropTableName4} values(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),(11,11),(12,12);
"""


sql """
CREATE MATERIALIZED VIEW ${dropMtmvName1}
REFRESH AUTO ON MANUAL
partition by(`date`)
DISTRIBUTED BY RANDOM BUCKETS 2
PROPERTIES ('replication_num' = '1')
AS
SELECT a.* FROM ${dropTableName1} a inner join ${dropTableName4} b on a.user_id=b.user_id;
"""
waitingMTMVTaskFinishedByMvName(dropMtmvName1)

sql """
CREATE MATERIALIZED VIEW ${dropMtmvName2}
REFRESH AUTO ON MANUAL
partition by(`date`)
DISTRIBUTED BY RANDOM BUCKETS 2
PROPERTIES ('replication_num' = '1')
AS
SELECT a.* FROM ${dropTableName2} a inner join ${dropTableName4} b on a.user_id=b.user_id;
"""
waitingMTMVTaskFinishedByMvName(dropMtmvName2)

sql """
CREATE MATERIALIZED VIEW ${dropMtmvName3}
REFRESH AUTO ON MANUAL
partition by(`date`)
DISTRIBUTED BY RANDOM BUCKETS 2
PROPERTIES ('replication_num' = '1')
AS
SELECT a.* FROM ${dropTableName3} a inner join ${dropTableName4} b on a.user_id=b.user_id;
"""
waitingMTMVTaskFinishedByMvName(dropMtmvName3)

def state_mtmv1 = sql """select State,RefreshState,SyncWithBaseTables from mv_infos('database'='${dbName}') where Name = '${dropMtmvName1}';"""
assertTrue(state_mtmv1[0][0] == "NORMAL")
assertTrue(state_mtmv1[0][1] == "SUCCESS")
assertTrue(state_mtmv1[0][2] == true)
def state_mtmv2 = sql """select State,RefreshState,SyncWithBaseTables from mv_infos('database'='${dbName}') where Name = '${dropMtmvName2}';"""
assertTrue(state_mtmv2[0][0] == "NORMAL")
assertTrue(state_mtmv2[0][1] == "SUCCESS")
assertTrue(state_mtmv2[0][2] == true)

}
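
Each scenario listed in the header comment of load.groovy follows the same check pattern used in the suite above: create the MTMV, wait for its refresh task, read its state from mv_infos, and confirm that a query over the base tables is rewritten to the MTMV. The sketch below is illustrative only and is not part of the PR diff; it reuses helpers that appear in this PR (waitingMTMVTaskFinishedByMvName, mv_infos, mv_rewrite_success), while the base table name is hypothetical and mv_rewrite_fail is assumed to be the framework's counterpart helper for asserting that no rewrite happens once the MTMV is dropped.

suite("example_mtmv_check_pattern", "p0,mtmv") {
    String dbName = context.config.getDbNameByFile(context.file)
    String baseTable = "example_base_table"   // hypothetical: a table with user_id and num columns created beforehand
    String mvName = "example_check_mtmv"

    // create the MTMV and wait for its first refresh task to finish
    sql """
        CREATE MATERIALIZED VIEW ${mvName}
        REFRESH AUTO ON MANUAL
        DISTRIBUTED BY RANDOM BUCKETS 2
        PROPERTIES ('replication_num' = '1')
        AS SELECT user_id, num FROM ${baseTable};
    """
    waitingMTMVTaskFinishedByMvName(mvName)

    // the MTMV should be NORMAL, successfully refreshed, and in sync with its base table
    def state = sql """select State,RefreshState,SyncWithBaseTables from mv_infos('database'='${dbName}') where Name = '${mvName}';"""
    assertTrue(state[0][0] == "NORMAL")
    assertTrue(state[0][1] == "SUCCESS")
    assertTrue(state[0][2] == true)

    // a query matching the MTMV definition should be rewritten to the MTMV
    def query = "SELECT user_id, num FROM ${baseTable};"
    mv_rewrite_success(query, mvName)

    // after the MTMV is dropped, the same query must no longer be rewritten
    sql """drop materialized view if exists ${mvName};"""
    mv_rewrite_fail(query, mvName)   // assumed counterpart of mv_rewrite_success
}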
@@ -20,13 +20,136 @@ suite("test_upgrade_downgrade_olap_mtmv","p0,mtmv,restart_fe") {
String dbName = context.config.getDbNameByFile(context.file)
String mvName = "${suiteName}_mtmv"
String tableName = "${suiteName}_table"

// verify the MTMV data is intact after the upgrade/downgrade restart
order_qt_refresh_init "SELECT * FROM ${mvName}"
// verify the MTMV is still in sync with its base tables
order_qt_mtmv_sync "select SyncWithBaseTables from mv_infos('database'='${dbName}') where Name='${mvName}'"
sql """
sql """
REFRESH MATERIALIZED VIEW ${mvName} complete
"""
// verify the MTMV can still be refreshed successfully
waitingMTMVTaskFinishedByMvName(mvName)

String dropTableName1 = """${suiteName}_DropTableName1"""
String dropTableName2 = """${suiteName}_DropTableName2"""
String dropTableName4 = """${suiteName}_DropTableName4"""
String dropMtmvName1 = """${suiteName}_dropMtmvName1"""
String dropMtmvName2 = """${suiteName}_dropMtmvName2"""
String dropMtmvName3 = """${suiteName}_dropMtmvName3"""


// returns the IP of an alive FOLLOWER frontend that is not the master
def get_follower_ip = {
def result = sql """show frontends;"""
logger.info("result:" + result)
for (int i = 0; i < result.size(); i++) {
if (result[i][7] == "FOLLOWER" && result[i][8] == "false" && result[i][11] == "true") {
return result[i][1]
}
}
return "null"
}
// returns the IP of the alive frontend that is currently master (the master also reports the FOLLOWER role)
def get_master_ip = {
def result = sql """show frontends;"""
logger.info("result:" + result)
for (int i = 0; i < result.size(); i++) {
if (result[i][7] == "FOLLOWER" && result[i][8] == "true" && result[i][11] == "true") {
return result[i][1]
}
}
return "null"
}

def follower_ip = get_follower_ip()
def master_ip = get_master_ip()

def tokens = context.config.jdbcUrl.split('/')
def url_tmp1 = tokens[0] + "//" + tokens[2] + "/" + "information_schema" + "?"
def follower_jdbc_url = url_tmp1.replaceAll(/\/\/[0-9.]+:/, "//${follower_ip}:")
logger.info("follower_jdbc_url: " + follower_jdbc_url)

def master_jdbc_url = url_tmp1.replaceAll(/\/\/[0-9.]+:/, "//${master_ip}:")
logger.info("master_jdbc_url: " + master_jdbc_url)


// drop a base table and check that the MTMV falls into SCHEMA_CHANGE and out of sync
sql """drop table if exists ${dropTableName1}"""
def state_mtmv1 = sql """select State,RefreshState,SyncWithBaseTables from mv_infos('database'='${dbName}') where Name = '${dropMtmvName1}';"""
assertTrue(state_mtmv1[0][0] == "SCHEMA_CHANGE")
assertTrue(state_mtmv1[0][1] == "SUCCESS" || state_mtmv1[0][1] == "INIT")
assertTrue(state_mtmv1[0][2] == false)

// after dropping the base table, a new MTMV can still be created on the remaining table
def cur_dropMtmvName3 = dropMtmvName3 + UUID.randomUUID().toString().replaceAll("-", "")
sql """
CREATE MATERIALIZED VIEW ${cur_dropMtmvName3}
REFRESH AUTO ON MANUAL
DISTRIBUTED BY RANDOM BUCKETS 2
PROPERTIES ('replication_num' = '1')
AS
SELECT user_id, age FROM ${dropTableName4};
"""
waitingMTMVTaskFinishedByMvName(cur_dropMtmvName3)

// drop a base table partition; the MTMV stays NORMAL but the affected MTMV partition becomes unsynced
def parts_res = sql """show partitions from ${dropTableName2}"""
sql """ALTER TABLE ${dropTableName2} DROP PARTITION ${parts_res[0][1]};"""
def state_mtmv2 = sql """select State,RefreshState,SyncWithBaseTables from mv_infos('database'='${dbName}') where Name = '${dropMtmvName2}';"""
assertTrue(state_mtmv2[0][0] == "NORMAL")
assertTrue(state_mtmv2[0][1] == "SUCCESS")
def mtmv_part_res = sql """show partitions from ${dropMtmvName2}"""
logger.info("mtmv_part_res[0][18]: " + mtmv_part_res[0][18])
logger.info("mtmv_part_res[0][19]: " + mtmv_part_res[0][19])
logger.info("mtmv_part_res:" + mtmv_part_res)
def part_1 = mtmv_part_res.size()
def diff_part = 0
for (int i = 0; i < mtmv_part_res.size(); i++) {
if (mtmv_part_res[i][18] == "false" && mtmv_part_res[i][19] as String == "[${dropTableName2}]") {
diff_part = diff_part + 1
}
}

def sql2 = "SELECT a.* FROM ${dropTableName2} a inner join ${dropTableName4} b on a.user_id=b.user_id;"
// mv_rewrite_success(sql2, dropMtmvName2)
connect('root', context.config.jdbcPassword, follower_jdbc_url) {
sql """use ${dbName}"""
mv_rewrite_success(sql2, dropMtmvName2)
}

connect('root', context.config.jdbcPassword, master_jdbc_url) {
sql """use ${dbName}"""
mv_rewrite_success(sql2, dropMtmvName2)
}

// refreshing the affected partition alone is expected to fail; a partition-level refresh does not drop the stale partition
try {
sql """refresh MATERIALIZED VIEW ${dropMtmvName2} partition(${mtmv_part_res[0][1]})"""
} catch (Exception e) {
logger.info(e.getMessage())
}

// a full (auto) refresh of the MTMV is expected to drop the stale partition
sql """refresh MATERIALIZED VIEW ${dropMtmvName2} auto"""
waitingMTMVTaskFinishedByMvName(dropMtmvName2)
mtmv_part_res = sql """show partitions from ${dropMtmvName2}"""
logger.info("mtmv_part_res:" + mtmv_part_res)
def part_2 = mtmv_part_res.size()
assertTrue(part_1 == part_2 + diff_part)

state_mtmv2 = sql """select State,RefreshState,SyncWithBaseTables from mv_infos('database'='${dbName}') where Name = '${dropMtmvName2}';"""
logger.info("state_mtmv2:" + state_mtmv2)
assertTrue(state_mtmv2[0][0] == "NORMAL")
assertTrue(state_mtmv2[0][1] == "SUCCESS")
assertTrue(state_mtmv2[0][2] == true)
// mv_rewrite_success(sql2, dropMtmvName2)
connect('root', context.config.jdbcPassword, follower_jdbc_url) {
sql """use ${dbName}"""
mv_rewrite_success(sql2, dropMtmvName2)
}

connect('root', context.config.jdbcPassword, master_jdbc_url) {
sql """use ${dbName}"""
mv_rewrite_success(sql2, dropMtmvName2)
}

}
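
As a rough cross-check that does not rely on the mv_rewrite_success helper (an illustrative assumption, not part of this PR), the rewrite can also be verified by explaining the query and looking for the MTMV name in the plan text; the exact plan wording can differ between Doris versions. The fragment below would sit inside the suite above and reuses its variables dropTableName2, dropTableName4, and dropMtmvName2.

// illustrative fragment only: a manual rewrite check via EXPLAIN
def rewriteQuery = "SELECT a.* FROM ${dropTableName2} a inner join ${dropTableName4} b on a.user_id=b.user_id"
def plan = sql """explain ${rewriteQuery}"""
logger.info("plan: " + plan)
// when the rewrite succeeds, the chosen MTMV name normally appears somewhere in the plan output
assertTrue(plan.toString().contains(dropMtmvName2))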