
feat(subscription): support blocking cursor #18675

Merged 11 commits on Oct 8, 2024
Changes from 8 commits
76 changes: 61 additions & 15 deletions e2e_test/subscription/main.py
@@ -1,5 +1,6 @@
import subprocess
import psycopg2
import threading
import time


@@ -56,7 +57,7 @@ def test_cursor_snapshot():
execute_insert("declare cur subscription cursor for sub full",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([1,2],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)
drop_table_subscription()
@@ -75,7 +76,7 @@ def test_cursor_snapshot_log_store():
execute_insert("declare cur subscription cursor for sub full",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([1,2],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("insert into t1 values(4,4)",conn)
execute_insert("flush",conn)
@@ -85,7 +86,7 @@ def test_cursor_snapshot_log_store():
check_rows_data([4,4],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
check_rows_data([5,5],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)
drop_table_subscription()
@@ -113,7 +114,7 @@ def test_cursor_since_begin():
check_rows_data([5,5],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
check_rows_data([6,6],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)
drop_table_subscription()
@@ -138,7 +139,7 @@ def test_cursor_since_now():
execute_insert("flush",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([6,6],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)
drop_table_subscription()
@@ -163,7 +164,7 @@ def test_cursor_without_since():
execute_insert("flush",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([6,6],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)
drop_table_subscription()
@@ -197,7 +198,7 @@ def test_cursor_since_rw_timestamp():
valuelen = len(row[0])
rw_timestamp_3 = row[0][valuelen - 1] + 1
check_rows_data([6,6],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)

@@ -212,7 +213,7 @@ def test_cursor_since_rw_timestamp():
execute_insert("close cur",conn)

execute_insert(f"declare cur subscription cursor for sub since {rw_timestamp_3}",conn)
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
execute_insert("close cur",conn)

@@ -231,7 +232,7 @@ def test_cursor_op():
execute_insert("declare cur subscription cursor for sub full",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([1,2],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []

execute_insert("insert into t1 values(4,4)",conn)
@@ -244,14 +245,14 @@ def test_cursor_op():
check_rows_data([4,4],row[0],"UpdateDelete")
row = execute_query("fetch next from cur",conn)
check_rows_data([4,10],row[0],"UpdateInsert")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []

execute_insert("delete from t1 where v1 = 4",conn)
execute_insert("flush",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([4,10],row[0],"Delete")
row = execute_query("fetch next from cur",conn)
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []

execute_insert("close cur",conn)
@@ -273,8 +274,8 @@ def test_cursor_with_table_alter():
execute_insert("flush",conn)
row = execute_query("fetch next from cur",conn)
check_rows_data([1,2],row[0],"Insert")
row = execute_query("fetch next from cur",conn)
assert(row == [])
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
row = execute_query("fetch next from cur",conn)
check_rows_data([4,4,4],row[0],"Insert")
execute_insert("insert into t1 values(5,5,5)",conn)
@@ -284,8 +285,8 @@ def test_cursor_with_table_alter():
execute_insert("alter table t1 drop column v2",conn)
execute_insert("insert into t1 values(6,6)",conn)
execute_insert("flush",conn)
row = execute_query("fetch next from cur",conn)
assert(row == [])
row = execute_query("fetch next from cur with (timeout = '2s')",conn)
assert row == []
row = execute_query("fetch next from cur",conn)
check_rows_data([6,6],row[0],"Insert")
drop_table_subscription()
@@ -355,6 +356,50 @@ def test_rebuild_table():
check_rows_data([1,100],row[2],"UpdateInsert")
drop_table_subscription()

def test_block_cursor():
print(f"test_block_cursor")
create_table_subscription()
conn = psycopg2.connect(
host="localhost",
port="4566",
user="root",
database="dev"
)

execute_insert("declare cur subscription cursor for sub2 full",conn)
execute_insert("insert into t2 values(1,1)",conn)
execute_insert("flush",conn)
execute_insert("update t2 set v2 = 100 where v1 = 1",conn)
execute_insert("flush",conn)
row = execute_query("fetch 100 from cur",conn)
assert len(row) == 3
check_rows_data([1,1],row[0],"Insert")
check_rows_data([1,1],row[1],"UpdateDelete")
check_rows_data([1,100],row[2],"UpdateInsert")

# Test that the blocking cursor fetches data once it becomes available
thread = threading.Thread(target=insert_into_table)
thread.start()
row = execute_query("fetch 100 from cur",conn)
check_rows_data([10,10],row[0],"Insert")
thread.join()

# Test that a blocking fetch returns empty once the timeout elapses
row = execute_query("fetch 100 from cur with (timeout = '5s')",conn)
assert row == []

drop_table_subscription()

def insert_into_table():
time.sleep(2)
conn = psycopg2.connect(
host="localhost",
port="4566",
user="root",
database="dev"
)
execute_insert("insert into t2 values(10,10)",conn)

if __name__ == "__main__":
test_cursor_snapshot()
test_cursor_op()
@@ -366,3 +411,4 @@ def test_rebuild_table():
test_cursor_with_table_alter()
test_cursor_fetch_n()
test_rebuild_table()
test_block_cursor()
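
For reference, the behavior the new test exercises is this: a plain FETCH on a subscription cursor now blocks until data is available, while an optional WITH (timeout = ...) bounds the wait and yields an empty result when it expires. Below is a minimal psycopg2 sketch of that usage; the connection parameters and the sub2 subscription name mirror the test setup above and are assumptions, not a prescribed configuration.

import psycopg2

# Minimal sketch of the blocking-cursor semantics tested above (illustrative).
conn = psycopg2.connect(host="localhost", port="4566", user="root", database="dev")
conn.autocommit = True

with conn.cursor() as pg_cursor:
    pg_cursor.execute("declare cur subscription cursor for sub2 full")

    # Without a timeout, FETCH blocks until at least one row is available
    # (the test above produces that row from a background insert thread).
    pg_cursor.execute("fetch 100 from cur")
    rows = pg_cursor.fetchall()

    # With a timeout, FETCH returns an empty result set if nothing arrives
    # before the timeout elapses.
    pg_cursor.execute("fetch 100 from cur with (timeout = '5s')")
    assert pg_cursor.fetchall() == []

    pg_cursor.execute("close cur")
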
11 changes: 0 additions & 11 deletions proto/hummock.proto
@@ -837,7 +837,6 @@ service HummockManagerService {
rpc ListCompactTaskAssignment(ListCompactTaskAssignmentRequest) returns (ListCompactTaskAssignmentResponse);
rpc ListCompactTaskProgress(ListCompactTaskProgressRequest) returns (ListCompactTaskProgressResponse);
rpc CancelCompactTask(CancelCompactTaskRequest) returns (CancelCompactTaskResponse);
rpc ListChangeLogEpochs(ListChangeLogEpochsRequest) returns (ListChangeLogEpochsResponse);
rpc GetVersionByEpoch(GetVersionByEpochRequest) returns (GetVersionByEpochResponse);
rpc MergeCompactionGroup(MergeCompactionGroupRequest) returns (MergeCompactionGroupResponse);
}
@@ -909,13 +908,3 @@ message BranchedObject {
// Compaction group id the SST belongs to.
uint64 compaction_group_id = 3;
}

message ListChangeLogEpochsRequest {
uint32 table_id = 1;
uint64 min_epoch = 2;
uint32 max_count = 3;
}

message ListChangeLogEpochsResponse {
repeated uint64 epochs = 1;
}
4 changes: 4 additions & 0 deletions src/frontend/src/catalog/schema_catalog.rs
@@ -543,6 +543,10 @@ impl SchemaCatalog {
.map(|(_, v)| v)
}

pub fn iter_all_table_ids(&self) -> impl Iterator<Item = &TableId> {
self.table_by_id.keys()
}

pub fn iter_internal_table(&self) -> impl Iterator<Item = &Arc<TableCatalog>> {
self.table_by_name
.iter()
15 changes: 2 additions & 13 deletions src/frontend/src/catalog/subscription_catalog.rs
@@ -12,17 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use core::str::FromStr;

use risingwave_common::catalog::{TableId, UserId, OBJECT_ID_PLACEHOLDER};
use risingwave_common::types::Interval;
use risingwave_common::util::epoch::Epoch;
use risingwave_pb::catalog::subscription::PbSubscriptionState;
use risingwave_pb::catalog::PbSubscription;
use thiserror_ext::AsReport;

use super::OwnedByUserCatalog;
use crate::error::{ErrorCode, Result};
use crate::handler::util::convert_interval_to_u64_seconds;
use crate::WithOptions;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
@@ -86,15 +83,7 @@ impl SubscriptionCatalog {
let retention_seconds_str = properties.get("retention").ok_or_else(|| {
ErrorCode::InternalError("Subscription retention time not set.".to_string())
})?;
let retention_seconds = (Interval::from_str(retention_seconds_str)
.map_err(|err| {
ErrorCode::InternalError(format!(
"Retention needs to be set in Interval format: {:?}",
err.to_report_string()
))
})?
.epoch_in_micros()
/ 1000000) as u64;
let retention_seconds = convert_interval_to_u64_seconds(retention_seconds_str)?;
self.retention_seconds = retention_seconds;
Ok(())
}
26 changes: 24 additions & 2 deletions src/frontend/src/handler/fetch_cursor.rs
@@ -22,11 +22,12 @@ use risingwave_sqlparser::ast::{FetchCursorStatement, Statement};

use super::extended_handle::{PortalResult, PrepareStatement, PreparedResult};
use super::query::BoundResult;
use super::util::convert_interval_to_u64_seconds;
use super::RwPgResponse;
use crate::binder::BoundStatement;
use crate::error::Result;
use crate::handler::HandlerArgs;
use crate::{Binder, PgResponseStream};
use crate::{Binder, PgResponseStream, WithOptions};

pub async fn handle_fetch_cursor_execute(
handler_args: HandlerArgs,
@@ -61,10 +62,31 @@ pub async fn handle_fetch_cursor(
let (_, cursor_name) =
Binder::resolve_schema_qualified_name(db_name, stmt.cursor_name.clone())?;

let with_options = WithOptions::try_from(stmt.with_properties.0.as_slice())?;

if with_options.len() > 1 {
bail_not_implemented!("only `timeout` is supported in with options")
}

let timeout_seconds = with_options
.get("timeout")
.map(convert_interval_to_u64_seconds)
.transpose()?;

if with_options.len() == 1 && timeout_seconds.is_none() {
bail_not_implemented!("only `timeout` is supported in with options")
}

let cursor_manager = session.get_cursor_manager();

let (rows, pg_descs) = cursor_manager
.get_rows_with_cursor(cursor_name, stmt.count, handler_args, formats)
.get_rows_with_cursor(
cursor_name,
stmt.count,
handler_args,
formats,
timeout_seconds,
)
.await?;
Ok(build_fetch_cursor_response(rows, pg_descs))
}
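
The WITH-clause handling above admits at most one option, and only the timeout key; any other combination hits bail_not_implemented!. A hedged sketch of the statement forms this implies (illustrative strings only, not an exhaustive grammar):

# Statement forms implied by the option checks in handle_fetch_cursor (illustrative).
accepted = [
    "fetch next from cur",                          # no WITH clause: plain blocking fetch
    "fetch 100 from cur with (timeout = '2s')",     # single `timeout` option
]
rejected_as_not_implemented = [
    "fetch next from cur with (foo = 'bar')",                   # unknown single option
    "fetch next from cur with (timeout = '2s', foo = 'bar')",   # more than one option
]
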
19 changes: 18 additions & 1 deletion src/frontend/src/handler/util.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use core::str::FromStr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
@@ -28,13 +29,16 @@ use pin_project_lite::pin_project;
use risingwave_common::array::DataChunk;
use risingwave_common::catalog::Field;
use risingwave_common::row::Row as _;
use risingwave_common::types::{write_date_time_tz, DataType, ScalarRefImpl, Timestamptz};
use risingwave_common::types::{
write_date_time_tz, DataType, Interval, ScalarRefImpl, Timestamptz,
};
use risingwave_common::util::epoch::Epoch;
use risingwave_common::util::iter_util::ZipEqFast;
use risingwave_sqlparser::ast::{
CompatibleSourceSchema, ConnectorSchema, ObjectName, Query, Select, SelectItem, SetExpr,
TableFactor, TableWithJoins,
};
use thiserror_ext::AsReport;

use crate::error::{ErrorCode, Result as RwResult};
use crate::session::{current, SessionImpl};
@@ -238,6 +242,19 @@ pub fn convert_logstore_u64_to_unix_millis(logstore_u64: u64) -> u64 {
Epoch::from(logstore_u64).as_unix_millis()
}

pub fn convert_interval_to_u64_seconds(interval: &String) -> RwResult<u64> {
let seconds = (Interval::from_str(interval)
.map_err(|err| {
ErrorCode::InternalError(format!(
"Covert interval to u64 error, please check format, error: {:?}",
err.to_report_string()
))
})?
.epoch_in_micros()
/ 1000000) as u64;
Ok(seconds)
}

#[cfg(test)]
mod tests {
use postgres_types::{ToSql, Type};
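
For intuition, the helper above parses an interval literal and truncates it to whole seconds via epoch_in_micros() / 1000000. A rough Python stand-in for that behavior, handling only simple number-plus-unit strings; the unit table is an illustrative assumption, not the real Interval grammar:

import re

# Rough stand-in for convert_interval_to_u64_seconds: parse a simple interval
# string and truncate to whole seconds, mirroring the micros-to-seconds division above.
_UNIT_MICROS = {"ms": 1_000, "s": 1_000_000, "m": 60_000_000, "h": 3_600_000_000}

def interval_to_seconds(interval: str) -> int:
    m = re.fullmatch(r"\s*(\d+)\s*([a-zA-Z]+)\s*", interval)
    if m is None or m.group(2).lower() not in _UNIT_MICROS:
        raise ValueError(f"cannot parse interval: {interval!r}")
    return int(m.group(1)) * _UNIT_MICROS[m.group(2).lower()] // 1_000_000

assert interval_to_seconds("2s") == 2
assert interval_to_seconds("1500ms") == 1  # sub-second remainder is truncated
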
18 changes: 0 additions & 18 deletions src/frontend/src/meta_client.rs
@@ -119,13 +119,6 @@ pub trait FrontendMetaClient: Send + Sync {
rate_limit: Option<u32>,
) -> Result<()>;

async fn list_change_log_epochs(
&self,
table_id: u32,
min_epoch: u64,
max_count: u32,
) -> Result<Vec<u64>>;

async fn get_cluster_recovery_status(&self) -> Result<RecoveryStatus>;

async fn get_cluster_limits(&self) -> Result<Vec<ClusterLimit>>;
@@ -297,17 +290,6 @@ impl FrontendMetaClient for FrontendMetaClientImpl {
.map(|_| ())
}

async fn list_change_log_epochs(
&self,
table_id: u32,
min_epoch: u64,
max_count: u32,
) -> Result<Vec<u64>> {
self.0
.list_change_log_epochs(table_id, min_epoch, max_count)
.await
}

async fn get_cluster_recovery_status(&self) -> Result<RecoveryStatus> {
self.0.get_cluster_recovery_status().await
}