Commit 27aca2d

Merge pull request #757 from drmingdrmer/42-refact
Refactor: Add Command::SendResult to send back response
2 parents 85d28b3 + b912cc2

File tree

12 files changed: +346 -240
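The gist of the refactor: the engine previously pushed a distinct command per RPC kind to return a result to the caller (Command::SendVoteResult, SendAppendEntriesResult, SendInstallSnapshotResult, SendInitializeResult), each wrapping a typed SendResult. These are collapsed into a single Command::Respond variant carrying a Respond value, constructed as Respond::new(res, tx) in the diff below. What follows is a minimal sketch of that shape; the stub types, the variant set, and the direct variant construction are illustrative simplifications, not openraft's real definitions.

use std::convert::Infallible;
use tokio::sync::oneshot;

// Stub response/error types standing in for openraft's real ones.
struct VoteResponse;
struct InitializeError;

/// A computed result paired with the oneshot sender that delivers it.
struct ValueSender<T> {
    value: T,
    tx: oneshot::Sender<T>,
}

impl<T> ValueSender<T> {
    fn send(self) {
        // The caller may have dropped the receiver; losing the value is fine.
        let _ = self.tx.send(self.value);
    }
}

/// One type that can hold any kind of pending response, so the command enum
/// needs a single Respond variant instead of one per RPC kind.
enum Respond {
    Vote(ValueSender<Result<VoteResponse, Infallible>>),
    Initialize(ValueSender<Result<(), InitializeError>>),
    // ...one variant per remaining RPC kind...
}

impl Respond {
    fn send(self) {
        match self {
            Respond::Vote(v) => v.send(),
            Respond::Initialize(v) => v.send(),
        }
    }
}

enum Command {
    Respond { resp: Respond },
    // ...other engine commands...
}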

openraft/src/core/install_snapshot.rs

-140
This file was deleted.

openraft/src/core/mod.rs

+1 -2

@@ -5,11 +5,10 @@
 //! storage or forward messages to other raft nodes.
 
 mod building_state;
-mod install_snapshot;
 mod raft_core;
 mod replication_state;
 mod server_state;
-mod streaming_state;
+pub(crate) mod streaming_state;
 mod tick;
 
 pub(crate) mod snapshot_state;

openraft/src/core/raft_core.rs

+119 -27

@@ -15,6 +15,7 @@ use maplit::btreeset;
 use pin_utils::pin_mut;
 use tokio::io::AsyncRead;
 use tokio::io::AsyncSeek;
+use tokio::io::AsyncWriteExt;
 use tokio::sync::mpsc;
 use tokio::sync::oneshot;
 use tokio::sync::watch;
@@ -30,12 +31,14 @@ use crate::config::RuntimeConfig;
 use crate::config::SnapshotPolicy;
 use crate::core::building_state;
 use crate::core::snapshot_state;
+use crate::core::snapshot_state::SnapshotRequestId;
+use crate::core::streaming_state::Streaming;
 use crate::core::ServerState;
 use crate::core::SnapshotResult;
 use crate::display_ext::DisplaySlice;
 use crate::engine::Command;
 use crate::engine::Engine;
-use crate::engine::SendResult;
+use crate::engine::Respond;
 use crate::entry::FromAppData;
 use crate::entry::RaftEntry;
 use crate::entry::RaftPayload;
@@ -46,6 +49,7 @@ use crate::error::ForwardToLeader;
 use crate::error::InitializeError;
 use crate::error::QuorumNotEnough;
 use crate::error::RPCError;
+use crate::error::SnapshotMismatch;
 use crate::error::Timeout;
 use crate::log_id::LogIdOptionExt;
 use crate::log_id::RaftLogId;
@@ -62,8 +66,11 @@ use crate::raft::AppendEntriesTx;
 use crate::raft::ClientWriteResponse;
 use crate::raft::ClientWriteTx;
 use crate::raft::ExternalCommand;
+use crate::raft::InstallSnapshotRequest;
+use crate::raft::InstallSnapshotResponse;
+use crate::raft::InstallSnapshotTx;
 use crate::raft::RaftMsg;
-use crate::raft::RaftRespTx;
+use crate::raft::ResultSender;
 use crate::raft::VoteRequest;
 use crate::raft::VoteResponse;
 use crate::raft::VoteTx;
@@ -87,8 +94,10 @@ use crate::RaftNetworkFactory;
 use crate::RaftStorage;
 use crate::RaftTypeConfig;
 use crate::SnapshotId;
+use crate::SnapshotSegmentId;
 use crate::StorageError;
 use crate::StorageHelper;
+use crate::StorageIOError;
 use crate::Update;
 use crate::Vote;
 
@@ -218,7 +227,7 @@ impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftCore<C,
     #[tracing::instrument(level = "trace", skip(self, tx))]
     pub(super) async fn handle_check_is_leader_request(
         &mut self,
-        tx: RaftRespTx<(), CheckIsLeaderError<C::NodeId, C::Node>>,
+        tx: ResultSender<(), CheckIsLeaderError<C::NodeId, C::Node>>,
     ) -> Result<(), StorageError<C::NodeId>> {
         // Setup sentinel values to track when we've received majority confirmation of leadership.
 
@@ -370,7 +379,7 @@ impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftCore<C,
         &mut self,
         changes: ChangeMembers<C::NodeId, C::Node>,
         retain: bool,
-        tx: RaftRespTx<ClientWriteResponse<C>, ClientWriteError<C::NodeId, C::Node>>,
+        tx: ResultSender<ClientWriteResponse<C>, ClientWriteError<C::NodeId, C::Node>>,
     ) {
         let res = self.engine.state.membership_state.change_handler().apply(changes, retain);
         let new_membership = match res {
@@ -519,17 +528,111 @@ impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftCore<C,
     pub(crate) fn handle_initialize(
         &mut self,
         member_nodes: BTreeMap<C::NodeId, C::Node>,
-        tx: RaftRespTx<(), InitializeError<C::NodeId, C::Node>>,
+        tx: ResultSender<(), InitializeError<C::NodeId, C::Node>>,
     ) {
         let membership = Membership::from(member_nodes);
 
         let entry = C::Entry::new_membership(LogId::default(), membership);
         let res = self.engine.initialize(entry);
-        self.engine.output.push_command(Command::SendInitializeResult {
-            send: SendResult::new(res, tx),
+        self.engine.output.push_command(Command::Respond {
+            resp: Respond::new(res, tx),
         });
     }
 
+    /// Invoked by leader to send chunks of a snapshot to a follower.
+    ///
+    /// Leaders always send chunks in order. It is important to note that, according to the Raft
+    /// spec, a log may only have one snapshot at any time. As snapshot contents are application
+    /// specific, the Raft log will only store a pointer to the snapshot file along with the
+    /// index & term.
+    #[tracing::instrument(level = "debug", skip_all)]
+    pub(crate) async fn handle_install_snapshot_request(
+        &mut self,
+        req: InstallSnapshotRequest<C>,
+        tx: InstallSnapshotTx<C::NodeId>,
+    ) -> Result<(), StorageError<C::NodeId>> {
+        // TODO: move receiving to another thread.
+        tracing::debug!(req = display(req.summary()));
+
+        let snapshot_meta = req.meta.clone();
+        let done = req.done;
+        let offset = req.offset;
+
+        let req_id = SnapshotRequestId::new(*req.vote.leader_id(), snapshot_meta.snapshot_id.clone(), offset);
+
+        let res = self.engine.vote_handler().accept_vote(&req.vote, tx, |state, _rejected| {
+            Ok(InstallSnapshotResponse {
+                vote: *state.vote_ref(),
+            })
+        });
+
+        let tx = match res {
+            Ok(tx) => tx,
+            Err(_) => return Ok(()),
+        };
+
+        let curr_id = self.snapshot_state.streaming.as_ref().map(|s| &s.snapshot_id);
+
+        // Changed to another stream. re-init snapshot state.
+        if curr_id != Some(&req.meta.snapshot_id) {
+            if req.offset > 0 {
+                let mismatch = SnapshotMismatch {
+                    expect: SnapshotSegmentId {
+                        id: snapshot_meta.snapshot_id.clone(),
+                        offset: 0,
+                    },
+                    got: SnapshotSegmentId {
+                        id: snapshot_meta.snapshot_id.clone(),
+                        offset,
+                    },
+                };
+
+                self.engine.output.push_command(Command::Respond {
+                    resp: Respond::new(Err(mismatch.into()), tx),
+                });
+
+                return Ok(());
+            }
+
+            let snapshot_data = self.storage.begin_receiving_snapshot().await?;
+            self.snapshot_state.streaming = Some(Streaming::new(req.meta.snapshot_id.clone(), snapshot_data));
+        }
+
+        tracing::info!("Received snapshot request: {:?}", req_id);
+
+        let streaming = self.snapshot_state.streaming.as_mut().unwrap();
+
+        // Receive the data.
+        streaming.receive(req).await?;
+
+        if done {
+            let streaming = self.snapshot_state.streaming.take().unwrap();
+            let mut data = streaming.snapshot_data;
+
+            data.as_mut()
+                .shutdown()
+                .await
+                .map_err(|e| StorageIOError::write_snapshot(snapshot_meta.signature(), &e))?;
+
+            self.received_snapshot.insert(snapshot_meta.snapshot_id.clone(), data);
+        }
+
+        if done {
+            self.engine.following_handler().install_snapshot(snapshot_meta);
+        }
+
+        self.engine.output.push_command(Command::Respond {
+            resp: Respond::new(
+                Ok(InstallSnapshotResponse {
+                    vote: *self.engine.state.vote_ref(),
+                }),
+                tx,
+            ),
+        });
+
+        Ok(())
+    }
+
     fn handle_building_snapshot_result(
         &mut self,
         result: SnapshotResult<C::NodeId, C::Node>,
@@ -619,7 +722,7 @@ impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftCore<C,
 
     /// Reject a request due to the Raft node being in a state which prohibits the request.
     #[tracing::instrument(level = "trace", skip(self, tx))]
-    pub(crate) fn reject_with_forward_to_leader<T, E>(&self, tx: RaftRespTx<T, E>)
+    pub(crate) fn reject_with_forward_to_leader<T, E>(&self, tx: ResultSender<T, E>)
     where E: From<ForwardToLeader<C::NodeId, C::Node>> {
         let mut leader_id = self.current_leader();
         let leader_node = self.get_leader_node(leader_id);
@@ -926,8 +1029,8 @@ impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftCore<C,
         tracing::debug!(req = display(req.summary()), func = func_name!());
 
         let resp = self.engine.handle_vote_req(req);
-        self.engine.output.push_command(Command::SendVoteResult {
-            send: SendResult::new(Ok(resp), tx),
+        self.engine.output.push_command(Command::Respond {
+            resp: Respond::new(Ok(resp), tx),
         });
     }
 
@@ -1368,25 +1471,14 @@ impl<C: RaftTypeConfig, N: RaftNetworkFactory<C>, S: RaftStorage<C>> RaftRuntime
                 debug_assert!(got.is_some(), "there has to be a buffered snapshot data");
             }
             Command::InstallSnapshot { snapshot_meta } => {
-                let snapshot_data = self.received_snapshot.remove(&snapshot_meta.snapshot_id);
+                // Safe unwrap: it is guaranteed that the snapshot data is buffered. Otherwise it is a bug.
+                let data = self.received_snapshot.remove(&snapshot_meta.snapshot_id).unwrap();
 
-                if let Some(data) = snapshot_data {
-                    self.storage.install_snapshot(&snapshot_meta, data).await?;
-                    tracing::debug!("Done install_snapshot, meta: {:?}", snapshot_meta);
-                } else {
-                    unreachable!("buffered snapshot not found: snapshot meta: {:?}", snapshot_meta)
-                }
-            }
-            Command::SendVoteResult { send } => {
-                send.send();
-            }
-            Command::SendAppendEntriesResult { send } => {
-                send.send();
-            }
-            Command::SendInstallSnapshotResult { send } => {
-                send.send();
+                tracing::info!("Start to install_snapshot, meta: {:?}", snapshot_meta);
+                self.storage.install_snapshot(&snapshot_meta, data).await?;
+                tracing::info!("Done install_snapshot, meta: {:?}", snapshot_meta);
             }
-            Command::SendInitializeResult { send } => {
+            Command::Respond { resp: send } => {
                 send.send();
             }
         }
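The runtime side above shows the payoff: the four per-RPC Send*Result arms collapse into one Respond arm that just calls send(). The same single-arm shape could also be reached by type-erasing each pending response into a boxed closure; the sketch below is only a design comparison and is not what this PR does (the diff keeps a concrete Respond type, visible in Respond::new(...) and send.send(), which leaves the pending response inspectable rather than hidden inside a closure).

use tokio::sync::oneshot;

// Hypothetical alternative: erase the response type behind a FnOnce.
enum Command {
    Respond { resp: Box<dyn FnOnce() + Send> },
    // ...other commands...
}

fn respond_later<T: Send + 'static>(value: T, tx: oneshot::Sender<T>) -> Command {
    Command::Respond {
        resp: Box::new(move || {
            // The receiver may already be gone; ignore the error.
            let _ = tx.send(value);
        }),
    }
}

fn run(cmd: Command) {
    match cmd {
        // One arm replaces the four per-RPC Send*Result arms deleted above.
        Command::Respond { resp } => resp(),
    }
}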
