-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathraft.h
More file actions
1228 lines (973 loc) · 41.8 KB
/
raft.h
File metadata and controls
1228 lines (973 loc) · 41.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#ifndef raft_h
#define raft_h
// C library
#include <stdarg.h>
// C++ standard library
#include <algorithm>
#include <atomic>
#include <cassert>  // assert() is used by raft's log-index helpers
#include <chrono>
#include <ctime>
#include <mutex>
#include <numeric>
#include <thread>
#include <thread>  // duplicated in the original file; kept as-is
// project headers
#include "rpc.h"
#include "raft_storage.h"
#include "raft_protocol.h"
#include "raft_state_machine.h"
// raft<state_machine, command>: a single Raft consensus node.
// Owns the replicated log, a persistent-storage handle, RPC endpoints to all
// peers, and four background threads (election / ping / commit / apply).
// All mutable Raft state is guarded by the single big lock `mtx`; the private
// log-index helpers at the bottom assume the caller already holds it.
template<typename state_machine, typename command>
class raft {
    static_assert(std::is_base_of<raft_state_machine, state_machine>(),
        "state_machine must inherit from raft_state_machine");
    static_assert(std::is_base_of<raft_command, command>(),
        "command must inherit from raft_command");

    friend class thread_pool;

//#define LOG_TO_FILE
// Debug logging is currently compiled out: RAFT_LOG expands to an empty
// statement, so its arguments are never evaluated.  Re-enable it by swapping
// in the commented-out definition below.
#define RAFT_LOG(fmt, args...) \
do { \
} while (0);
//#define RAFT_LOG(fmt, args...) \
// do { \
// auto now = \
// std::chrono::duration_cast<std::chrono::milliseconds>( \
// std::chrono::system_clock::now().time_since_epoch()) \
// .count(); \
// printf("[%ld][%s:%d:%s][node %d term %d] " fmt "\n", now, __FILE__, __LINE__, __FUNCTION__ ,my_id, current_term, ##args); \
// } while (0);
// Error logging stays enabled even when RAFT_LOG is compiled out.
#define RAFT_ERR(fmt, args...) \
do { \
auto now = \
std::chrono::duration_cast<std::chrono::milliseconds>( \
std::chrono::system_clock::now().time_since_epoch()) \
.count(); \
printf("[ERROR] [%ld][%s:%d:%s][node %d term %d] " fmt "\n", now, __FILE__, __LINE__, __FUNCTION__ ,my_id, current_term, ##args); \
} while (0);
// Log-dump helper; like RAFT_LOG it is currently compiled out.
#define PRINT_ALL_LOG(args...) \
do { \
} while (0);
//#define PRINT_ALL_LOG(logs) \
// do { \
// RAFT_LOG("print log entries") \
// for(auto log : logs) { \
// RAFT_LOG("index %d, term %d", log.index_, log.term_); \
// } \
// } while (0);

public:
    raft(
        rpcs *rpc_server,
        std::vector<rpcc *> rpc_clients,
        int idx,
        raft_storage<command> *storage,
        state_machine *state);
    ~raft();

    // start the raft node.
    // Please make sure all of the rpc request handlers have been registered before this method.
    void start();

    // stop the raft node.
    // Please make sure all of the background threads are joined in this method.
    // Notice: you should check whether is server should be stopped by calling is_stopped().
    // Once it returns true, you should break all of your long-running loops in the background threads.
    void stop();

    // send a new command to the raft nodes.
    // This method returns true if this raft node is the leader that successfully appends the log.
    // If this node is not the leader, returns false.
    bool new_command(command cmd, int &term, int &index);

    // returns whether this node is the leader, you should also set the current term;
    bool is_leader(int &term);

    // save a snapshot of the state machine and compact the log.
    bool save_snapshot();

private:
    std::mutex mtx; // A big lock to protect the whole data structure
    ThrPool *thread_pool;
    raft_storage<command> *storage; // To persist the raft log
    state_machine *state; // The state machine that applies the raft log, e.g. a kv store
    rpcs *rpc_server; // RPC server to recieve and handle the RPC requests
    std::vector<rpcc *> rpc_clients; // RPC clients of all raft nodes including this node
    int my_id; // The index of this node in rpc_clients, start from 0
    std::atomic_bool stopped; // set by stop(); polled by the background loops

    enum raft_role {
        follower,
        candidate,
        leader
    };
    // current role of this node (follower / candidate / leader)
    raft_role role;

    // Current terms are exchanged whenever servers communicate;
    // if one server's current term is smaller than the other's,
    // then it updates its current term to the larger value.
    // If a candidate or leader discovers that its term is out of date,
    // it immediately reverts to follower state.
    // latest term server has seen
    // (initialized to 0 on first boot, increases monotonically)
    int current_term;
    // id of the node this server believes is the leader
    // NOTE(review): no assignment to leader_id is visible in this file —
    // verify it is initialized before being read anywhere.
    int leader_id;

    // worker threads created by start() and joined by stop()
    std::thread *background_election;
    std::thread *background_ping;
    std::thread *background_commit;
    std::thread *background_apply;

    // Your code here:
    // time related const in milliseconds
    const int heartbeat_time_interval = 100;
    const int commit_time_interval = 100;
    const int apply_time_interval = 10;
    const int snapshot_interval = 300;
    const int sleep_time = 10;
    const int follower_election_timeout_lower = 700;
    const int follower_election_timeout_upper = 1300;
    const int candidate_election_timeout_lower = 1000;
    const int candidate_election_timeout_upper = 1500;
    // randomized per-node timeouts, drawn once in the constructor
    std::chrono::milliseconds follower_election_timeout;
    std::chrono::milliseconds candidate_election_timeout;

    // time related
    std::chrono::system_clock::time_point last_election_start_time;
    std::chrono::system_clock::time_point last_received_RPC_time;
    std::chrono::system_clock::time_point last_snapshot_sent_time;

    /* ----Persistent state on all server---- */
    // updated on stable storage before responding to RPCs
    // current_term (has defined before)
    // candidateId that received vote in current term (or -1 if none)
    int voted_for;
    // log entries; each entry contains command for state machine,
    // and term when entry was received by leader (first index is 1)
    std::vector<log_entry<command> > log;

    /* ---- Volatile state on all server---- */
    // index of highest log entry known to be committed (initialized to 0)
    int commit_index;
    // index of highest log entry applied to state machine (initialized to 0, increases monotonically)
    int last_applied;

    /* ---- Volatile state on leader---- */
    // reinitialized after election
    // for each server, index of the next log entry to send to that server
    // (initialized to leader last log index + 1)
    std::vector<int> next_index;
    // for each server, index of highest log entry known to be replicated on server
    // (initialized to 0, increases monotonically)
    std::vector<int> match_index;

    /* ---- Volatile state on candidate---- */
    // reinitialized before election
    // store the votes of followers
    // use std::vector<int> instead of std::vector<bool> since latter
    // may cause problem due to std::vector<bool> own optimization
    // elements are only 0 and 1
    std::vector<int> votes_get;

    // snapshot related
    // initialized to false
    // modify to true when outer calls save_snapshot
    bool use_snapshot;
    // the index of the last entry in the log that the snapshot replaces
    // (the last entry the state machine had applied)
    int last_included_index;
    // the term of this entry
    int last_included_term;
    // data of last snapshot, will be sent to followers
    std::vector<char> snapshot_data;

private:
    // RPC handlers
    int request_vote(request_vote_args arg, request_vote_reply &reply);
    int append_entries(append_entries_args<command> arg, append_entries_reply &reply);
    int install_snapshot(install_snapshot_args arg, install_snapshot_reply &reply);
    // RPC helpers
    void send_request_vote(int target, request_vote_args arg);
    void handle_request_vote_reply(int target, const request_vote_args &arg, const request_vote_reply &reply);
    void send_append_entries(int target, append_entries_args<command> arg);
    void handle_append_entries_reply(int target,
        const append_entries_args<command> &arg,
        const append_entries_reply &reply);
    void send_install_snapshot(int target, install_snapshot_args arg);
    void handle_install_snapshot_reply(int target, const install_snapshot_args &arg, const install_snapshot_reply &reply);

private:
    bool is_stopped();
    // number of nodes in the cluster (narrows size_t to int)
    int num_nodes() {
        return rpc_clients.size();
    }

    // background workers
    void run_background_ping();
    void run_background_election();
    void run_background_commit();
    void run_background_apply();

    // Your code here:
    // two concepts for the log index:
    // physical index (e.g. the index of the std::vector)
    // and logical index (e.g. physical index + snapshot index)
    // NOTE(review): the helpers below read `log` / `last_included_*` without
    // locking — they assume the caller already holds `mtx`.
    // convert logical_index to physical index
    int to_physical_index(int logical_index) {
        int physical_index = logical_index - last_included_index - 1;
        // asserts the index lies inside the un-compacted part of the log
        // (signed/unsigned comparison against log.size() is intentional here)
        assert(physical_index >= 0 && physical_index < log.size());
        return physical_index;
    }
    // convert physical index to logical_index
    int to_logical_index(int physical_index) {
        return physical_index + last_included_index + 1;
    }
    // logical index of the newest entry; falls back to the snapshot boundary
    // when the in-memory log is empty
    int get_last_log_index() {
        if (log.empty()) {
            return last_included_index;
        } else {
            return to_logical_index(log.size() - 1);
        }
    }
    // term of the newest entry; falls back to the snapshot's last term
    int get_last_log_term() {
        if (log.empty()) {
            return last_included_term;
        } else {
            return log[log.size() - 1].term_;
        }
    }
    // fetch an entry by logical index; the entry exactly at the snapshot
    // boundary (physical index -1) is synthesized from last_included_*,
    // anything older than that is a fatal error.
    log_entry<command> get_log_by_logical_index(int logical_index) {
        int physical_index = logical_index - last_included_index - 1;
        if (physical_index >= 0 && physical_index < log.size()) {
            return log[physical_index];
        } else {
            if (physical_index == -1) {
                return log_entry<command>(last_included_term, last_included_index);
            } else {
                // NOTE(review): log.size() is size_t but is formatted with %d —
                // strictly this should be cast to int for printf.
                RAFT_ERR("physical_index %d, current log size %d, logical_index %d", physical_index, log.size(), logical_index);
                assert(0);
            }
        }
    }
};
// Constructor: wires up RPC endpoints, registers the three RPC handlers, and
// initializes all Raft state to the "fresh follower, term 0" configuration.
// Persistent state (term / vote / log / snapshot) is only *restored* later,
// in start().
//
// Fixes vs. the previous revision:
//  - leader_id was never initialized (indeterminate value); it now starts
//    at -1 ("unknown leader").
//  - the mem-init list is reordered to match the member declaration order
//    (members are always constructed in declaration order, so the old list
//    triggered -Wreorder and was misleading).
//  - votes_get is a vector<int>, so it is filled with 0 rather than `false`.
template<typename state_machine, typename command>
raft<state_machine, command>::raft(rpcs *server,
                                   std::vector<rpcc *> clients,
                                   int idx,
                                   raft_storage<command> *storage,
                                   state_machine *state) :
    storage(storage),
    state(state),
    rpc_server(server),
    rpc_clients(clients),
    my_id(idx),
    stopped(false),
    role(follower),
    current_term(0),
    leader_id(-1),
    background_election(nullptr),
    background_ping(nullptr),
    background_commit(nullptr),
    background_apply(nullptr) {
    thread_pool = new ThrPool(32);

    // Register the rpcs.
    rpc_server->reg(raft_rpc_opcodes::op_request_vote, this, &raft::request_vote);
    rpc_server->reg(raft_rpc_opcodes::op_append_entries, this, &raft::append_entries);
    rpc_server->reg(raft_rpc_opcodes::op_install_snapshot, this, &raft::install_snapshot);

    // Your code here:
    // Do the initialization
    voted_for = -1;
    commit_index = 0;
    last_applied = 0;
    // -1 means "no snapshot yet": logical index 0 then maps to physical 0.
    last_included_index = -1;
    last_included_term = -1;
    log = std::vector<log_entry<command> >();
    next_index = std::vector<int>(num_nodes(), 1);
    match_index = std::vector<int>(num_nodes(), 0);
    votes_get = std::vector<int>(num_nodes(), 0);
    last_election_start_time = std::chrono::system_clock::now();
    last_received_RPC_time = std::chrono::system_clock::now();
    last_snapshot_sent_time = std::chrono::system_clock::now();
    use_snapshot = false;
    // Randomize the election timeouts per node so elections rarely collide.
    candidate_election_timeout =
        std::chrono::milliseconds(rand() % (candidate_election_timeout_upper - candidate_election_timeout_lower)
                                  + candidate_election_timeout_lower);
    follower_election_timeout =
        std::chrono::milliseconds(rand() % (follower_election_timeout_upper - follower_election_timeout_lower)
                                  + follower_election_timeout_lower);
#ifdef LOG_TO_FILE
    freopen("raft.log", "w", stdout);
#endif
}
// Destructor: release the thread objects and the worker pool.  The caller is
// expected to have invoked stop() first so the threads are already joined.
// `delete` on a null pointer is a no-op, so no guards are required.
template<typename state_machine, typename command>
raft<state_machine, command>::~raft() {
    delete background_ping;
    delete background_election;
    delete background_commit;
    delete background_apply;
    delete thread_pool;
}
/******************************************************************
Public Interfaces
*******************************************************************/
// Shut the node down: raise the stop flag (each background loop polls
// is_stopped() and exits), join the worker threads, then tear down the
// thread pool.
//
// Fix vs. the previous revision: the four thread pointers were dereferenced
// unconditionally, which is undefined behavior if stop() is called before
// start() (pointers still null) or a second time (join() on a non-joinable
// thread throws std::system_error).  Guard each join with a null +
// joinable() check so stop() is safe and idempotent.
template<typename state_machine, typename command>
void raft<state_machine, command>::stop() {
    stopped.store(true);
    if (background_ping && background_ping->joinable()) background_ping->join();
    if (background_election && background_election->joinable()) background_election->join();
    if (background_commit && background_commit->joinable()) background_commit->join();
    if (background_apply && background_apply->joinable()) background_apply->join();
    thread_pool->destroy();
}
// True once stop() has been called; background loops use this as their
// termination condition.  (std::atomic_bool converts to bool via a load.)
template<typename state_machine, typename command>
bool raft<state_machine, command>::is_stopped() {
    return static_cast<bool>(stopped);
}
// Reports whether this node currently believes it is the leader, and writes
// the current term into `term` either way.
// NOTE(review): reads `current_term` and `role` without taking `mtx`.
// Internal callers (new_command, save_snapshot) already hold the lock, so
// locking here would deadlock them; external callers race with the
// background threads — confirm that is acceptable for the test harness.
template<typename state_machine, typename command>
bool raft<state_machine, command>::is_leader(int &term) {
    term = current_term;
    return role == leader;
}
// Bring the node online: seed the log with a sentinel entry, restore any
// persisted metadata / log / snapshot, then launch the four background
// worker threads.  Must be called after the constructor (which registers
// the RPC handlers) and before any client traffic.
template<typename state_machine, typename command>
void raft<state_machine, command>::start() {
    // Lab3: Your code here
    RAFT_LOG("start");
    // the first log index is 1 instead of 0
    // To simplify the programming, you can append an empty log entry
    // to the logs at the very beginning. And since the 'lastApplied'
    // index starts from 0, the first empty log entry will never be
    // applied to the state machine.
    command cmd;
    // append a null log as the first log
    log.push_back(log_entry<command>(0, 0, cmd));
    // restore metadata and log
    // NOTE(review): assumes raft_storage::restore_log appends/replaces in a
    // way that keeps the sentinel consistent — confirm against its contract.
    storage->restore_metadata(current_term, voted_for);
    storage->restore_log(log);
    // restore snapshot
    std::vector<char> data;
    storage->restore_snapshot(last_included_index, last_included_term, data);
    if (!data.empty()) {
        // A snapshot exists: load it into the state machine and fast-forward
        // commit_index / last_applied past the compacted prefix.
        state->apply_snapshot(data);
        commit_index = last_included_index;
        last_applied = last_included_index;
    }
    // create 4 background threads
    this->background_election = new std::thread(&raft::run_background_election, this);
    this->background_ping = new std::thread(&raft::run_background_ping, this);
    this->background_commit = new std::thread(&raft::run_background_commit, this);
    this->background_apply = new std::thread(&raft::run_background_apply, this);
}
// Append a client command to the leader's log.
// Returns true (with `term` and `index` filled in) only when this node is
// the leader; otherwise reports the current term and returns false.  The
// entry is persisted and appended locally here — replication to followers
// happens asynchronously in the background commit thread.
template<typename state_machine, typename command>
bool raft<state_machine, command>::new_command(command cmd, int &term, int &index) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);

    // Guard clause: only the leader may accept new commands.
    if (!is_leader(term)) {
        term = current_term;
        return false;
    }

    RAFT_LOG("new command");
    index = to_logical_index(log.size());
    log_entry<command> entry(term, index, cmd);

    // Persist to stable storage before mutating the in-memory log.
    storage->persist_log(entry);
    log.push_back(entry);

    // The leader trivially "replicates" to itself.
    next_index[my_id] = index + 1;
    match_index[my_id] = index;
    return true;
}
// Snapshot the state machine at `last_applied`, discard the log prefix the
// snapshot covers, persist both, and — when this node is the leader —
// immediately push the snapshot to every follower via InstallSnapshot RPCs.
// Returns false when nothing has been applied yet, true otherwise.
template<typename state_machine, typename command>
bool raft<state_machine, command>::save_snapshot() {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    use_snapshot = true;
    RAFT_LOG("save_snapshot START");
    // Nothing applied yet: there is nothing to compact.
    if (last_applied == 0) return false;
    RAFT_LOG("get_log_by_logical_index");
    // Remember index/term of the last applied entry before it is erased.
    auto log_last_applied = get_log_by_logical_index(last_applied);
    // do snapshot
    std::vector<char> snapshot_data_ = state->snapshot();
    snapshot_data.swap(snapshot_data_);
    // clear log before last_applied (including last_applied)
    log.erase(log.begin(), log.begin() + to_physical_index(last_applied) + 1);
    // update last_included
    last_included_index = log_last_applied.index_;
    last_included_term = log_last_applied.term_;
    // persist log
    storage->persist_logs(log);
    // persist snapshot
    storage->persist_snapshot(last_included_index, last_included_term, snapshot_data);
    // (RAFT_LOG is compiled out, so state->store is never actually touched here)
    RAFT_LOG("save_snapshot SAVED, state machine length %d", state->store.size());
    // Passing current_term as the out-param harmlessly rewrites it with itself.
    if (is_leader(current_term)) {
        // immediately send InstallSnapshot rpc
        last_snapshot_sent_time = std::chrono::system_clock::now();
        for (int id = 0; id < num_nodes(); ++id) {
            if (id == my_id) continue;
            install_snapshot_args arg(current_term, my_id, last_included_index, last_included_term, snapshot_data);
            thread_pool->addObjJob(this, &raft::send_install_snapshot, id, arg);
        }
        RAFT_LOG("install_snapshot rpc sent");
    }
    return true;
}
/******************************************************************
RPC Related
*******************************************************************/
// To implement an asynchronous RPC call,
// use thread pool to handle asynchronous events
// thread_pool->addObjJob(this, &raft::your_method, arg1, arg2);
// RequestVote RPC handler (Raft Figure 2): grant our vote iff the
// candidate's term is at least ours, we have not voted for someone else in
// this term, and the candidate's log is at least as up-to-date as ours
// (compare last term first, then last index).  Persists term/vote before
// the reply goes out.
template<typename state_machine, typename command>
int raft<state_machine, command>::request_vote(request_vote_args args, request_vote_reply &reply) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    // should not update here
    // since rpc sender is not leader yet
    // last_received_RPC_time = std::chrono::system_clock::now();
    RAFT_LOG("request_vote start");
    if (args.term_ < current_term) {
        // reply false if term < currentTerm
        RAFT_LOG("term < currentTerm, reply vote_granted_ FALSE");
        reply.term_ = current_term;
        reply.vote_granted_ = false;
    } else {
        // args.term_ >= current_term
        if (args.term_ > current_term) {
            // If term > currentTerm, currentTerm <- term
            current_term = args.term_;
            // step down if leader or candidate
            role = follower;
            voted_for = -1;
            // persist metadata
            storage->persist_metadata(current_term, voted_for);
            RAFT_LOG("args.term_ > current_term, reverts to FOLLOWER");
        }
        // args.term_ == current_term
        // If term == currentTerm, votedFor is null or candidateId,
        // and candidate's log is at least as complete as local log,
        if (voted_for == -1 || voted_for == args.candidate_id_) {
            // voting server denies vote if its log is more complete
            if ((args.last_log_term_ < get_last_log_term())
                || ((args.last_log_term_ == get_last_log_term()) && (args.last_log_index_ < get_last_log_index()))) {
                RAFT_LOG("term == currentTerm, reply vote_granted_ FALSE");
                // do not grant vote
                reply.term_ = current_term;
                reply.vote_granted_ = false;
            } else {
                // grant vote
                reply.term_ = current_term;
                reply.vote_granted_ = true;
                RAFT_LOG("term == currentTerm, reply vote_granted_ TRUE");
                // modify voted for
                voted_for = args.candidate_id_;
                // reset election timeout
                // NOTE(review): the comment above announces a timer reset, but
                // no code updates last_received_RPC_time here — confirm whether
                // granting a vote should also reset the election timeout.
                // persist metadata
                storage->persist_metadata(current_term, voted_for);
            }
        } else {
            // Already voted for a different candidate this term.
            RAFT_LOG("term == currentTerm, reply vote_granted_ FALSE");
            // do not grant vote
            reply.term_ = current_term;
            reply.vote_granted_ = false;
        }
    }
    return 0;
}
// Process a RequestVote reply from `target`.  A newer term forces us back to
// follower.  Otherwise, record the vote; once a strict majority is reached
// (counting our own pre-set vote in votes_get), become leader, reinitialize
// next_index / match_index, and fire an immediate round of heartbeats.
template<typename state_machine, typename command>
void raft<state_machine, command>::handle_request_vote_reply(int target,
                                                             const request_vote_args &arg,
                                                             const request_vote_reply &reply) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    RAFT_LOG("handle_request_vote_reply start");
    // if one server's current term is smaller than the other's,
    // then it updates its current term to the larger value.
    // If a candidate or leader discovers that its term is out of date,
    // it immediately reverts to follower state.
    if (reply.term_ > current_term) {
        current_term = reply.term_;
        role = follower;
        voted_for = -1;
        // persist metadata
        storage->persist_metadata(current_term, voted_for);
        RAFT_LOG("reply.term_ > current_term, reverts to FOLLOWER");
    } else {
        // if reply.vote_granted_ == true and still in candidate role
        if (reply.vote_granted_ && role == candidate) {
            votes_get[target] = reply.vote_granted_;
            // check votes num
            int votes_num = std::accumulate(votes_get.begin(), votes_get.end(), 0);
            RAFT_LOG("votes num get %d", votes_num);
            if (votes_num > (int) num_nodes() / 2) {
                // for each server, index of the next log entry to send to that server
                // initialized to leader last log index + 1 (log.size())
                next_index.assign(num_nodes(), to_logical_index(log.size()));
                // for each server, index of highest log entry known to be replicated on server
                // (initialized to 0, increases monotonically)
                match_index.assign(num_nodes(), 0);
                RAFT_LOG("become LEADER");
                // change role to leader
                role = leader;
                // immediately send heartbeat
                // (is_leader also rewrites current_term with itself — harmless)
                if (is_leader(current_term)) {
                    for (int id = 0; id < num_nodes(); ++id) {
                        if (id == my_id) continue;
                        RAFT_LOG("get_log_by_logical_index, id %d", id);
                        log_entry<command> prev_log = get_log_by_logical_index(next_index[id] - 1);
                        append_entries_args<command> arg(current_term, my_id, prev_log.index_, prev_log.term_, commit_index, true);
                        thread_pool->addObjJob(this, &raft::send_append_entries, id, arg);
                    }
                    RAFT_LOG("heartbeat sent");
                }
                // // append empty log of current term
                // command cmd;
                // log_entry<command> empty_log(current_term, to_logical_index(log.size()), cmd);
                // // persist log
                // storage->persist_log(empty_log);
                // log.push_back(empty_log);
            }
        }
    }
    return;
}
// AppendEntries RPC handler.  Heartbeats (arg.heartbeat_ == true) only carry
// term / commit information; real AppendEntries perform the Figure-2
// consistency check on (prev_log_index, prev_log_term), truncate any
// conflicting suffix, append the new entries, and advance commit_index.
// Every accepted RPC refreshes last_received_RPC_time (the election timer).
template<typename state_machine, typename command>
int raft<state_machine, command>::append_entries(append_entries_args<command> arg, append_entries_reply &reply) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    RAFT_LOG("append_entries start");
    last_received_RPC_time = std::chrono::system_clock::now();
    if (arg.heartbeat_) {
        // heartbeat
        RAFT_LOG("heartbeat received");
        if (arg.term_ < current_term) {
            // reply false if term < current_term
            reply.success_ = false;
            reply.term_ = current_term;
        } else {
            // arg.term >= current_term
            role = follower;
            if (arg.term_ > current_term) voted_for = -1;
            current_term = arg.term_;
            RAFT_LOG("leader_commit %d, commit_index %d", arg.leader_commit_, commit_index);
            // If leaderCommit > commitIndex, set commitIndex =
            // min(leaderCommit, index of last new entry)
            // Only advance when the heartbeat's prev entry matches our log
            // tail exactly, i.e. we are known to be up to date with the leader.
            if (arg.leader_commit_ > commit_index && arg.prev_log_term_ == get_last_log_term()
                && arg.prev_log_index_ == get_last_log_index() && to_logical_index(log.size()) > arg.leader_commit_) {
                commit_index = std::min(arg.leader_commit_, to_logical_index(log.size()) - 1);
            }
            reply.term_ = current_term;
            reply.success_ = true;
            reply.match_index_ = commit_index;
            // persist metadata
            storage->persist_metadata(current_term, voted_for);
        }
    } else {
        // append_entries
        if (arg.term_ > current_term) {
            current_term = arg.term_;
            role = follower;
            voted_for = -1;
            // persist metadata
            storage->persist_metadata(current_term, voted_for);
        }
        if (arg.term_ < current_term) {
            // reply false if term < current_term
            reply.success_ = false;
            reply.term_ = current_term;
            RAFT_LOG("term < current_term, append_entries FAILED");
        } else if (to_logical_index(log.size()) <= arg.prev_log_index_
            || get_log_by_logical_index(arg.prev_log_index_).term_ != arg.prev_log_term_) {
            // reply false if log doesn't contain an entry at
            // prev_log_index whose term matches prev_log_term
            reply.success_ = false;
            reply.term_ = current_term;
            RAFT_LOG("arg.prev_log_term_ %d MISMATCH log.prev_log_term_ at index %d, append_entries FAILED",
                arg.prev_log_term_, arg.prev_log_index_);
        } else {
            if (arg.prev_log_index_ <= last_included_index) {
                // already in snapshot
                // prev_log_index falls inside our snapshot: skip the entries the
                // snapshot already covers and rebuild the log from the rest.
                RAFT_LOG("arg.prev_log_index_ <= last_included_index, prev_log_index_ in SNAPSHOT");
                auto itr = arg.entries_.begin();
                for (; itr != arg.entries_.end(); ++itr) {
                    // itr points to log index of last_included_index + 1
                    if ((*itr).index_ == last_included_index + 1) break;
                }
                RAFT_LOG("log size before %d", log.size());
                // append itr (included) to entries.end to log
                log.clear();
                log.insert(log.end(), itr, arg.entries_.end());
                RAFT_LOG("log size after %d", log.size());
                // persist log
                storage->persist_logs(log);
                // // If leaderCommit > commitIndex, set commitIndex =
                // // min(leaderCommit, index of last new entry)
                // if (arg.leader_commit_ > commit_index) {
                //     commit_index = std::min(arg.leader_commit_, to_logical_index((int) log.size() - 1));
                // }
                reply.term_ = current_term;
                reply.success_ = true;
                reply.match_index_ = std::max(get_last_log_index(), 0);
                RAFT_LOG("reply.match_index_: %d", reply.match_index_);
                RAFT_LOG("append_entries SUCCESS");
            } else {
                // if an existing entry conflicts with a new one
                // (same index but different terms), delete the
                // existing entry and all that follow it
                auto start = log.begin() + to_physical_index(arg.prev_log_index_) + 1;
                auto end = log.end();
                RAFT_LOG("append_entries::log size before erase: %d", log.size());
                PRINT_ALL_LOG(log);
                // delete [arg.prev_log_index_+1, end]
                log.erase(start, end);
                RAFT_LOG("append_entries::log size after erase: %d", log.size());
                PRINT_ALL_LOG(log);
                RAFT_LOG("append_entries::arg.entries_ size: %d", arg.entries_.size());
                PRINT_ALL_LOG(arg.entries_);
                // persist logs
                storage->persist_logs(arg.entries_);
                // Append any new entries not already in the log
                log.insert(log.end(), arg.entries_.begin(), arg.entries_.end());
                RAFT_LOG("append_entries::log size after insert: %d", log.size());
                PRINT_ALL_LOG(log);
                // If leaderCommit > commitIndex, set commitIndex =
                // min(leaderCommit, index of last new entry)
                if (arg.leader_commit_ > commit_index) {
                    commit_index = std::min(arg.leader_commit_, to_logical_index((int) log.size() - 1));
                }
                reply.success_ = true;
                reply.term_ = current_term;
                reply.match_index_ = std::max(get_last_log_index(), 0);
                RAFT_LOG("reply.match_index_: %d", reply.match_index_);
                RAFT_LOG("append_entries SUCCESS");
            }
        }
    }
    return 0;
}
// Process an AppendEntries reply from `node` (leader side).  A newer term
// demotes us to follower.  On success, advance match_index / next_index and
// recompute commit_index as the median of match_index (the index replicated
// on a majority).  On failure, back next_index off so the next round
// retransmits from the beginning.
template<typename state_machine, typename command>
void raft<state_machine, command>::handle_append_entries_reply(int node,
                                                               const append_entries_args<command> &arg,
                                                               const append_entries_reply &reply) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    RAFT_LOG("handle_append_entries_reply start, reply from node %d", node);
    if (reply.term_ > current_term) {
        current_term = reply.term_;
        role = follower;
        voted_for = -1;
        // persist metadata
        storage->persist_metadata(current_term, voted_for);
        RAFT_LOG("reply.term_ > current_term, reverts to FOLLOWER");
    } else {
        // Heartbeat replies carry no replication progress to record.
        if (arg.heartbeat_) {}
        else {
            if (reply.success_) {
                // append_entries success
                RAFT_LOG("handle_append_entries_reply from node %d: handle SUCCESS, reply.match_index_ %d",
                    node,
                    reply.match_index_);
                // update index of highest log entry known to be replicated on server
                // match_index[node] = std::max(match_index[node], arg.prev_log_index_ + (int) arg.entries_.size());
                match_index[node] = std::max(reply.match_index_, match_index[node]);
                RAFT_LOG("update node %d, match_index[node] = %d", node, match_index[node]);
                // update index of the next log entry to send to that server
                next_index[node] = match_index[node] + 1;
                // update commit_index
                std::vector<int> temp = match_index;
                std::sort(temp.begin(), temp.end());
                // elements in temp are increasing
                // index before middle must be replicated on majority servers
                int new_commit_index = temp[(temp.size() - 1) / 2];
                // NOTE(review): Raft (§5.4.2) only allows committing an entry by
                // counting replicas when log[N].term == currentTerm; this code
                // advances commit_index without that term check — confirm the
                // lab's failure model makes this safe.
                if (new_commit_index > commit_index) {
                    commit_index = new_commit_index;
                }
                RAFT_LOG("new_commit_index %d, commit_index %d", new_commit_index, commit_index);
                // TODO
            } else {
                // append_entries failed
                RAFT_LOG("handle_append_entries_reply from node %d: handle FAILURE", node);
                // update next_index to smaller
                // int old_next_index = next_index[node];
                // next_index[node] = std::max(old_next_index - 1, 1);
                // trick: directly update next_index to 1
                // (correct but retransmits the whole log; the commented-out
                // decrement-by-one is the incremental alternative)
                next_index[node] = 1;
                // next_index[node] = std::max(1, last_included_index + 1);
            }
        }
    }
    return;
}
// InstallSnapshot RPC handler (follower side).  Reject stale terms;
// otherwise drop every log entry the snapshot covers (retaining entries past
// it), load the snapshot into the state machine, fast-forward commit/apply
// indices, and persist everything before replying.
template<typename state_machine, typename command>
int raft<state_machine, command>::install_snapshot(install_snapshot_args args, install_snapshot_reply &reply) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    RAFT_LOG("install_snapshot start");
    last_received_RPC_time = std::chrono::system_clock::now();
    if (args.term_ < current_term) {
        // reply immediately if term < current_term
        reply.term_ = current_term;
        RAFT_LOG("term < current_term, install_snapshot FAILED");
    } else {
        // args.term_ >= current_term
        if (args.term_ > current_term) {
            current_term = args.term_;
            role = follower;
            voted_for = -1;
            // persist metadata
            storage->persist_metadata(current_term, voted_for);
        }
        // erase log before last_included_index (including last_included_index)
        // find the first entry strictly after the snapshot boundary
        auto itr = log.begin();
        for (; itr != log.end(); ++itr) {
            if ((*itr).index_ > args.last_included_index_) {
                break;
            }
        }
        // if existing log entry has same index and term as snapshot's last included entry,
        // retain log entries following it and reply
        // discard the entire log if snapshot contain new information
        // not already in the recipient's log
        // If instead, the follower receives a snapshot that describes a prefix of its log
        // (due to retransmission or by mistake), then log entries covered by the snapshot
        // are deleted but entries following the snapshot are still valid and must be retained.
        log.erase(log.begin(), itr);
        // persist logs
        storage->persist_logs(log);
        last_included_index = args.last_included_index_;
        last_included_term = args.last_included_term_;
        // everything up to the snapshot is by definition committed and applied
        commit_index = last_included_index;
        last_applied = last_included_index;
        // reset state machine using snapshot contents
        // (and load snapshot's cluster configuration)
        state->apply_snapshot(args.data_);
        storage->persist_snapshot(last_included_index, last_included_term, args.data_);
        // reply
        reply.term_ = current_term;
    }
    RAFT_LOG("install_snapshot finish");
    return 0;
}
// Process an InstallSnapshot reply (leader side).  If the follower accepted
// (its term is not newer than ours), it now holds everything up to the
// snapshot's last included index, so advance its replication cursors;
// otherwise a newer term exists and we step down to follower.
template<typename state_machine, typename command>
void raft<state_machine, command>::handle_install_snapshot_reply(int node,
                                                                 const install_snapshot_args &arg,
                                                                 const install_snapshot_reply &reply) {
    // Lab3: Your code here
    std::unique_lock<std::mutex> lock(mtx);
    RAFT_LOG("handle_install_snapshot_reply start");
    if (reply.term_ <= current_term) {
        // Snapshot delivered: the follower is caught up to the boundary.
        match_index[node] = arg.last_included_index_;
        next_index[node] = arg.last_included_index_ + 1;
    } else {
        // Our term is stale; revert to follower and persist the new term.
        current_term = reply.term_;
        role = follower;
        voted_for = -1;
        storage->persist_metadata(current_term, voted_for);
        RAFT_LOG("reply.term_ > current_term, reverts to FOLLOWER");
    }
    return;
}
// Synchronously issue a RequestVote RPC to `target` (run on a thread-pool
// worker) and, on success, hand the reply to handle_request_vote_reply.
// A failed RPC is simply dropped; the election loop will retry.
template<typename state_machine, typename command>
void raft<state_machine, command>::send_request_vote(int target, request_vote_args arg) {
    request_vote_reply reply;
    int ret = rpc_clients[target]->call(raft_rpc_opcodes::op_request_vote, arg, reply);
    if (ret != 0) {
        return; // RPC failed
    }
    handle_request_vote_reply(target, arg, reply);
}
// Synchronously issue an AppendEntries RPC to `target` (run on a thread-pool
// worker) and, on success, hand the reply to handle_append_entries_reply.
// A failed RPC is simply dropped; the background loops will retry.
template<typename state_machine, typename command>
void raft<state_machine, command>::send_append_entries(int target, append_entries_args<command> arg) {
    append_entries_reply reply;
    int ret = rpc_clients[target]->call(raft_rpc_opcodes::op_append_entries, arg, reply);
    if (ret != 0) {
        return; // RPC failed
    }
    handle_append_entries_reply(target, arg, reply);
}
// Synchronously issue an InstallSnapshot RPC to `target` (run on a
// thread-pool worker) and, on success, hand the reply to
// handle_install_snapshot_reply.  A failed RPC is simply dropped.
template<typename state_machine, typename command>
void raft<state_machine, command>::send_install_snapshot(int target, install_snapshot_args arg) {
    install_snapshot_reply reply;
    int ret = rpc_clients[target]->call(raft_rpc_opcodes::op_install_snapshot, arg, reply);
    if (ret != 0) {
        return; // RPC failed
    }
    handle_install_snapshot_reply(target, arg, reply);
}
/******************************************************************
Background Workers
*******************************************************************/