@@ -652,16 +652,22 @@ EndListener.prototype.onReceiveMessage = function(){};
652
652
EndListener . prototype . onReceiveStatus = function ( ) { } ;
653
653
EndListener . prototype . recvMessageWithContext = function ( ) { } ;
654
654
655
/**
 * Maps a gRPC batch operation to the operations that must have been started
 * (either in the same batch or in a previously started batch) before it can
 * be started. Consumed by _areBatchRequirementsMet / _startBatchIfReady to
 * enforce op ordering for client calls.
 * @private
 */
var OP_DEPENDENCIES = {
  [grpc.opType.SEND_MESSAGE]: [grpc.opType.SEND_INITIAL_METADATA],
  [grpc.opType.SEND_CLOSE_FROM_CLIENT]: [grpc.opType.SEND_MESSAGE],
  [grpc.opType.RECV_MESSAGE]: [grpc.opType.SEND_INITIAL_METADATA]
};
655
661
/**
656
662
* Produces a callback triggered by streaming response messages.
657
663
* @private
658
664
* @param {EventEmitter } emitter
659
665
* @param {grpc.internal~Call } call
660
- * @param {grpc~Listener } listener
666
+ * @param {function } get_listener Returns a grpc~Listener.
661
667
* @param {grpc~deserialize } deserialize
662
668
* @return {Function }
663
669
*/
664
- function _getStreamReadCallback ( emitter , call , listener , deserialize ) {
670
+ function _getStreamReadCallback ( emitter , call , get_listener , deserialize ) {
665
671
return function ( err , response ) {
666
672
if ( err ) {
667
673
// Something has gone wrong. Stop reading and wait for status
@@ -684,6 +690,7 @@ function _getStreamReadCallback(emitter, call, listener, deserialize) {
684
690
emitter . _readsDone ( ) ;
685
691
return ;
686
692
}
693
+ var listener = get_listener ( ) ;
687
694
var context = {
688
695
call : call ,
689
696
listener : listener
@@ -692,6 +699,66 @@ function _getStreamReadCallback(emitter, call, listener, deserialize) {
692
699
} ;
693
700
}
694
701
702
/**
 * Tests whether a batch can be started: every operation that the batch's ops
 * depend on (per OP_DEPENDENCIES) must either be part of this same batch or
 * have already been started.
 * @private
 * @param {number[]} batch_ops The operations in the batch we are checking.
 * @param {number[]} completed_ops Previously started operations.
 * @return {boolean} True if all dependencies are satisfied.
 */
function _areBatchRequirementsMet(batch_ops, completed_ops) {
  var dependencies = _.flatMap(batch_ops, function(op) {
    return OP_DEPENDENCIES[op] || [];
  });
  var satisfied_by = batch_ops.concat(completed_ops);
  // _.difference tests each dependency for membership individually, so
  // duplicate dependencies (e.g. two ops that both require
  // SEND_INITIAL_METADATA) are handled correctly. The previous
  // intersection/isEqual comparison de-duplicated one side only, which made
  // such batches appear unsatisfiable and deferred them forever.
  return _.difference(dependencies, satisfied_by).length === 0;
}
717
+
718
/**
 * Enforces the order of operations for synchronous requests. If a batch's
 * operations cannot be started because required operations have not started
 * yet, the batch is deferred until requirements are met.
 * @private
 * @param {grpc.Client~Call} call
 * @param {object} batch The batch to start, keyed by grpc.opType values.
 * @param {object} batch_state
 * @param {number[]} batch_state.completed_ops The ops already started.
 * @param {object[]} batch_state.deferred_batches {batch, callback} pairs to
 *     be sent after their dependencies are fulfilled.
 * @param {function} callback The callback to pass to startBatch.
 * @return {object} The updated batch_state.
 */
function _startBatchIfReady(call, batch, batch_state, callback) {
  var completed_ops = batch_state.completed_ops;
  var deferred_batches = batch_state.deferred_batches;
  var batch_ops = _.map(_.keys(batch), Number);
  if (_areBatchRequirementsMet(batch_ops, completed_ops)) {
    // Dependencies are met, start the batch and any deferred batches whose
    // dependencies are met as a result.
    call.startBatch(batch, callback);
    completed_ops = _.union(completed_ops, batch_ops);
    deferred_batches = _.flatMap(deferred_batches, function(deferred_batch) {
      // Inspect the deferred batch's own ops. Note: deferred_batch is a
      // {batch, callback} wrapper; taking _.keys of the wrapper itself would
      // yield ['batch', 'callback'] -> [NaN, NaN], which has no entries in
      // OP_DEPENDENCIES and would start every deferred batch unconditionally.
      var deferred_batch_ops = _.map(_.keys(deferred_batch.batch), Number);
      if (_areBatchRequirementsMet(deferred_batch_ops, completed_ops)) {
        call.startBatch(deferred_batch.batch, deferred_batch.callback);
        // Record the newly started ops so later deferred batches in this
        // same pass can be unlocked by them.
        completed_ops = _.union(completed_ops, deferred_batch_ops);
        return [];
      }
      return [deferred_batch];
    });
  } else {
    // Dependencies are not met, defer the batch
    deferred_batches = deferred_batches.concat({
      batch: batch,
      callback: callback
    });
  }
  return {
    completed_ops: completed_ops,
    deferred_batches: deferred_batches
  };
}
761
+
695
762
/**
696
763
* Produces an interceptor which will start gRPC batches for unary calls.
697
764
* @private
@@ -708,19 +775,25 @@ function _getUnaryInterceptor(method_definition, channel, emitter, callback) {
708
775
var call = common . getCall ( channel , method_definition . path , options ) ;
709
776
var first_listener ;
710
777
var final_requester = { } ;
778
+ var batch_state = {
779
+ completed_ops : [ ] ,
780
+ deferred_batches : [ ]
781
+ } ;
711
782
final_requester . start = function ( metadata , listener ) {
712
783
var batch = {
713
784
[ grpc . opType . SEND_INITIAL_METADATA ] :
714
785
metadata . _getCoreRepresentation ( ) ,
715
786
} ;
716
787
first_listener = listener ;
717
- call . startBatch ( batch , function ( ) { } ) ;
788
+ batch_state = _startBatchIfReady ( call , batch , batch_state ,
789
+ function ( ) { } ) ;
718
790
} ;
719
791
final_requester . sendMessage = function ( message ) {
720
792
var batch = {
721
793
[ grpc . opType . SEND_MESSAGE ] : serialize ( message ) ,
722
794
} ;
723
- call . startBatch ( batch , function ( ) { } ) ;
795
+ batch_state = _startBatchIfReady ( call , batch , batch_state ,
796
+ function ( ) { } ) ;
724
797
} ;
725
798
final_requester . halfClose = function ( ) {
726
799
var batch = {
@@ -729,7 +802,7 @@ function _getUnaryInterceptor(method_definition, channel, emitter, callback) {
729
802
[ grpc . opType . RECV_MESSAGE ] : true ,
730
803
[ grpc . opType . RECV_STATUS_ON_CLIENT ] : true
731
804
} ;
732
- call . startBatch ( batch , function ( err , response ) {
805
+ var callback = function ( err , response ) {
733
806
response . status . metadata = Metadata . _fromCoreRepresentation (
734
807
response . status . metadata ) ;
735
808
var status = response . status ;
@@ -757,7 +830,8 @@ function _getUnaryInterceptor(method_definition, channel, emitter, callback) {
757
830
first_listener . onReceiveMetadata ( response . metadata ) ;
758
831
first_listener . onReceiveMessage ( deserialized ) ;
759
832
first_listener . onReceiveStatus ( status ) ;
760
- } ) ;
833
+ } ;
834
+ batch_state = _startBatchIfReady ( call , batch , batch_state , callback ) ;
761
835
} ;
762
836
final_requester . cancel = function ( ) {
763
837
call . cancel ( ) ;
@@ -895,25 +969,34 @@ function _getServerStreamingInterceptor(method_definition, channel, emitter) {
895
969
method_definition . responseDeserialize ) ;
896
970
var serialize = method_definition . requestSerialize ;
897
971
return function ( options ) {
898
- var first_listener ;
972
+ var batch_state = {
973
+ completed_ops : [ ] ,
974
+ deferred_batches : [ ]
975
+ } ;
899
976
var call = common . getCall ( channel , method_definition . path , options ) ;
900
977
var final_requester = { } ;
978
+ var first_listener ;
979
+ var get_listener = function ( ) {
980
+ return first_listener ;
981
+ } ;
901
982
final_requester . start = function ( metadata , listener ) {
902
983
first_listener = listener ;
903
984
metadata = metadata . clone ( ) ;
904
985
var metadata_batch = {
905
986
[ grpc . opType . SEND_INITIAL_METADATA ] : metadata . _getCoreRepresentation ( ) ,
906
- [ grpc . opType . RECV_INITIAL_METADATA ] : true ,
987
+ [ grpc . opType . RECV_INITIAL_METADATA ] : true
907
988
} ;
908
- call . startBatch ( metadata_batch , function ( err , response ) {
989
+ var callback = function ( err , response ) {
909
990
if ( err ) {
910
991
// The call has stopped for some reason. A non-OK status will arrive
911
992
// in the other batch.
912
993
return ;
913
994
}
914
995
first_listener . onReceiveMetadata (
915
996
Metadata . _fromCoreRepresentation ( response . metadata ) ) ;
916
- } ) ;
997
+ } ;
998
+ batch_state = _startBatchIfReady ( call , metadata_batch , batch_state ,
999
+ callback ) ;
917
1000
var status_batch = {
918
1001
[ grpc . opType . RECV_STATUS_ON_CLIENT ] : true
919
1002
} ;
@@ -935,26 +1018,28 @@ function _getServerStreamingInterceptor(method_definition, channel, emitter) {
935
1018
var send_batch = {
936
1019
[ grpc . opType . SEND_MESSAGE ] : message
937
1020
} ;
938
- call . startBatch ( send_batch , function ( err , response ) {
1021
+ var callback = function ( err , response ) {
939
1022
if ( err ) {
940
1023
// The call has stopped for some reason. A non-OK status will arrive
941
1024
// in the other batch.
942
1025
return ;
943
1026
}
944
- } ) ;
1027
+ } ;
1028
+ batch_state = _startBatchIfReady ( call , send_batch , batch_state , callback ) ;
945
1029
} ;
946
1030
final_requester . halfClose = function ( ) {
947
1031
var batch = {
948
1032
[ grpc . opType . SEND_CLOSE_FROM_CLIENT ] : true
949
1033
} ;
950
- call . startBatch ( batch , function ( ) { } ) ;
1034
+ batch_state = _startBatchIfReady ( call , batch , batch_state , function ( ) { } ) ;
951
1035
} ;
952
1036
final_requester . recvMessageWithContext = function ( context ) {
953
1037
var recv_batch = {
954
1038
[ grpc . opType . RECV_MESSAGE ] : true
955
1039
} ;
956
- call . startBatch ( recv_batch , _getStreamReadCallback ( emitter , call ,
957
- first_listener , deserialize ) ) ;
1040
+ var callback = _getStreamReadCallback ( emitter , call ,
1041
+ get_listener , deserialize ) ;
1042
+ batch_state = _startBatchIfReady ( call , recv_batch , batch_state , callback ) ;
958
1043
} ;
959
1044
final_requester . cancel = function ( ) {
960
1045
call . cancel ( ) ;
@@ -981,6 +1066,9 @@ function _getBidiStreamingInterceptor(method_definition, channel, emitter) {
981
1066
method_definition . responseDeserialize ) ;
982
1067
return function ( options ) {
983
1068
var first_listener ;
1069
+ var get_listener = function ( ) {
1070
+ return first_listener ;
1071
+ } ;
984
1072
var call = common . getCall ( channel , method_definition . path , options ) ;
985
1073
var final_requester = { } ;
986
1074
final_requester . start = function ( metadata , listener ) {
@@ -1057,7 +1145,7 @@ function _getBidiStreamingInterceptor(method_definition, channel, emitter) {
1057
1145
[ grpc . opType . RECV_MESSAGE ] : true
1058
1146
} ;
1059
1147
call . startBatch ( recv_batch , _getStreamReadCallback ( emitter , call ,
1060
- first_listener , deserialize ) ) ;
1148
+ get_listener , deserialize ) ) ;
1061
1149
} ;
1062
1150
final_requester . cancel = function ( ) {
1063
1151
call . cancel ( ) ;
@@ -1144,11 +1232,13 @@ function _getServerStreamingListener(method_definition, emitter) {
1144
1232
onReceiveMessage : function ( message , next , context ) {
1145
1233
if ( emitter . push ( message ) && message !== null ) {
1146
1234
var call = context . call ;
1147
- var listener = context . listener ;
1235
+ var get_listener = function ( ) {
1236
+ return context . listener ;
1237
+ } ;
1148
1238
var read_batch = { } ;
1149
1239
read_batch [ grpc . opType . RECV_MESSAGE ] = true ;
1150
1240
call . startBatch ( read_batch , _getStreamReadCallback ( emitter , call ,
1151
- listener , deserialize ) ) ;
1241
+ get_listener , deserialize ) ) ;
1152
1242
} else {
1153
1243
emitter . reading = false ;
1154
1244
}
@@ -1176,11 +1266,13 @@ function _getBidiStreamingListener(method_definition, emitter) {
1176
1266
onReceiveMessage : function ( message , next , context ) {
1177
1267
if ( emitter . push ( message ) && message !== null ) {
1178
1268
var call = context . call ;
1179
- var listener = context . listener ;
1269
+ var get_listener = function ( ) {
1270
+ return context . listener ;
1271
+ } ;
1180
1272
var read_batch = { } ;
1181
1273
read_batch [ grpc . opType . RECV_MESSAGE ] = true ;
1182
1274
call . startBatch ( read_batch , _getStreamReadCallback ( emitter , call ,
1183
- listener , deserialize ) ) ;
1275
+ get_listener , deserialize ) ) ;
1184
1276
} else {
1185
1277
emitter . reading = false ;
1186
1278
}
0 commit comments