@@ -937,8 +937,9 @@ func (b *BinlogReader) DataStreamEvents(entriesChannel chan<- *common.EntryConte
 		// Throttle if this job has un-acked big tx, or
 		// there are too many global jobs with big tx.
 		for !b.shutdown {
-			localLimit, globalLimit := atomic.LoadInt32(&b.BigTxCount) > b.mysqlContext.BigTxSrcQueue, g.BigTxReachMax()
-			if !localLimit && !globalLimit {
+			localCount := atomic.LoadInt32(&b.BigTxCount)
+			globalLimit := g.BigTxReachMax()
+			if localCount <= b.mysqlContext.BigTxSrcQueue && !globalLimit {
 				bigTxThrottlingCount = 0
 				break
 			}
@@ -951,7 +952,7 @@ func (b *BinlogReader) DataStreamEvents(entriesChannel chan<- *common.EntryConte
 			}
 			time.Sleep(time.Duration(sleepMs) * time.Millisecond)
 			if bigTxThrottlingCount*sleepMs >= 15*1000 {
-				b.logger.Warn("reader big tx throttling for 15s", "local", localLimit, "global", globalLimit)
+				b.logger.Warn("reader big tx throttling for 15s", "local", localCount, "global", globalLimit)
 				bigTxThrottlingCount = 0
 			}
 		}
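
For reference, a minimal, self-contained sketch of the throttling pattern these two hunks adjust. The `bigTxCount`, `bigTxSrcQueue`, `sleepMs`, and `globalBigTxReachMax` names below are placeholder stand-ins for the reader's `b.BigTxCount`, `b.mysqlContext.BigTxSrcQueue`, the surrounding `sleepMs`, and `g.BigTxReachMax()`; they are assumptions for illustration, not the project's actual API.

```go
// Sketch only: throttle while the local big-tx count exceeds the per-source
// queue limit or a global cap is reached, warning roughly every 15s.
package main

import (
	"log"
	"sync/atomic"
	"time"
)

var bigTxCount int32 // stand-in for b.BigTxCount (un-acked big-tx count)

const bigTxSrcQueue int32 = 4 // assumed per-source queue limit
const sleepMs = 10            // assumed polling interval in milliseconds

// globalBigTxReachMax stands in for g.BigTxReachMax().
func globalBigTxReachMax() bool { return false }

func throttleBigTx(shutdown func() bool) {
	bigTxThrottlingCount := 0
	for !shutdown() {
		localCount := atomic.LoadInt32(&bigTxCount)
		globalLimit := globalBigTxReachMax()
		if localCount <= bigTxSrcQueue && !globalLimit {
			break // below both limits: stop throttling
		}
		bigTxThrottlingCount += 1
		time.Sleep(time.Duration(sleepMs) * time.Millisecond)
		if bigTxThrottlingCount*sleepMs >= 15*1000 {
			log.Println("reader big tx throttling for 15s", "local", localCount, "global", globalLimit)
			bigTxThrottlingCount = 0
		}
	}
}

func main() {
	throttleBigTx(func() bool { return false }) // returns immediately: no big tx pending
}
```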
@@ -1798,9 +1799,9 @@ func (b *BinlogReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *r
 	schemaName := string(rowsEvent.Table.Schema)
 	tableName := string(rowsEvent.Table.Table)
 	coordinate := b.entryContext.Entry.Coordinates.(*common.MySQLCoordinateTx)
-	b.logger.Debug("got rowsEvent", "schema", schemaName, "table", tableName,
-		"gno", coordinate.GNO,
-		"flags ", rowsEvent.Flags, "tableFlags", rowsEvent.Table.Flags)
+	b.logger.Trace("got rowsEvent", "schema", schemaName, "table", tableName,
+		"gno", coordinate.GNO, "flags", rowsEvent.Flags, "tableFlags", rowsEvent.Table.Flags,
+		"nRows", len(rowsEvent.Rows))

 	dml := common.ToEventDML(ev.Header.EventType)
 	skip, table := b.skipRowEvent(rowsEvent, dml)
@@ -1943,7 +1944,34 @@ func (b *BinlogReader) handleRowsEvent(ev *replication.BinlogEvent, rowsEvent *r
 	}

 	if len(dmlEvent.Rows) > 0 {
-		b.entryContext.Entry.Events = append(b.entryContext.Entry.Events, dmlEvent)
+		// return 0 if the last event could be reused.
+		reuseLast := func() int {
+			if len(b.entryContext.Entry.Events) == 0 {
+				return 1
+			}
+			lastEvent := &b.entryContext.Entry.Events[len(b.entryContext.Entry.Events)-1]
+			if dml != common.InsertDML || lastEvent.DML != common.InsertDML {
+				return 2
+			}
+			if lastEvent.DatabaseName != dmlEvent.DatabaseName || lastEvent.TableName != dmlEvent.TableName {
+				return 3
+			}
+			if bytes.Compare(lastEvent.Flags, dmlEvent.Flags) != 0 {
+				return 4
+			}
+			if dmlEvent.FKParent != lastEvent.FKParent {
+				return 5
+			}
+
+			lastEvent.Rows = append(lastEvent.Rows, dmlEvent.Rows...)
+			b.logger.Debug("reuseLast. reusing", "nRows", len(lastEvent.Rows))
+			return 0
+		}()
+
+		if reuseLast != 0 {
+			b.logger.Debug("reuseLast. not reusing", "step", reuseLast)
+			b.entryContext.Entry.Events = append(b.entryContext.Entry.Events, *dmlEvent)
+		}

 		if b.entryContext.OriginalSize >= bigTxSplittingSize {
 			b.logger.Debug("splitting big tx", "index", b.entryContext.Entry.Index)
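
For reference, a self-contained sketch of the merge rule the new `reuseLast` closure implements: consecutive INSERT events for the same table, with matching flags and FK-parent status, are folded into the previous event instead of being appended. The `DataEvent` struct and `appendOrMerge` helper below are simplified, hypothetical stand-ins; the real `common.DataEvent` carries more fields than shown.

```go
// Sketch only: merge consecutive same-table INSERT events into one event.
package main

import (
	"bytes"
	"fmt"
)

type EventDML int

const (
	InsertDML EventDML = iota
	UpdateDML
	DeleteDML
)

// DataEvent is a stripped-down stand-in for common.DataEvent.
type DataEvent struct {
	DML          EventDML
	DatabaseName string
	TableName    string
	Flags        []byte
	FKParent     bool
	Rows         [][]interface{}
}

// appendOrMerge folds a new INSERT event into the previous one when both
// target the same table with identical flags and FK-parent status; any
// mismatch falls back to appending the event as-is.
func appendOrMerge(events []DataEvent, ev DataEvent) []DataEvent {
	if len(events) > 0 {
		last := &events[len(events)-1]
		if ev.DML == InsertDML && last.DML == InsertDML &&
			last.DatabaseName == ev.DatabaseName && last.TableName == ev.TableName &&
			bytes.Equal(last.Flags, ev.Flags) && last.FKParent == ev.FKParent {
			last.Rows = append(last.Rows, ev.Rows...)
			return events
		}
	}
	return append(events, ev)
}

func main() {
	var events []DataEvent
	events = appendOrMerge(events, DataEvent{DML: InsertDML, DatabaseName: "db1", TableName: "t1", Rows: [][]interface{}{{1}}})
	events = appendOrMerge(events, DataEvent{DML: InsertDML, DatabaseName: "db1", TableName: "t1", Rows: [][]interface{}{{2}}})
	events = appendOrMerge(events, DataEvent{DML: DeleteDML, DatabaseName: "db1", TableName: "t1", Rows: [][]interface{}{{1}}})
	fmt.Println(len(events), len(events[0].Rows)) // 2 2: the two inserts merged, the delete did not
}
```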