@@ -24,19 +24,17 @@ func NewWorker(rpc rpc.IRPCClient) *Worker {
 	}
 }
 
-func (w *Worker) processChunkWithRetry(ctx context.Context, chunk []*big.Int, resultsCh chan<- []rpc.GetFullBlockResult) {
+func (w *Worker) processChunkWithRetry(ctx context.Context, chunk []*big.Int, resultsCh chan<- []rpc.GetFullBlockResult, sem chan struct{}) {
 	select {
 	case <-ctx.Done():
 		return
 	default:
 	}
 
-	defer func() {
-		time.Sleep(time.Duration(config.Cfg.RPC.Blocks.BatchDelay) * time.Millisecond)
-	}()
-
-	// Try with current chunk size
+	// Acquire semaphore only for the RPC request
+	sem <- struct{}{}
 	results := w.rpc.GetFullBlocks(ctx, chunk)
+	<-sem // Release semaphore immediately after RPC request
 
 	if len(chunk) == 1 {
 		// chunk size 1 is the minimum, so we return whatever we get
@@ -56,6 +54,7 @@ func (w *Worker) processChunkWithRetry(ctx context.Context, chunk []*big.Int, re
 		}
 	}
 
+	log.Debug().Msgf("Out of %d blocks, %d successful, %d failed", len(results), len(successfulResults), len(failedBlocks))
 	// If we have successful results, send them
 	if len(successfulResults) > 0 {
 		resultsCh <- successfulResults
@@ -68,7 +67,7 @@ func (w *Worker) processChunkWithRetry(ctx context.Context, chunk []*big.Int, re
 
 	// can't split any further, so try one last time
 	if len(failedBlocks) == 1 {
-		w.processChunkWithRetry(ctx, failedBlocks, resultsCh)
+		w.processChunkWithRetry(ctx, failedBlocks, resultsCh, sem)
 		return
 	}
 
@@ -84,12 +83,12 @@ func (w *Worker) processChunkWithRetry(ctx context.Context, chunk []*big.Int, re
 
 	go func() {
 		defer wg.Done()
-		w.processChunkWithRetry(ctx, leftChunk, resultsCh)
+		w.processChunkWithRetry(ctx, leftChunk, resultsCh, sem)
 	}()
 
 	go func() {
 		defer wg.Done()
-		w.processChunkWithRetry(ctx, rightChunk, resultsCh)
+		w.processChunkWithRetry(ctx, rightChunk, resultsCh, sem)
 	}()
 
 	wg.Wait()
@@ -102,9 +101,15 @@ func (w *Worker) Run(ctx context.Context, blockNumbers []*big.Int) []rpc.GetFull
 	var wg sync.WaitGroup
 	resultsCh := make(chan []rpc.GetFullBlockResult, blockCount)
 
+	// Create a semaphore channel to limit concurrent goroutines
+	sem := make(chan struct{}, 20)
+
 	log.Debug().Msgf("Worker Processing %d blocks in %d chunks of max %d blocks", blockCount, len(chunks), w.rpc.GetBlocksPerRequest().Blocks)
 
-	for _, chunk := range chunks {
+	for i, chunk := range chunks {
+		if i > 0 {
+			time.Sleep(time.Duration(config.Cfg.RPC.Blocks.BatchDelay) * time.Millisecond)
+		}
 		select {
 		case <-ctx.Done():
 			log.Debug().Msg("Context canceled, stopping Worker")
@@ -116,7 +121,7 @@ func (w *Worker) Run(ctx context.Context, blockNumbers []*big.Int) []rpc.GetFull
 		wg.Add(1)
 		go func(chunk []*big.Int) {
 			defer wg.Done()
-			w.processChunkWithRetry(ctx, chunk, resultsCh)
+			w.processChunkWithRetry(ctx, chunk, resultsCh, sem)
 		}(chunk)
 	}
 
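Note: for readers unfamiliar with the channel-as-semaphore pattern this diff introduces, the sketch below shows the same idea in isolation: a buffered channel of empty structs caps how many goroutines may be inside the expensive call at once, and the slot is released as soon as the call returns rather than when the goroutine finishes. The fetch function, the capacity of 3, and the loop bounds are illustrative placeholders, not code from this repository.

package main

import (
	"fmt"
	"sync"
	"time"
)

// fetch stands in for an expensive remote call; it is a placeholder, not the repo's API.
func fetch(id int) string {
	time.Sleep(100 * time.Millisecond)
	return fmt.Sprintf("result-%d", id)
}

func main() {
	// Buffered channel used as a counting semaphore: capacity = max concurrent fetches.
	sem := make(chan struct{}, 3)

	var wg sync.WaitGroup
	for id := 1; id <= 10; id++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			// Acquire a slot only around the expensive call...
			sem <- struct{}{}
			res := fetch(id)
			<-sem // ...and release it immediately afterwards, before any post-processing.

			fmt.Println(res)
		}(id)
	}
	wg.Wait()
}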