Skip to content

Commit ed6c859

Browse files
lXXXw and facebook-github-bot
authored and committed
Back out "http: add "per batch" concurrent request limit"
Summary: Original commit changeset: 64bd8dea2326. Original Phabricator Diff: D72823375. Reviewed By: MichaelCuevas. Differential Revision: D73602841. fbshipit-source-id: 33b08997986b0b8aaf9cafecbeaa90bc62b8569b
1 parent 553955b commit ed6c859

File tree

2 files changed

+17
-46
lines changed

2 files changed

+17
-46
lines changed

eden/scm/lib/edenapi/src/builder.rs

-4
Original file line numberDiff line numberDiff line change
@@ -209,9 +209,6 @@ impl HttpClientBuilder {
209209
let max_requests = get_config(config, "edenapi", "max-concurrent-requests")?
210210
.or(get_config(config, "edenapi", "maxrequests")?);
211211

212-
let max_requests_per_batch =
213-
get_config(config, "edenapi", "max-concurrent-requests-per-batch")?;
214-
215212
let try_route_consistently =
216213
get_config(config, "edenapi", "try-route-consistently")?.unwrap_or_default();
217214

@@ -306,7 +303,6 @@ impl HttpClientBuilder {
306303
let mut http_config = hg_http::http_config(config, &server_url)?;
307304
http_config.verbose_stats |= debug;
308305
http_config.max_concurrent_requests = max_requests;
309-
http_config.max_concurrent_requests_per_batch = max_requests_per_batch;
310306

311307
let builder = HttpClientBuilder {
312308
repo_name,

eden/scm/lib/http-client/src/client.rs

+17-42
Original file line numberDiff line numberDiff line change
@@ -65,13 +65,6 @@ pub struct Config {
6565
// library to limit the number of in-flight requests separately, _before_ the
6666
// requests are given to curl.
6767
pub max_concurrent_requests: Option<usize>,
68-
69-
// Limit the number of concurrent requests for a "batch" of requests passed at once to
70-
// client.send_async(). This allows us to have a high global request limit for small
71-
// "random" requests, while having lower limits for heavy requests (e.g. fetch 10m
72-
// files across 1000 requests).
73-
pub max_concurrent_requests_per_batch: Option<usize>,
74-
7568
// Escape hatch to turn off our request limiting.
7669
pub limit_requests: bool,
7770
// Escape hatch to turn off our response body limiting.
@@ -102,7 +95,6 @@ impl Default for Config {
10295
client_info: None,
10396
disable_tls_verification: false,
10497
max_concurrent_requests: None, // No limit by default
105-
max_concurrent_requests_per_batch: None,
10698
limit_requests: true,
10799
limit_response_buffering: true,
108100
unix_socket_domains: HashSet::new(),
@@ -253,37 +245,23 @@ impl HttpClient {
253245
/// until all of the transfers are complete, and will return
254246
/// the total stats across all transfers when complete.
255247
pub fn stream(&self, requests: Vec<StreamRequest>) -> Result<Stats, HttpClientError> {
256-
// This is a "local" limit for how many concurrent requests we allow for a single
257-
// batch of requests. Requests are still subject to the global limit via self.claimer.
258-
let mut allowed_requests = self
259-
.config
260-
.max_concurrent_requests_per_batch
261-
.unwrap_or(requests.len());
262-
263248
// Add as many of remaining requests to the handle as we can, limited by the claimer.
264-
let try_add = |h: &MultiDriver,
265-
reqs: &mut IntoIter<StreamRequest>,
266-
allowed_requests: &mut usize|
267-
-> Result<(), HttpClientError> {
268-
for claim in self
269-
.claimer
270-
.try_claim_requests((*allowed_requests).min(reqs.len()))
271-
{
272-
let mut request = match reqs.next() {
273-
Some(request) => request,
274-
// Shouldn't happen, but just in case.
275-
None => break,
276-
};
277-
278-
self.event_listeners
279-
.trigger_new_request(request.request.ctx_mut());
280-
h.add(request.into_easy(claim)?)?;
281-
282-
*allowed_requests -= 1;
283-
}
249+
let try_add =
250+
|h: &MultiDriver, reqs: &mut IntoIter<StreamRequest>| -> Result<(), HttpClientError> {
251+
for claim in self.claimer.try_claim_requests(reqs.len()) {
252+
let mut request = match reqs.next() {
253+
Some(request) => request,
254+
// Shouldn't happen, but just in case.
255+
None => break,
256+
};
257+
258+
self.event_listeners
259+
.trigger_new_request(request.request.ctx_mut());
260+
h.add(request.into_easy(claim)?)?;
261+
}
284262

285-
Ok(())
286-
};
263+
Ok(())
264+
};
287265

288266
let mut requests = requests.into_iter();
289267
let mut stats = Stats::default();
@@ -298,7 +276,7 @@ impl HttpClient {
298276
let driver = MultiDriver::new(multi.get(), self.config.verbose_stats);
299277

300278
// Add requests to the driver. This can add anywhere from zero to all the requests.
301-
try_add(&driver, &mut requests, &mut allowed_requests)?;
279+
try_add(&driver, &mut requests)?;
302280

303281
let mut tls_error = false;
304282
let result = driver
@@ -312,17 +290,14 @@ impl HttpClient {
312290

313291
self.report_result_and_drop_receiver(res)?;
314292

315-
allowed_requests += 1;
316-
317293
// A request finished - let's see if there are pending requests we can now add
318294
// to this multi. This allows pending requests to proceed without needing to
319295
// wait for _all_ in-progress requests to finish. Note that there may be other
320296
// curl multis active bound by the same request limit, so it is still possible
321297
// for our pending requests to wait longer than they need to (i.e. when a
322298
// request finishes on a different multi, our loop here will still wait for one
323299
// of our requests to finish before trying to enqueue new requests).
324-
try_add(&driver, &mut requests, &mut allowed_requests)
325-
.map_err(|err| Abort::WithReason(err.into()))
300+
try_add(&driver, &mut requests).map_err(|err| Abort::WithReason(err.into()))
326301
})
327302
.inspect(|stats| {
328303
self.event_listeners.trigger_stats(stats);

0 commit comments

Comments (0)