diff options
| author | William A. Kennington III <wak@google.com> | 2018-06-15 10:10:20 -0700 |
|---|---|---|
| committer | William A. Kennington III <wak@google.com> | 2018-06-15 10:15:01 -0700 |
| commit | 5e21ac017d4aeec18ce64fea7a0f42332d405cfc (patch) | |
| tree | d5a2e756abf6d82d7dc943f78f96f7ca83e69c6c | |
| parent | 20cbbfb008d3d7d39f333b54b183793c12f2466c (diff) | |
| download | phosphor-objmgr-5e21ac017d4aeec18ce64fea7a0f42332d405cfc.tar.gz phosphor-objmgr-5e21ac017d4aeec18ce64fea7a0f42332d405cfc.zip | |
libmapper: Retry with exponential backoff
We don't want to keep retrying every 1 second for 5 tries. This would
allow the timeout to lapse for a very busy BMC and just cause more
congestion. Instead, back off at exponentially increasing intervals.
Change-Id: I9780d9a3dc787a6936aca2c2af30418dd2b0bf4b
Signed-off-by: William A. Kennington III <wak@google.com>
| -rw-r--r-- | libmapper/mapper.c | 29 |
1 file changed, 15 insertions, 14 deletions
diff --git a/libmapper/mapper.c b/libmapper/mapper.c index c6b797d..ff96dc8 100644 --- a/libmapper/mapper.c +++ b/libmapper/mapper.c @@ -173,7 +173,7 @@ static int async_wait_getobject_callback(sd_bus_message *m, void *userdata, int i, r; struct async_wait_callback_data *data = userdata; mapper_async_wait *wait = data->wait; - uint64_t now; + uint64_t next_retry; if (wait->finished) goto exit; @@ -184,17 +184,17 @@ static int async_wait_getobject_callback(sd_bus_message *m, void *userdata, if (r == EBUSY && data->retry < mapper_busy_retries) { - r = sd_event_now(wait->loop, CLOCK_MONOTONIC, &now); + r = sd_event_now(wait->loop, CLOCK_MONOTONIC, &next_retry); if (r < 0) { async_wait_done(r, wait); goto exit; } - ++data->retry; + next_retry += mapper_busy_delay_interval_usec * (1 << data->retry); r = sd_event_add_time(wait->loop, &data->event_source, CLOCK_MONOTONIC, - now + mapper_busy_delay_interval_usec, 0, - async_wait_timeout_callback, data); + next_retry, 0, async_wait_timeout_callback, data); + ++data->retry; if (r < 0) { async_wait_done(r, wait); @@ -405,7 +405,7 @@ static int async_subtree_getpaths_callback(sd_bus_message *m, void *userdata, { int r; struct mapper_async_subtree *subtree = userdata; - uint64_t now; + uint64_t next_retry; if (subtree->finished) goto exit; @@ -422,18 +422,18 @@ static int async_subtree_getpaths_callback(sd_bus_message *m, void *userdata, if (r == EBUSY && subtree->retry < mapper_busy_retries) { - r = sd_event_now(subtree->loop, CLOCK_MONOTONIC, &now); + r = sd_event_now(subtree->loop, CLOCK_MONOTONIC, &next_retry); if (r < 0) { async_subtree_done(r, subtree); goto exit; } - ++subtree->retry; + next_retry += mapper_busy_delay_interval_usec * (1 << subtree->retry); r = sd_event_add_time(subtree->loop, &subtree->event_source, - CLOCK_MONOTONIC, - now + mapper_busy_delay_interval_usec, 0, + CLOCK_MONOTONIC, next_retry, 0, async_subtree_timeout_callback, subtree); + ++subtree->retry; if (r < 0) { async_subtree_done(r, subtree); @@ 
-598,16 +598,17 @@ int mapper_get_object(sd_bus *conn, const char *obj, sd_bus_message **reply) if (r < 0) goto exit; - while (retry < mapper_busy_retries) + while (true) { sd_bus_error_free(&error); r = sd_bus_call(conn, request, 0, &error, reply); if (r < 0 && sd_bus_error_get_errno(&error) == EBUSY) { - ++retry; + if (retry >= mapper_busy_retries) + break; - if (retry != mapper_busy_retries) - usleep(mapper_busy_delay_interval_usec); + usleep(mapper_busy_delay_interval_usec * (1 << retry)); + ++retry; continue; } break; |

