[geary/wip/limit-replay-op-retries] Limit number of times a replay op is retried before failing
- From: Michael Gratton <mjog src gnome org>
- To: commits-list gnome org
- Cc:
- Subject: [geary/wip/limit-replay-op-retries] Limit number of times a replay op is retried before failing
- Date: Mon, 4 Feb 2019 23:18:51 +0000 (UTC)
commit db9bb5f4b52ec1ab2bf1d14f61fb370e3140795a
Author: Michael Gratton <mike vee net>
Date: Tue Feb 5 10:16:49 2019 +1100
Limit number of times a replay op is retried before failing
This prevents the op from being retried... forever 🕸🕸
.../imap-engine/imap-engine-replay-queue.vala | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
---
diff --git a/src/engine/imap-engine/imap-engine-replay-queue.vala b/src/engine/imap-engine/imap-engine-replay-queue.vala
index 8d5e2e61..3d839775 100644
--- a/src/engine/imap-engine/imap-engine-replay-queue.vala
+++ b/src/engine/imap-engine/imap-engine-replay-queue.vala
@@ -12,10 +12,18 @@
* ensure the execution of the operations remains consistent.
*/
private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
- // this value is high because delays between back-to-back unsolicited notifications have been
- // see as high as 250ms
+
+ // Maximum number of times a retry-able operation should be
+ // retried before failing. It's set to 1 since we only attempt to
+ // retry if there's some transient error, so if it doesn't work
+ // the second time around, it probably won't work at all.
+ private const int MAX_OP_RETRIES = 1;
+
+ // This value is high because delays between back-to-back
+ // unsolicited notifications have been seen as high as 250ms
private const int NOTIFICATION_QUEUE_WAIT_MSEC = 1000;
-
+
+
private enum State {
OPEN,
CLOSING,
@@ -522,9 +530,10 @@ private class Geary.ImapEngine.ReplayQueue : Geary.BaseObject {
// If a recoverable failure and operation allows
// remote replay and not closing, re-schedule now
- if ((op.on_remote_error == ReplayOperation.OnError.RETRY)
- && !is_unrecoverable_failure(replay_err)
- && state == State.OPEN) {
+ if (op.on_remote_error == ReplayOperation.OnError.RETRY &&
+ op.remote_retry_count <= MAX_OP_RETRIES &&
+ !is_unrecoverable_failure(replay_err) &&
+ state == State.OPEN) {
debug("Schedule op retry %s on %s", op.to_string(), to_string());
// the Folder will disconnect and reconnect due to the hard error and
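
For context, here is a minimal, self-contained Vala sketch of the retry-limiting idea in this patch. The RetryableOp and RetryQueue names are hypothetical stand-ins for Geary's ReplayOperation and ReplayQueue, and since the diff only shows the guard condition, the point where remote_retry_count is incremented is an assumption:

class RetryableOp : Object {
    public int remote_retry_count = 0;
    public string name;
    public RetryableOp(string name) { this.name = name; }
}

class RetryQueue : Object {
    // Retry once only: retries cover transient errors, so if a
    // second attempt also fails, further attempts are unlikely to help.
    private const int MAX_OP_RETRIES = 1;

    public void on_remote_error(RetryableOp op) {
        if (op.remote_retry_count <= MAX_OP_RETRIES) {
            // Assumed: the counter is bumped as the op is re-queued, so
            // repeated failures eventually fall through to the else branch.
            op.remote_retry_count++;
            print("Rescheduling %s (retry %d)\n", op.name, op.remote_retry_count);
        } else {
            print("Giving up on %s\n", op.name);
        }
    }
}

void main() {
    var queue = new RetryQueue();
    var op = new RetryableOp("example-op");
    queue.on_remote_error(op); // rescheduled, retry 1
    queue.on_remote_error(op); // rescheduled, retry 2
    queue.on_remote_error(op); // gives up
}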