
Commit 9714641

wip store [nfc]: move backoff/reload inside error helpers
1 parent: 092ea3f

1 file changed: +56 −43 lines changed

lib/model/store.dart

Lines changed: 56 additions & 43 deletions
@@ -929,7 +929,6 @@ class UpdateMachine {
     try {
       assert(!_disposed);
 
-      BackoffMachine? backoffMachine;
       while (true) {
         if (_debugLoopSignal != null) {
           await _debugLoopSignal!.future;
@@ -945,33 +944,13 @@ class UpdateMachine {
           result = await getEvents(store.connection,
             queueId: queueId, lastEventId: lastEventId);
           if (_disposed) return;
-        } catch (e) {
+        } catch (e, stackTrace) {
           if (_disposed) return;
-          final shouldRetry = _triagePollRequestError(e);
-          if (!shouldRetry) rethrow;
-          await (backoffMachine ??= BackoffMachine()).wait();
+          await _handlePollRequestError(e, stackTrace); // may rethrow
           if (_disposed) return;
-          assert(debugLog('… Backoff wait complete, retrying poll.'));
           continue;
         }
 
-        // After one successful request, we reset backoff to its initial state.
-        // That way if the user is off the network and comes back on, the app
-        // doesn't wind up in a state where it's slow to recover the next time
-        // one request fails.
-        //
-        // This does mean that if the server is having trouble and handling some
-        // but not all of its requests, we'll end up doing a lot more retries than
-        // if we stayed at the max backoff interval; partway toward what would
-        // happen if we weren't backing off at all.
-        //
-        // But at least for [getEvents] requests, as here, it should be OK,
-        // because this is a long-poll. That means a typical successful request
-        // takes a long time to come back; in fact longer than our max backoff
-        // duration (which is 10 seconds). So if we're getting a mix of successes
-        // and failures, the successes themselves should space out the requests.
-        backoffMachine = null;
-
         _clearPollErrors();
 
         final events = result.events;
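
For orientation, here is a condensed sketch (not the full code) of how the inner request loop reads once this change is applied. Every name in it appears in the diff above; the declaration of result and the elided parts are assumed from the surrounding code.

  while (true) {
    // ...debug-loop signal handling elided...
    final GetEventsResult result;
    try {
      result = await getEvents(store.connection,
        queueId: queueId, lastEventId: lastEventId);
      if (_disposed) return;
    } catch (e, stackTrace) {
      if (_disposed) return;
      // Transient request errors: the helper waits out a backoff, then we retry.
      // Anything else is rethrown, to be caught by the outer catch in poll().
      await _handlePollRequestError(e, stackTrace); // may rethrow
      if (_disposed) return;
      continue;
    }

    // A successful request resets backoff and clears any reported errors.
    _clearPollErrors();
    // ...apply result.events to the store and advance lastEventId (elided)...
  }
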
@@ -991,22 +970,14 @@ class UpdateMachine {
       }
     } catch (e) {
       if (_disposed) return;
-
-      // An error occurred, other than the transient request errors we retry on.
-      // This means either a lost/expired event queue on the server (which is
-      // normal after the app is offline for a period like 10 minutes),
-      // or an unexpected exception representing a bug in our code or the server.
-      // Either way, the show must go on. So reload server data from scratch.
-
-      _reportPollError(e);
-
-      // This disposes the store, which disposes this update machine.
-      await store._globalStore._reloadPerAccount(store.accountId);
-      assert(debugLog('… Event queue replaced.'));
+      await _handlePollError(e);
+      assert(_disposed);
       return;
     }
   }
 
+  BackoffMachine? _pollBackoffMachine;
+
   /// This controls when we start to report transient errors to the user when
   /// polling.
   ///
@@ -1017,17 +988,42 @@ class UpdateMachine {
   int _accumulatedTransientFailureCount = 0;
 
   void _clearPollErrors() {
+    // After one successful request, we reset backoff to its initial state.
+    // That way if the user is off the network and comes back on, the app
+    // doesn't wind up in a state where it's slow to recover the next time
+    // one request fails.
+    //
+    // This does mean that if the server is having trouble and handling some
+    // but not all of its requests, we'll end up doing a lot more retries than
+    // if we stayed at the max backoff interval; partway toward what would
+    // happen if we weren't backing off at all.
+    //
+    // But at least for [getEvents] requests, as here, it should be OK,
+    // because this is a long-poll. That means a typical successful request
+    // takes a long time to come back; in fact longer than our max backoff
+    // duration (which is 10 seconds). So if we're getting a mix of successes
+    // and failures, the successes themselves should space out the requests.
+    _pollBackoffMachine = null;
+
     store.isLoading = false;
     _accumulatedTransientFailureCount = 0;
     reportErrorToUserBriefly(null);
   }
 
-  /// Sort out an error from the network request in [poll].
+  /// Sort out an error from the network request in [poll]:
+  /// either wait for a backoff duration (and possibly report the error),
+  /// or rethrow.
   ///
-  /// If the request should be retried, this method returns true,
+  /// If the request should be retried, this method uses [_pollBackoffMachine]
+  /// to wait an appropriate backoff duration for that retry,
   /// after reporting the error if appropriate to the user and/or developer.
-  /// Otherwise, this method returns false with no side effects.
-  bool _triagePollRequestError(Object error) {
+  ///
+  /// Otherwise this method rethrows the error, with no other side effects.
+  ///
+  /// See also:
+  ///  * [_handlePollError], which handles errors from the rest of [poll]
+  ///    and errors this method rethrows.
+  Future<void> _handlePollRequestError(Object error, StackTrace stackTrace) async {
     store.isLoading = true;
 
     bool shouldReportToUser;
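
The comment moved into _clearPollErrors describes the reset-on-success policy: after any successful request, the next failure should start backoff from scratch. The real BackoffMachine is defined elsewhere in the codebase and is not shown in this diff; the sketch below only illustrates the shape the poll code relies on, namely a lazily created instance, a wait() that grows an upper bound, and the 10-second cap the comment mentions. The initial bound and the jitter policy here are assumptions.

  import 'dart:math';

  // Illustrative only; not the app's actual BackoffMachine implementation.
  class BackoffMachineSketch {
    static const firstBound = Duration(milliseconds: 100); // assumed
    static const maxBound = Duration(seconds: 10); // cap named in the comment

    Duration _bound = firstBound;
    final Random _random = Random();

    /// Wait a randomized duration up to the current bound, then double the
    /// bound (capped at [maxBound]) so successive failures wait longer.
    Future<void> wait() async {
      final millis = _random.nextInt(_bound.inMilliseconds + 1);
      await Future<void>.delayed(Duration(milliseconds: millis));
      _bound *= 2;
      if (_bound > maxBound) _bound = maxBound;
    }
  }

Resetting on success is then just dropping the instance, as _clearPollErrors now does with _pollBackoffMachine = null; the next failure lazily recreates it via ??= and starts again from the initial bound.
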
@@ -1046,19 +1042,32 @@ class UpdateMachine {
         shouldReportToUser = true;
 
       default:
-        return false;
+        Error.throwWithStackTrace(error, stackTrace);
     }
 
     assert(debugLog('Transient error polling event queue for $store: $error\n'
       'Backing off, then will retry…'));
     if (shouldReportToUser) {
       _maybeReportToUserTransientError(error);
     }
-    return true;
+    await (_pollBackoffMachine ??= BackoffMachine()).wait();
+    if (_disposed) return;
+    assert(debugLog('… Backoff wait complete, retrying poll.'));
   }
 
-  /// Report an error in [poll], if appropriate, to the user and/or developer.
-  void _reportPollError(Object error) {
+  /// Deal with an error in [poll]: reload server data to replace the store,
+  /// after reporting the error as appropriate to the user and/or developer.
+  ///
+  /// See also:
+  ///  * [_handlePollRequestError], which handles certain errors
+  ///    and causes them not to reach this method.
+  Future<void> _handlePollError(Object error) async {
+    // An error occurred, other than the transient request errors we retry on.
+    // This means either a lost/expired event queue on the server (which is
+    // normal after the app is offline for a period like 10 minutes),
+    // or an unexpected exception representing a bug in our code or the server.
+    // Either way, the show must go on. So reload server data from scratch.
+
     store.isLoading = true;
 
     switch (error) {
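
One detail worth noting in the default: branch above: the old helper returned false and left the caller to rethrow, while the new helper receives the error as a parameter, where rethrow is not available and a plain throw error would start a new stack trace at the helper. Error.throwWithStackTrace (from dart:core) throws while preserving the trace that was passed in. A minimal standalone example of that behavior, with a hypothetical triage() standing in for the helper:

  void main() {
    try {
      try {
        throw StateError('original failure'); // line A
      } catch (e, stackTrace) {
        triage(e, stackTrace);
      }
    } catch (e, stackTrace) {
      // stackTrace still points at line A, not at the throw inside triage().
      print('$e\n$stackTrace');
    }
  }

  // Hypothetical helper, standing in for _handlePollRequestError's default branch.
  void triage(Object error, StackTrace stackTrace) {
    // A plain `throw error;` here would begin a fresh trace at this line;
    // Error.throwWithStackTrace rethrows with the original trace intact.
    Error.throwWithStackTrace(error, stackTrace);
  }
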
@@ -1091,6 +1100,10 @@ class UpdateMachine {
         // The bug here could be in the server if it's an ApiRequestException,
         // but our remedy is the same either way.
     }
+
+    await store._globalStore._reloadPerAccount(store.accountId);
+    assert(_disposed);
+    assert(debugLog('… Event queue replaced.'));
   }
 
   /// This only reports transient errors after reaching
