Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 70 additions & 0 deletions frontend/public/actions/__tests__/dashboards.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import {
} from '../dashboards';
import { defaults } from '../../reducers/dashboards';
import { RESULTS_TYPE } from '../../reducers/dashboard-results';
import { MIN_POLL_DELAY } from '../../components/utils/adaptive-polling';

const testStopWatch = (stopAction, type: RESULTS_TYPE, key: string) => {
expect(stopAction(key)).toEqual({
Expand Down Expand Up @@ -102,4 +103,73 @@ describe('dashboards-actions', () => {

it('stopWatchPrometheusQuery stops watching Prometheus', () =>
testStopWatch(stopWatchPrometheusQuery, RESULTS_TYPE.PROMETHEUS, 'fooQuery'));

// Tests the adaptive-polling behavior of fetchPeriodically (driven via watchURL):
// the delay passed to setTimeout should track the measured fetch response time.
describe('adaptive polling', () => {
  let setTimeoutSpy: jest.SpyInstance;

  beforeEach(() => {
    // Spy on (but do not fake) setTimeout so we can inspect the delay scheduled
    // for the next poll cycle.
    setTimeoutSpy = jest.spyOn(global, 'setTimeout');
  });

  afterEach(() => {
    setTimeoutSpy.mockRestore();
    jest.restoreAllMocks();
  });

  // Drains the next-tick/microtask queue so the awaited fetch inside
  // fetchPeriodically settles before we assert on the scheduled timeout.
  const flushPromises = () => new Promise(process.nextTick);

  // Starts watchURL('testURL') with a mocked fetch. getState is two-phase: the
  // first call returns the default (no active watch) state, subsequent calls
  // return a state with the watch active.
  // NOTE(review): presumably this mirrors watchURL's activation sequence —
  // confirm against the watchURL implementation.
  const setupWatchURL = (fetchMock: jest.Mock) => {
    const activeState = ImmutableMap(defaults).setIn([RESULTS_TYPE.URL, 'testURL', 'active'], 1);
    const getState = jest
      .fn()
      .mockReturnValueOnce({ dashboards: ImmutableMap(defaults) })
      .mockReturnValue({ dashboards: activeState });
    const dispatch = jest.fn();

    watchURL('testURL', fetchMock)(dispatch, getState);
    return { dispatch, getState };
  };

  it('uses MIN_POLL_DELAY for fast responses', async () => {
    // Date.now is sampled once before and once after the fetch,
    // so these two mocked values yield a 100ms elapsed time.
    const now = 1000;
    jest
      .spyOn(Date, 'now')
      .mockReturnValueOnce(now)
      .mockReturnValueOnce(now + 100);

    const fetchMock = jest.fn().mockResolvedValueOnce({ data: 'test' });
    setupWatchURL(fetchMock);

    await flushPromises();

    // The second argument of the most recent setTimeout call is the next poll delay.
    const lastSetTimeout = setTimeoutSpy.mock.calls[setTimeoutSpy.mock.calls.length - 1];
    expect(lastSetTimeout[1]).toBe(MIN_POLL_DELAY);
  });

  it('increases delay for slow responses', async () => {
    // Mocked Date.now values produce a 3000ms elapsed response time.
    const now = 1000;
    jest
      .spyOn(Date, 'now')
      .mockReturnValueOnce(now)
      .mockReturnValueOnce(now + 3000);

    const fetchMock = jest.fn().mockResolvedValueOnce({ data: 'test' });
    setupWatchURL(fetchMock);

    await flushPromises();

    // 3s response scaled by SCALE_FACTOR (10) => 30s poll delay.
    const lastSetTimeout = setTimeoutSpy.mock.calls[setTimeoutSpy.mock.calls.length - 1];
    expect(lastSetTimeout[1]).toBe(30000);
  });

  it('does not jump to MAX_POLL_DELAY on first fetch error', async () => {
    // With no EMA history, an error seeds the EMA from the floor-equivalent
    // value, so the first retry stays at MIN_POLL_DELAY instead of backing
    // off all the way to the maximum delay.
    const fetchMock = jest.fn().mockRejectedValueOnce(new Error('network error'));
    setupWatchURL(fetchMock);

    await flushPromises();

    const lastSetTimeout = setTimeoutSpy.mock.calls[setTimeoutSpy.mock.calls.length - 1];
    expect(lastSetTimeout[1]).toBe(MIN_POLL_DELAY);
  });
});
});
22 changes: 19 additions & 3 deletions frontend/public/actions/dashboards.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,13 @@ import { isWatchActive, RESULTS_TYPE } from '../reducers/dashboard-results';
import type { RootState } from '../redux';
import { getPrometheusURL, PrometheusEndpoint } from '../components/graphs/helpers';
import { PrometheusResponse } from '../components/graphs';
import { URL_POLL_DEFAULT_DELAY } from '../components/utils/url-poll-hook';
import {
computeAdaptiveDelay,
emaToDelay,
MIN_POLL_DELAY,
MAX_POLL_DELAY,
SCALE_FACTOR,
} from '../components/utils/adaptive-polling';
import { Fetch, RequestMap } from '@console/dynamic-plugin-sdk/src/api/internal-types';

export enum ActionType {
Expand Down Expand Up @@ -63,23 +69,32 @@ const fetchPeriodically: FetchPeriodically = async (
getURL,
getState,
fetch,
responseTimeEma = 0,
) => {
if (!isWatchActive(getState().dashboards, type, key)) {
return;
}
let nextEma = responseTimeEma;
try {
dispatch(updateWatchInFlight(type, key, true));
const startTime = Date.now();
const data = await fetch(getURL());
const elapsed = Date.now() - startTime;
[, nextEma] = computeAdaptiveDelay(elapsed, responseTimeEma);
dispatch(setData(type, key, data));
dispatch(setError(type, key, null));
} catch (error) {
// Feed a synthetic slow response into the EMA to gradually back off without jumping to max
const errorSeed =
responseTimeEma === 0 ? MIN_POLL_DELAY / SCALE_FACTOR : MAX_POLL_DELAY / SCALE_FACTOR;
[, nextEma] = computeAdaptiveDelay(errorSeed, responseTimeEma);
dispatch(setError(type, key, error));
dispatch(setData(type, key, null));
} finally {
dispatch(updateWatchInFlight(type, key, false));
const timeout = setTimeout(
() => fetchPeriodically(dispatch, type, key, getURL, getState, fetch),
URL_POLL_DEFAULT_DELAY,
() => fetchPeriodically(dispatch, type, key, getURL, getState, fetch, nextEma),
emaToDelay(nextEma),
Comment on lines +87 to +97
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major | ⚡ Quick win

First error can jump straight to 60s retry, causing overly aggressive backoff.

With responseTimeEma = 0, the catch path seeds EMA with MAX_POLL_DELAY / SCALE_FACTOR, which immediately schedules MAX_POLL_DELAY. That delays recovery after transient first-request failures.

Suggested fix (seed from floor-equivalent EMA when no history)
 import {
   computeAdaptiveDelay,
   emaToDelay,
+  MIN_POLL_DELAY,
   MAX_POLL_DELAY,
   SCALE_FACTOR,
 } from '../components/utils/adaptive-polling';
@@
   } catch (error) {
-    // Feed a synthetic slow response into the EMA to gradually back off without jumping to max
-    [, nextEma] = computeAdaptiveDelay(MAX_POLL_DELAY / SCALE_FACTOR, responseTimeEma);
+    // Feed a synthetic slow response into EMA; if no history, seed from floor-equivalent EMA.
+    const emaSeed =
+      responseTimeEma > 0 ? responseTimeEma : MIN_POLL_DELAY / SCALE_FACTOR;
+    [, nextEma] = computeAdaptiveDelay(MAX_POLL_DELAY / SCALE_FACTOR, emaSeed);
     dispatch(setError(type, key, error));
     dispatch(setData(type, key, null));
   } finally {
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
// Feed a synthetic slow response into the EMA to gradually back off without jumping to max
[, nextEma] = computeAdaptiveDelay(MAX_POLL_DELAY / SCALE_FACTOR, responseTimeEma);
dispatch(setError(type, key, error));
dispatch(setData(type, key, null));
} finally {
dispatch(updateWatchInFlight(type, key, false));
const timeout = setTimeout(
() => fetchPeriodically(dispatch, type, key, getURL, getState, fetch),
URL_POLL_DEFAULT_DELAY,
() => fetchPeriodically(dispatch, type, key, getURL, getState, fetch, nextEma),
emaToDelay(nextEma),
// Feed a synthetic slow response into EMA; if no history, seed from floor-equivalent EMA.
const emaSeed =
responseTimeEma > 0 ? responseTimeEma : MIN_POLL_DELAY / SCALE_FACTOR;
[, nextEma] = computeAdaptiveDelay(MAX_POLL_DELAY / SCALE_FACTOR, emaSeed);
dispatch(setError(type, key, error));
dispatch(setData(type, key, null));
} finally {
dispatch(updateWatchInFlight(type, key, false));
const timeout = setTimeout(
() => fetchPeriodically(dispatch, type, key, getURL, getState, fetch, nextEma),
emaToDelay(nextEma),
🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@frontend/public/actions/dashboards.ts` around lines 86 - 94, The catch path
currently seeds the EMA with MAX_POLL_DELAY / SCALE_FACTOR when responseTimeEma
is zero, which can jump retries straight to MAX_POLL_DELAY; modify the logic
around computeAdaptiveDelay so that when responseTimeEma === 0 you seed the EMA
from the "floor" value (the minimal/steady-state equivalent) instead of
MAX_POLL_DELAY / SCALE_FACTOR (e.g. use a MIN_POLL_DELAY-based seed or the floor
EMA value), keeping all calls and variables (computeAdaptiveDelay,
responseTimeEma, MAX_POLL_DELAY, SCALE_FACTOR, emaToDelay, fetchPeriodically)
intact; ensure nextEma is computed from that floor-seeded value so the first
failure backs off conservatively rather than immediately scheduling the max
delay.

);
dispatch(updateWatchTimeout(type, key, timeout));
}
Expand Down Expand Up @@ -147,6 +162,7 @@ type FetchPeriodically = (
getURL: () => string,
getState: () => RootState,
fetch: Fetch,
responseTimeEma?: number,
) => void;

export type DashboardsAction = Action<typeof dashboardsActions>;
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
import {
computeAdaptiveDelay,
emaToDelay,
MIN_POLL_DELAY,
MAX_POLL_DELAY,
EMA_ALPHA,
SCALE_FACTOR,
} from '../adaptive-polling';

// Table-driven coverage of emaToDelay: floor clamp, proportional range,
// ceiling clamp, and the non-finite fallback.
describe('emaToDelay', () => {
  it('clamps to MIN_POLL_DELAY for small EMA values', () => {
    [0, 500, 1499].forEach((ema) => expect(emaToDelay(ema)).toBe(MIN_POLL_DELAY));
  });

  it('scales proportionally for mid-range EMA values', () => {
    const cases: Array<[number, number]> = [
      [2000, 20000],
      [3000, 30000],
      [4500, 45000],
    ];
    cases.forEach(([ema, expectedDelay]) => expect(emaToDelay(ema)).toBe(expectedDelay));
  });

  it('clamps to MAX_POLL_DELAY for large EMA values', () => {
    [6000, 10000].forEach((ema) => expect(emaToDelay(ema)).toBe(MAX_POLL_DELAY));
  });

  it('falls back to MIN_POLL_DELAY for non-finite values', () => {
    [NaN, Infinity, -Infinity].forEach((ema) => expect(emaToDelay(ema)).toBe(MIN_POLL_DELAY));
  });
});

// Behavior of computeAdaptiveDelay: EMA seeding, smoothing, clamping,
// outlier damping, error-backoff recovery, and input validation.
describe('computeAdaptiveDelay', () => {
  it('uses elapsed directly as EMA on first call (previousEma = 0)', () => {
    const [nextDelay, nextEma] = computeAdaptiveDelay(500, 0);
    expect(nextEma).toBe(500);
    expect(nextDelay).toBe(MIN_POLL_DELAY);
  });

  it('applies EMA smoothing with previous value', () => {
    const [, nextEma] = computeAdaptiveDelay(4000, 3000);
    const smoothed = EMA_ALPHA * 4000 + (1 - EMA_ALPHA) * 3000;
    expect(nextEma).toBe(smoothed);
  });

  it('returns MIN_POLL_DELAY for fast responses', () => {
    const [nextDelay] = computeAdaptiveDelay(200, 300);
    expect(nextDelay).toBe(MIN_POLL_DELAY);
  });

  it('returns proportional delay for moderate responses', () => {
    const [nextDelay, nextEma] = computeAdaptiveDelay(3000, 3000);
    expect(nextEma).toBe(3000);
    expect(nextDelay).toBe(30000);
  });

  it('returns MAX_POLL_DELAY for very slow responses', () => {
    const [nextDelay] = computeAdaptiveDelay(10000, 8000);
    expect(nextDelay).toBe(MAX_POLL_DELAY);
  });

  it('dampens a single outlier spike via EMA smoothing', () => {
    // Stable at 1s, then a single 10s spike.
    const [, stableEma] = computeAdaptiveDelay(1000, 1000);
    expect(stableEma).toBe(1000);

    const [spikeDelay, spikedEma] = computeAdaptiveDelay(10000, stableEma);
    const smoothed = EMA_ALPHA * 10000 + (1 - EMA_ALPHA) * 1000;
    expect(spikedEma).toBe(smoothed);
    // One spike alone must not push the delay all the way to the ceiling.
    expect(spikeDelay).toBeLessThan(MAX_POLL_DELAY);
  });

  it('recovers gradually after error backoff', () => {
    const syntheticSlow = MAX_POLL_DELAY / SCALE_FACTOR;
    // Start from a stable fast state; the synthetic slow sample raises the EMA.
    const [, backedOffEma] = computeAdaptiveDelay(syntheticSlow, 500);
    expect(backedOffEma).toBeGreaterThan(500);

    // A subsequent fast response must pull the EMA back down.
    const [, recoveredEma] = computeAdaptiveDelay(500, backedOffEma);
    expect(recoveredEma).toBeLessThan(backedOffEma);
  });

  it('defaults previousEma to 0 when omitted', () => {
    const [nextDelay, nextEma] = computeAdaptiveDelay(2000);
    expect(nextEma).toBe(2000);
    expect(nextDelay).toBe(20000);
  });

  it('falls back safely for non-finite or negative elapsedMs', () => {
    [NaN, Infinity, -1].forEach((badElapsed) =>
      expect(computeAdaptiveDelay(badElapsed, 1000)).toEqual([MIN_POLL_DELAY, 1000]),
    );
  });
});
34 changes: 34 additions & 0 deletions frontend/public/components/utils/adaptive-polling.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
// Floor and ceiling for the polling interval, in milliseconds.
export const MIN_POLL_DELAY = 15000;
export const MAX_POLL_DELAY = 60000;
// Smoothing factor for the exponential moving average of response times.
export const EMA_ALPHA = 0.3;
// Multiplier converting a smoothed response time into a polling delay.
export const SCALE_FACTOR = 10;

/** Converts a smoothed response time (EMA) to a clamped polling delay in ms. */
export const emaToDelay = (ema: number): number => {
  if (!Number.isFinite(ema)) {
    return MIN_POLL_DELAY;
  }
  const scaled = Math.round(ema * SCALE_FACTOR);
  if (scaled < MIN_POLL_DELAY) {
    return MIN_POLL_DELAY;
  }
  return scaled > MAX_POLL_DELAY ? MAX_POLL_DELAY : scaled;
};

/**
 * Computes the next adaptive polling delay from an Exponential Moving Average
 * of observed response times. Returns `[nextDelay, updatedEma]`.
 *
 * Pass `previousEma` as 0 (or omit it) on the first call; the EMA is then
 * seeded directly with `elapsedMs`. Non-finite or negative `elapsedMs` leaves
 * the EMA untouched and yields the floor delay.
 *
 * With current parameters (alpha=0.3, scale=10x, 15s–60s clamp):
 *   ~500ms response = 15s poll (floor)
 *   ~2s response = 20s poll
 *   ~3s response = 30s poll
 *   ~5s response = 50s poll
 *   ~6s+ response = 60s poll (ceiling)
 */
export const computeAdaptiveDelay = (
  elapsedMs: number,
  previousEma: number = 0,
): [number, number] => {
  if (!Number.isFinite(elapsedMs) || elapsedMs < 0) {
    return [MIN_POLL_DELAY, previousEma];
  }
  const updatedEma =
    previousEma === 0 ? elapsedMs : EMA_ALPHA * elapsedMs + (1 - EMA_ALPHA) * previousEma;
  return [emaToDelay(updatedEma), updatedEma];
};