@@ -2945,11 +2945,17 @@ describe("RunEngine debounce", () => {
29452945 // the engine's runLock cannot acquire it. Since we configured
29462946 // `retryConfig.maxAttempts: 0` and `maxTotalWaitTime: 1`, the second
29472947 // trigger should hit the contention fallback rather than bubble a 5xx.
2948+ // Note: the prefix template here intentionally matches what the engine
2949+ // builds at index.ts:120 (no `?? ""` fallback) so that the keys line up
2950+ // even when redisOptions.keyPrefix is undefined.
29482951 const blockingRedis = createRedisClient ( {
29492952 ...redisOptions ,
2950- keyPrefix : `${ redisOptions . keyPrefix ?? "" } runlock:` ,
2953+ keyPrefix : `${ redisOptions . keyPrefix } runlock:` ,
29512954 } ) ;
29522955
2956+ const originalDelayUntil = run1 . delayUntil ;
2957+ assertNonNullable ( originalDelayUntil ) ;
2958+
29532959 try {
29542960 const blockResult = await blockingRedis . set (
29552961 run1 . id ,
@@ -2987,6 +2993,16 @@ describe("RunEngine debounce", () => {
29872993
29882994 // We did NOT 5xx; we returned the existing run.
29892995 expect ( run2 . id ) . toBe ( run1 . id ) ;
2996+
2997+ // Prove the fallback actually ran rather than the lock being acquired
2998+ // normally: the second trigger could not push delayUntil forward
2999+ // because rescheduling is skipped on contention.
3000+ const updatedRun = await prisma . taskRun . findFirst ( {
3001+ where : { id : run1 . id } ,
3002+ } ) ;
3003+ assertNonNullable ( updatedRun ) ;
3004+ assertNonNullable ( updatedRun . delayUntil ) ;
3005+ expect ( updatedRun . delayUntil . getTime ( ) ) . toBe ( originalDelayUntil . getTime ( ) ) ;
29903006 } finally {
29913007 await blockingRedis . del ( run1 . id ) ;
29923008 await blockingRedis . quit ( ) ;