From 326186f4a8897fb830d0895103890d30d6ca661b Mon Sep 17 00:00:00 2001
From: valknarness
Date: Mon, 27 Oct 2025 20:52:41 +0100
Subject: [PATCH] fix: optimize CI rate limit strategy for batch efficiency
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changes for CI mode (process.env.CI === 'true'):
- Remove grace period (was 10min) to enable continuous monitoring
- Increase check frequency from 1% to 10% to catch low rate limits early
- Raise proactive threshold from 200 to 500 requests
- Increase resume threshold from 100 to 1000 requests

This prevents wasting time on small batches (e.g. 184 requests = 2min work
+ 13min wait) by ensuring we work in larger 1000-5000 request batches for
better time efficiency within the 170-minute timeout.

Local mode unchanged: maintains user-friendly behavior with fewer
interruptions.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 lib/github-api.js | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/lib/github-api.js b/lib/github-api.js
index 8f1b2cc..148fd9e 100644
--- a/lib/github-api.js
+++ b/lib/github-api.js
@@ -48,9 +48,9 @@ async function checkRateLimit() {
 // Wait for rate limit to reset using polling
 async function waitForRateLimitReset(targetResetTime) {
   const POLL_INTERVAL = 60000; // 60 seconds
-  // In CI: use low threshold for incremental progress within timeout constraints
+  // In CI: use moderate threshold to balance efficiency and timeout constraints
   // Locally: use high threshold for better user experience (fewer interruptions)
-  const MIN_REMAINING_TO_CONTINUE = process.env.CI === 'true' ? 100 : 4500;
+  const MIN_REMAINING_TO_CONTINUE = process.env.CI === 'true' ? 1000 : 4500;
   const MAX_POLLS = 120; // Max 120 polls = 2 hours
 
   const estimatedWaitMinutes = Math.ceil(Math.max(0, targetResetTime - Date.now()) / 60000);
@@ -121,21 +121,24 @@ async function rateLimitedRequest(url, options = {}) {
     await new Promise(resolve => setTimeout(resolve, RATE_LIMIT_DELAY - timeSinceLastRequest));
   }
 
-  // Check rate limit proactively every 100 requests
-  // BUT skip proactive checks for 10 minutes after recovering from rate limit
+  // Check rate limit proactively
+  // In CI: check frequently with no grace period to maximize efficiency
+  // Locally: use grace period to reduce API overhead after recovery
+  const isCI = process.env.CI === 'true';
   const timeSinceRecovery = Date.now() - lastRateLimitRecoveryTime;
-  const RECOVERY_GRACE_PERIOD = 10 * 60 * 1000; // 10 minutes
+  const RECOVERY_GRACE_PERIOD = isCI ? 0 : (10 * 60 * 1000); // No grace period in CI, 10min locally
+  const CHECK_FREQUENCY = isCI ? 0.10 : 0.01; // 10% in CI, 1% locally
+  const LOW_THRESHOLD = isCI ? 500 : 200; // Wait at 500 in CI for full batches, 200 locally
 
-  if (timeSinceRecovery > RECOVERY_GRACE_PERIOD && Math.random() < 0.01) { // 1% chance
+  if (timeSinceRecovery > RECOVERY_GRACE_PERIOD && Math.random() < CHECK_FREQUENCY) {
     const rateLimitStatus = await checkRateLimit();
-    if (rateLimitStatus && rateLimitStatus.remaining < 200) {
+    if (rateLimitStatus && rateLimitStatus.remaining < LOW_THRESHOLD) {
       console.log();
       console.log(chalk.yellow(`⚠️ Rate limit getting low: ${rateLimitStatus.remaining}/${rateLimitStatus.limit} remaining`));
       console.log(chalk.yellow(` Proactively waiting for rate limit to reset...`));
 
-      const isCI = process.env.CI === 'true';
       if (isCI) {
-        console.log(chalk.cyan('🤖 CI mode: starting polling to wait for reset...'));
+        console.log(chalk.cyan('🤖 CI mode: waiting for full reset to maximize batch efficiency...'));
         await waitForRateLimitReset(rateLimitStatus.reset);
       }
     }
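Illustrative sketch (not part of the patch): the snippet below summarizes the mode-dependent constants this change selects, to make the batch reasoning in the commit message easy to check. The helper name pickThresholds is hypothetical, and the ~5000-request ceiling assumes GitHub's standard authenticated REST rate limit.

// Hypothetical helper mirroring the constants chosen in lib/github-api.js.
// In CI, polling resumes once at least 1000 requests are available (or the
// limit fully resets to ~5000), and a proactive pause triggers below 500,
// so each CI batch works with a budget of roughly 1000-5000 requests.
function pickThresholds(isCI) {
  return {
    resumeAt: isCI ? 1000 : 4500,             // MIN_REMAINING_TO_CONTINUE
    pauseBelow: isCI ? 500 : 200,             // LOW_THRESHOLD
    checkFrequency: isCI ? 0.10 : 0.01,       // chance of a proactive check per request
    gracePeriodMs: isCI ? 0 : 10 * 60 * 1000, // RECOVERY_GRACE_PERIOD
  };
}

console.log(pickThresholds(process.env.CI === 'true'));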