linear-rate-limits

Handle Linear API rate limiting and quotas effectively. Use when dealing with rate limit errors, implementing throttling, or optimizing API usage patterns. Trigger with phrases like "linear rate limit", "linear throttling", "linear API quota", "linear 429 error", "linear request limits".
Use the skills CLI to install this skill with one command. Auto-detects all installed AI assistants.
Method 1 - skills CLI
npx skills i jeremylongshore/claude-code-plugins-plus-skills/plugins/saas-packs/linear-pack/skills/linear-rate-limits

Method 2 - openskills (supports sync & update)

npx openskills install jeremylongshore/claude-code-plugins-plus-skills

Auto-detects Claude Code, Cursor, Codex CLI, Gemini CLI, and more. One install, works everywhere.
Installation Path
Download and extract to your assistant's skills directory. For Claude Code:

~/.claude/skills/linear-rate-limits/
Linear Rate Limits
Overview
Understand and handle Linear API rate limits for reliable integrations.
Prerequisites
Linear SDK configured (a minimal client setup sketch follows this list)
Understanding of HTTP headers
Familiarity with async patterns
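The steps below assume a configured SDK client. A minimal sketch, assuming the API key is supplied via a LINEAR_API_KEY environment variable (the variable name and file path are illustrative):

```typescript
// lib/linear.ts
import { LinearClient } from "@linear/sdk";

// Assumes LINEAR_API_KEY holds a Linear personal API key.
const apiKey = process.env.LINEAR_API_KEY;
if (!apiKey) {
  throw new Error("LINEAR_API_KEY is not set");
}

export const linearClient = new LinearClient({ apiKey });
```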
Linear Rate Limit Structure
Current Limits
| Tier | Requests/min | Complexity/min | Notes |
| --- | --- | --- | --- |
| Standard | 1,500 | 250,000 | Most integrations |
| Enterprise | Higher | Higher | Contact Linear |
Responses include headers that report current usage:

```
X-RateLimit-Limit: 1500
X-RateLimit-Remaining: 1499
X-RateLimit-Reset: 1640000000
X-Complexity-Limit: 250000
X-Complexity-Cost: 50
X-Complexity-Remaining: 249950
```
Instructions
Step 1: Basic Rate Limit Handler
```typescript
// lib/rate-limiter.ts
interface RateLimitState {
  remaining: number;
  reset: Date;
  complexityRemaining: number;
}

class LinearRateLimiter {
  private state: RateLimitState = {
    remaining: 1500,
    reset: new Date(),
    complexityRemaining: 250000,
  };

  updateFromHeaders(headers: Headers): void {
    const remaining = headers.get("x-ratelimit-remaining");
    const reset = headers.get("x-ratelimit-reset");
    const complexityRemaining = headers.get("x-complexity-remaining");

    if (remaining) this.state.remaining = parseInt(remaining);
    if (reset) this.state.reset = new Date(parseInt(reset) * 1000);
    if (complexityRemaining) {
      this.state.complexityRemaining = parseInt(complexityRemaining);
    }
  }

  async waitIfNeeded(): Promise<void> {
    // If very low on requests, wait until reset
    if (this.state.remaining < 10) {
      const waitMs = this.state.reset.getTime() - Date.now();
      if (waitMs > 0) {
        console.log(`Rate limit low, waiting ${waitMs}ms...`);
        await new Promise(r => setTimeout(r, waitMs));
      }
    }
  }

  getState(): RateLimitState {
    return { ...this.state };
  }
}

export const rateLimiter = new LinearRateLimiter();
```
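A usage sketch: since the high-level SDK does not surface response headers, the limiter pairs most naturally with a raw GraphQL request. The wrapper below is an assumption, not part of the skill:

```typescript
import { rateLimiter } from "./lib/rate-limiter";

// Wait if the budget is nearly exhausted, make the request,
// then feed the response headers back into the limiter.
async function rawLinearRequest(query: string, variables?: object) {
  await rateLimiter.waitIfNeeded();

  const response = await fetch("https://api.linear.app/graphql", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: process.env.LINEAR_API_KEY ?? "",
    },
    body: JSON.stringify({ query, variables }),
  });

  rateLimiter.updateFromHeaders(response.headers);
  return response.json();
}
```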
Step 2: Exponential Backoff
```typescript
// lib/backoff.ts
interface BackoffOptions {
  maxRetries?: number;
  baseDelayMs?: number;
  maxDelayMs?: number;
  jitter?: boolean;
}

export async function withBackoff<T>(
  fn: () => Promise<T>,
  options: BackoffOptions = {}
): Promise<T> {
  const {
    maxRetries = 5,
    baseDelayMs = 1000,
    maxDelayMs = 30000,
    jitter = true,
  } = options;

  let lastError: Error | undefined;

  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await fn();
    } catch (error: any) {
      lastError = error;

      // Only retry on rate limit errors
      const isRateLimited =
        error?.extensions?.code === "RATE_LIMITED" ||
        error?.response?.status === 429;

      if (!isRateLimited || attempt === maxRetries - 1) {
        throw error;
      }

      // Calculate delay with exponential backoff
      let delay = Math.min(baseDelayMs * Math.pow(2, attempt), maxDelayMs);

      // Add jitter to prevent thundering herd
      if (jitter) {
        delay += Math.random() * delay * 0.1;
      }

      // Check Retry-After header if available
      const retryAfter = error?.response?.headers?.get?.("retry-after");
      if (retryAfter) {
        delay = Math.max(delay, parseInt(retryAfter) * 1000);
      }

      console.log(
        `Rate limited, attempt ${attempt + 1}/${maxRetries}, ` +
        `retrying in ${Math.round(delay)}ms...`
      );
      await new Promise(r => setTimeout(r, delay));
    }
  }

  throw lastError;
}
```
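A usage sketch, reusing the client from the Prerequisites sketch (the issue id and priority are placeholders):

```typescript
import { linearClient } from "./lib/linear";
import { withBackoff } from "./lib/backoff";

// Retries on 429 / RATE_LIMITED with exponential backoff, then rethrows.
async function safeUpdatePriority(issueId: string, priority: number) {
  return withBackoff(() => linearClient.updateIssue(issueId, { priority }));
}
```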
Step 3: Request Queue
```typescript
// lib/queue.ts
type QueuedRequest<T> = {
  fn: () => Promise<T>;
  resolve: (value: T) => void;
  reject: (error: Error) => void;
};

class RequestQueue {
  private queue: QueuedRequest<any>[] = [];
  private processing = false;
  private requestsPerSecond = 20; // Conservative rate

  async add<T>(fn: () => Promise<T>): Promise<T> {
    return new Promise((resolve, reject) => {
      this.queue.push({ fn, resolve, reject });
      this.process();
    });
  }

  private async process(): Promise<void> {
    if (this.processing) return;
    this.processing = true;

    while (this.queue.length > 0) {
      const request = this.queue.shift()!;
      try {
        const result = await request.fn();
        request.resolve(result);
      } catch (error) {
        request.reject(error as Error);
      }
      // Throttle requests
      await new Promise(r =>
        setTimeout(r, 1000 / this.requestsPerSecond)
      );
    }

    this.processing = false;
  }

  get pending(): number {
    return this.queue.length;
  }
}

export const requestQueue = new RequestQueue();
```
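A usage sketch (the team key is a placeholder); every call routed through the queue is throttled to the configured rate:

```typescript
import { linearClient } from "./lib/linear";
import { requestQueue } from "./lib/queue";

// Queued calls execute sequentially at roughly 20 requests per second.
async function listTeamIssues(teamKey: string) {
  return requestQueue.add(() =>
    linearClient.issues({ filter: { team: { key: { eq: teamKey } } } })
  );
}
```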
Step 4: Batch Operations
```typescript
// lib/batch.ts
import { LinearClient } from "@linear/sdk";

interface BatchConfig {
  batchSize: number;
  delayBetweenBatches: number;
}

export async function batchProcess<T, R>(
  items: T[],
  processor: (item: T) => Promise<R>,
  config: BatchConfig = { batchSize: 10, delayBetweenBatches: 1000 }
): Promise<R[]> {
  const results: R[] = [];
  const batches: T[][] = [];

  // Split into batches
  for (let i = 0; i < items.length; i += config.batchSize) {
    batches.push(items.slice(i, i + config.batchSize));
  }

  for (let i = 0; i < batches.length; i++) {
    const batch = batches[i];
    console.log(`Processing batch ${i + 1}/${batches.length}...`);

    // Process batch in parallel
    const batchResults = await Promise.all(batch.map(processor));
    results.push(...batchResults);

    // Delay between batches (except last)
    if (i < batches.length - 1) {
      await new Promise(r => setTimeout(r, config.delayBetweenBatches));
    }
  }

  return results;
}

// Usage example
async function updateManyIssues(
  client: LinearClient,
  updates: { id: string; priority: number }[]
) {
  return batchProcess(
    updates,
    ({ id, priority }) => client.updateIssue(id, { priority }),
    { batchSize: 10, delayBetweenBatches: 2000 }
  );
}
```
Step 5: Query Optimization
```typescript
import { LinearClient } from "@linear/sdk";

// Reduce complexity by limiting fields
const optimizedQuery = `
  query Issues($filter: IssueFilter) {
    issues(filter: $filter, first: 50) {
      nodes {
        id
        identifier
        title
        # Avoid nested connections in loops
      }
    }
  }
`;

// Use SDK efficiently
async function getIssuesOptimized(
  client: LinearClient,
  teamKey: string
) {
  // Good: Single query with filter
  return client.issues({
    filter: { team: { key: { eq: teamKey } } },
    first: 50,
  });

  // Bad: N+1 queries
  // const teams = await client.teams();
  // for (const team of teams.nodes) {
  //   const issues = await team.issues(); // N queries!
  // }
}
```
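When a single query would be too expensive or slow, splitting it across pages keeps each request's complexity low. A sketch using standard cursor pagination (`first`/`after`), assuming the connection's `pageInfo` fields are available as in the SDK's generated types:

```typescript
import { Issue, LinearClient } from "@linear/sdk";

// Walk a team's issues 50 at a time instead of one expensive query.
async function getAllTeamIssues(client: LinearClient, teamKey: string) {
  const all: Issue[] = [];
  let cursor: string | undefined;

  do {
    const page = await client.issues({
      filter: { team: { key: { eq: teamKey } } },
      first: 50,
      after: cursor,
    });
    all.push(...page.nodes);
    cursor = page.pageInfo.hasNextPage ? page.pageInfo.endCursor : undefined;
  } while (cursor);

  return all;
}
```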
Output
Rate limit monitoring
Automatic retry with backoff
Request queuing and throttling
Batch processing utilities
Optimized query patterns
Error Handling
| Error | Cause | Solution |
| --- | --- | --- |
| 429 Too Many Requests | Rate limit exceeded | Use backoff and queue |
| Complexity exceeded | Query too expensive | Simplify query structure |
| Timeout | Long-running query | Paginate or split queries |
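For the first row, one option is to compose the queue and backoff utilities from Steps 2 and 3 so that every call is both throttled and retried on 429. The composition below is a sketch, not prescribed by this skill:

```typescript
import { requestQueue } from "./lib/queue";
import { withBackoff } from "./lib/backoff";

// Throttle via the queue, and retry rate-limited calls with backoff.
async function resilientCall<T>(fn: () => Promise<T>): Promise<T> {
  return requestQueue.add(() => withBackoff(fn));
}
```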
Resources
Next Steps
Learn security best practices with linear-security-basics.