Python Program to Make Concurrent Requests with Delay Between Batches

Published May 05, 2026

Python 3.14 program that uses asyncio and aiohttp to make concurrent requests with a delay between batches (it runs on both the standard and the free-threaded builds)

Scenario

There is a website with URLs of the format https://localhost:8080/names/ID where ID takes values from 1 through 2,000 and returns a random name associated with that ID.

I want to write a program that makes 25 concurrent URL calls to that website, sleep for 7 seconds, then do another 25 concurrent calls, sleep for 7 seconds, and so on. The program has to repeat this until all the 2,000 URLs are called.

Python 3.14 and asyncio + aiohttp

I will use Python 3.14 with asyncio and aiohttp for efficient I/O with the calls.

Python 3.14 officially supports an optional free-threaded build that can run without the GIL, enabling CPU-bound threads to execute truly in parallel; the standard build still uses the GIL. This program, however, is I/O-bound, so its concurrency comes from asyncio rather than from free threading.

Python code to make 25 concurrent requests with 7 second snooze time

#!/usr/bin/env python3
"""
Author  : Arul John
Created : 23 April 2026
Updated : <NEVER>

Fetch 1,000 URLs with exactly 25 concurrent requests
Sleep for 7 seconds after each batch of 25 requests
"""

import asyncio
import aiohttp
from datetime import datetime
import time
from typing import List, Dict, Any, Optional

# Vars — tuning knobs for the batched fetch run
total_urls = 1000  # NOTE(review): the scenario above says 2,000 URLs but the code uses 1,000 — confirm the intended total
concurrency = 25  # number of requests issued simultaneously per batch
snooze = 7  # seconds to sleep between batches
DEFAULT_CONNECT_TIMEOUT = 10  # seconds allowed to establish a TCP connection
DEFAULT_READ_TIMEOUT = 20  # seconds allowed per socket read
DEFAULT_TOTAL_TIMEOUT = 30  # seconds allowed for a whole request (connect + read)
MAX_RETRIES = 2  # retries after the initial attempt (so 3 tries in total)
BACKOFF_BASE_SEC = 0.5  # first backoff delay; doubles on each subsequent retry
USER_AGENT = "Mozilla/6.0 (compatible; +https://localhost:8080/names/ID)"  # NOTE(review): "Mozilla/6.0" is an unconventional token (browsers send 5.0) — confirm intended

# Build the list of target URLs (defaults to the module-level total_urls)
def get_urls(count: Optional[int] = None) -> List[str]:
    """Return the name-service URLs for IDs 1 through ``count``.

    Args:
        count: How many URLs to generate. When omitted (the backward-
               compatible default) the module-level ``total_urls`` is used.

    Returns:
        A list of ``https://localhost:8080/names/<id>`` strings in ID order.
    """
    if count is None:
        count = total_urls
    return [f'https://localhost:8080/names/{i}' for i in range(1, count + 1)]

# Split a sequence into consecutive pieces; only the final piece may be short
def chunked(seq: List[str], size: int) -> List[List[str]]:
    """Partition *seq* into sublists of at most *size* items, preserving order."""
    pieces: List[List[str]] = []
    for start in range(0, len(seq), size):
        pieces.append(seq[start:start + size])
    return pieces

# Fetch one URL, retrying on transient errors with exponential backoff
async def fetch_one(
    session: aiohttp.ClientSession,
    url: str,
    attempt_limit: int = MAX_RETRIES,
) -> Dict[str, Any]:
    """Fetch *url* once, retrying up to *attempt_limit* extra times.

    Only ``aiohttp.ClientError`` and timeouts trigger a retry; any other
    exception propagates. The returned dict always has the keys
    url/status/ok/bytes/attempt/duration_ms/error. ``duration_ms`` spans
    the whole call, including backoff sleeps.
    """
    started = time.perf_counter()
    failure: Optional[BaseException] = None
    max_attempts = attempt_limit + 1  # the initial try plus the retries

    for attempt_no in range(1, max_attempts + 1):
        try:
            async with session.get(url, ssl=False) as response:
                payload = await response.read()
                elapsed_ms = int((time.perf_counter() - started) * 1000)
                return {
                    "url": url,
                    "status": response.status,
                    "ok": 200 <= response.status < 400,
                    "bytes": len(payload),
                    "attempt": attempt_no,
                    "duration_ms": elapsed_ms,
                    "error": None,
                }
        except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
            failure = exc
            # Back off before the next try: 0.5s, 1s, 2s, ... (none after the last)
            if attempt_no < max_attempts:
                await asyncio.sleep(BACKOFF_BASE_SEC * (2 ** (attempt_no - 1)))

    # Every attempt failed — report the last exception seen.
    elapsed_ms = int((time.perf_counter() - started) * 1000)
    return {
        "url": url,
        "status": None,
        "ok": False,
        "bytes": 0,
        "attempt": max_attempts,
        "duration_ms": elapsed_ms,
        "error": str(failure) if failure else "unknown-error",
    }

# Fetch a batch of URLs concurrently using a fresh session per batch
async def fetch_batch(urls: List[str], concurrency: int) -> List[Dict[str, Any]]:
    """Fetch every URL in *urls* concurrently and return their result dicts.

    A new session (and connector) is created per batch; the connector caps
    simultaneous connections at *concurrency*. Results come back in the
    same order as *urls*.
    """
    client_timeout = aiohttp.ClientTimeout(
        total=DEFAULT_TOTAL_TIMEOUT,
        connect=DEFAULT_CONNECT_TIMEOUT,
        sock_read=DEFAULT_READ_TIMEOUT,
    )

    async with aiohttp.ClientSession(
        timeout=client_timeout,
        # Connector bound matches the batch size, so one batch = one wave
        connector=aiohttp.TCPConnector(limit=concurrency),
        headers={"User-Agent": USER_AGENT},
        raise_for_status=False,
        trust_env=True,
    ) as session:
        # gather() schedules all coroutines concurrently and preserves order
        return await asyncio.gather(*(fetch_one(session, link) for link in urls))

# Aggregate a list of per-URL results into one stats dict
def summarize(results: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Return totals, success/failure counts, and mean duration for *results*."""
    total = len(results)
    successes = len([r for r in results if r["ok"]])
    if total:
        avg_ms = round(sum(r["duration_ms"] for r in results) / total, 1)
    else:
        # Avoid ZeroDivisionError on an empty run
        avg_ms = 0.0
    return {
        "total": total,
        "successes": successes,
        "failures": total - successes,
        "avg_duration_ms": avg_ms,
    }

# Drive the whole run: one batch at a time, sleeping between batches
async def run_batched(urls: List[str], batch_size: int, sleep_seconds: int) -> List[Dict[str, Any]]:
    """Fetch *urls* in batches of *batch_size*, pausing *sleep_seconds* between batches.

    No sleep follows the final batch. Returns the concatenated per-URL
    result dicts in input order.
    """
    collected: List[Dict[str, Any]] = []
    batches = chunked(urls, batch_size)
    last_index = len(batches)

    for batch_no, batch in enumerate(batches, start=1):
        print(f' πŸš€ Starting batch {batch_no}/{last_index} with {len(batch)} URLs at {datetime.now()}')
        print(f'... πŸ”₯ Batch URLs: {batch[0]} ... {batch[-1]}')
        # Run the whole batch concurrently (bounded by batch_size)
        collected.extend(await fetch_batch(batch, concurrency=batch_size))

        # "After every 25 requests, sleep 7 seconds" — strict batching of
        # size batch_size makes this check sufficient; skip after the last.
        if batch_no != last_index:
            print(f'... ⏰ Snoozing for {sleep_seconds} seconds at {datetime.now()}')
            await asyncio.sleep(sleep_seconds)

    return collected

# Program entry: build URLs, run the batched fetch, print the summary
def main() -> None:
    """Fetch every URL in throttled batches and print a one-line summary."""
    results = asyncio.run(
        run_batched(get_urls(), batch_size=concurrency, sleep_seconds=snooze)
    )
    stats = summarize(results)
    summary_line = (
        f"Done: total={stats['total']} successes={stats['successes']} "
        f"failures={stats['failures']} avg_duration_ms={stats['avg_duration_ms']}"
    )
    print(summary_line)

# Main entry point — run only when executed as a script, not on import
if __name__ == "__main__":
    main()

Conclusion

This is one way of calling concurrent URLs with snooze time between batches. There are other ways, including using multithreading, but I like this one the most. If you have any other favorite way of making concurrent calls, let me know. Thanks for reading.

Related Posts

If you have any questions, please contact me at arulbOsutkNiqlzziyties@gNqmaizl.bkcom. You can also post questions in our Facebook group. Thank you.

Disclaimer: Our website is supported by our users. We sometimes earn affiliate commissions when you click through the affiliate links on our website.

Last Updated: May 05, 2026.     This post was originally written on April 30, 2026.