4 changes: 2 additions & 2 deletions README.md
@@ -108,7 +108,7 @@ When batching is enabled:

- tasks are added to SQS by batches of 10, reducing the number of AWS operations
- it is not possible to get the task `MessageId`, as it is not known until the batch is sent
-- care has to be taken about message size, as SQS limits both the individual message size and the maximum total payload size to 256 kB.
+- care has to be taken about message size, as SQS limits both the individual message size and the maximum total payload size to 1 MB.

## Batch Reads

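For readers skimming the diff, here is a minimal sketch of the batching API the README bullets above describe, assembled from the calls that appear in this PR's tests (`queue.connect_processor`, `task.batch()`, `task.delay`); the `SQSEnv()` setup and the `"emails"` queue name are illustrative assumptions, not part of this change:

```python
from sqs_workers import SQSEnv

# Illustrative setup: assumes an SQSEnv pointed at AWS or a local
# ElasticMQ/LocalStack endpoint, and an existing queue named "emails".
sqs = SQSEnv()
queue = sqs.queue("emails")


def say_hello(username: str) -> None:
    print(f"Hello, {username}")


say_hello_task = queue.connect_processor("say_hello", say_hello)

# Inside batch(), tasks are buffered and sent in groups of up to 10;
# no MessageId is returned for individual tasks, and the buffered
# payload has to stay under the 1 MB SQS limit described above.
with say_hello_task.batch():
    for n in range(25):
        say_hello_task.delay(username=f"user-{n}")
```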
@@ -507,7 +507,7 @@ Localstack tests should perform faster than testing against AWS, and besides, th
Run [ElasticMQ](https://github.com/softwaremill/elasticmq) and make sure that the SQS endpoint is available by the address localhost:4566:

```bash
-uv run -p 4566:9324 --rm -it softwaremill/elasticmq-native
+docker run -p 4566:9324 --rm -it softwaremill/elasticmq-native
```

Then run
2 changes: 1 addition & 1 deletion sqs_workers/queue.py
@@ -28,7 +28,7 @@

DEFAULT_MESSAGE_GROUP_ID = "default"
SEND_BATCH_SIZE = 10
-MAX_MESSAGE_LENGTH = 262144 # 256 KiB
+MAX_MESSAGE_LENGTH = 1048576 # 1 MiB (since Aug 4, 2025)

if TYPE_CHECKING:
from collections.abc import Generator, Iterable
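The tests below exercise how this constant feeds the batch buffer: items accumulate until adding one more would exceed the limit, at which point the buffered items are flushed and the overflowing item is carried over into the next batch ("walk back 1 item"). A rough sketch of that bookkeeping, with hypothetical `pending`/`flush`/`add_to_batch` names that are not the library's actual internals:

```python
MAX_MESSAGE_LENGTH = 1048576  # 1 MiB (since Aug 4, 2025)
SEND_BATCH_SIZE = 10


def add_to_batch(pending: list[str], body: str, flush) -> None:
    """Illustrative bookkeeping only; not sqs_workers' actual implementation.

    A single message larger than the limit is not caught here -- in the
    tests below SQS itself rejects it with a MessageTooLong ClientError.
    """
    projected = sum(len(m) for m in pending) + len(body)
    if pending and (projected > MAX_MESSAGE_LENGTH or len(pending) >= SEND_BATCH_SIZE):
        # Flush what is already buffered and let the new message start
        # the next batch ("walk back 1 item").
        flush(pending)
        pending.clear()
    pending.append(body)
```

Under this scheme, nine ~130 kB payloads trigger one flush of the first eight messages with the ninth left pending, which is roughly what `test_batch_flush_on_large_messages` below asserts.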
19 changes: 11 additions & 8 deletions tests/test_sqs.py
@@ -4,6 +4,7 @@
import time

import botocore
+import localstack_client.session
import pytest

from sqs_workers import (
@@ -157,17 +158,19 @@ def test_batch_should_keep_messages_until_overflow(sqs, queue_name):
    assert len(queue.get_raw_messages(0)) == 1


-def test_batch_flush_on_large_messages(sqs, queue_name):
+def test_batch_flush_on_large_messages(sqs_session, sqs, queue_name):
+    if isinstance(sqs_session, localstack_client.session.Session):
+        pytest.xfail("ElasticMQ still has the old 256 KiB message limit")
+
    queue = sqs.queue(queue_name)
    say_hello_task = queue.connect_processor("say_hello", say_hello)

-    # 256KiB is our message limit
+    # 1 MiB is our message limit
    with say_hello_task.batch():
        # no messages after 9 tasks
        for n in range(9):
-            # each message is approx 32427 Bytes
-            say_hello_task.delay(username=f"Homer {n} 🍩" * 1_000_000)
-        # first 9 items is ~283651 Bytes so flush is triggered
+            say_hello_task.delay(username=f"Homer {n} 🍩" * 4_000_000)
+        # first 9 items is ~1.1 MiB so flush is triggered
        # and we walk back 1 item
        assert len(queue.get_raw_messages(0)) == 8

@@ -182,11 +185,11 @@ def test_batch_fails_on_a_giant_message(sqs_session, sqs, queue_name):
    queue = sqs.queue(queue_name)
    say_hello_task = queue.connect_processor("say_hello", say_hello)

-    # 262144 Bytes is our message limit
+    # 1 MiB is our message limit
    with say_hello_task.batch():
        with pytest.raises(botocore.exceptions.ClientError) as excinfo:
-            # message ~264087 bytes long
-            say_hello_task.delay(username="Homer 🍩" * 10_150_000)
+            # message exceeds 1 MiB limit
+            say_hello_task.delay(username="Homer 🍩" * 40_600_000)
    assert "MessageTooLong" in str(excinfo.value)

