diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 879fed4..15ac9d7 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -71,7 +71,7 @@ jobs: run: docker-compose up -d - name: Code Coverage - run: go test ./... -timeout 600s -race -count=1 -covermode=atomic -coverprofile=coverage.txt + run: go test ./... -timeout 900s -race -count=1 -covermode=atomic -coverprofile=coverage.txt - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 diff --git a/DEBUG.md b/DEBUG.md new file mode 100644 index 0000000..89d8e25 --- /dev/null +++ b/DEBUG.md @@ -0,0 +1,453 @@ +=== RUN TestHandlerPauseAndResume + amqpx_test.go:430: + Error Trace: /home/behm015/Development/amqpx/amqpx_test.go:430 + /home/behm015/Development/amqpx/pool/subscriber.go:287 + /home/behm015/Development/amqpx/pool/subscriber.go:222 + /usr/local/go/src/runtime/asm_amd64.s:1650 + Error: Not equal: + expected: false + actual : true + Test: TestHandlerPauseAndResume + Messages: expected active to be false + + + +panic: test timed out after 10m0s +running tests: + TestBatchHandlerPauseAndResume (8m49s) + +goroutine 316 [running]: +testing.(*M).startAlarm.func1() + /usr/local/go/src/testing/testing.go:2259 +0x1fc +created by time.goFunc + /usr/local/go/src/time/sleep.go:176 +0x45 + +goroutine 1 [chan receive, 8 minutes]: +testing.(*T).Run(0xc000082ea0, {0x89c94f, 0x1e}, 0x8c4010) + /usr/local/go/src/testing/testing.go:1649 +0x856 +testing.runTests.func1(0x0?) 
+ /usr/local/go/src/testing/testing.go:2054 +0x85 +testing.tRunner(0xc000082ea0, 0xc0000f9908) + /usr/local/go/src/testing/testing.go:1595 +0x239 +testing.runTests(0xc00009abe0?, {0xb33520, 0x9, 0x9}, {0x4a8459?, 0x4a9c31?, 0xb397c0?}) + /usr/local/go/src/testing/testing.go:2052 +0x897 +testing.(*M).Run(0xc00009abe0) + /usr/local/go/src/testing/testing.go:1925 +0xb58 +go.uber.org/goleak.VerifyTestMain({0x929a20, 0xc00009abe0}, {0xc0000f9e18, 0x3, 0x3}) + /home/behm015/go/pkg/mod/go.uber.org/goleak@v1.3.0/testmain.go:53 +0x65 +github.com/jxsl13/amqpx_test.TestMain(0xfdfb0802185865db?) + /home/behm015/Development/amqpx/amqpx_test.go:24 +0x2e9 +main.main() + _testmain.go:67 +0x308 + +goroutine 170 [semacquire, 7 minutes]: +sync.runtime_Semacquire(0xc0001287e8?) + /usr/local/go/src/runtime/sema.go:62 +0x25 +sync.(*WaitGroup).Wait(0xc0001287e0) + /usr/local/go/src/sync/waitgroup.go:116 +0xa5 +github.com/jxsl13/amqpx/pool.(*Subscriber).Close(0xc000128780) + /home/behm015/Development/amqpx/pool/subscriber.go:35 +0x12a +github.com/jxsl13/amqpx.(*AMQPX).Close.(*AMQPX).close.func1() + /home/behm015/Development/amqpx/amqpx.go:236 +0x96 +sync.(*Once).doSlow(0xb398d4, 0xc0000ad888) + /usr/local/go/src/sync/once.go:74 +0xf1 +sync.(*Once).Do(0xb398d4, 0xc0000ad878?) + /usr/local/go/src/sync/once.go:65 +0x45 +github.com/jxsl13/amqpx.(*AMQPX).close(...) + /home/behm015/Development/amqpx/amqpx.go:233 +github.com/jxsl13/amqpx.(*AMQPX).Close(0xb39840) + /home/behm015/Development/amqpx/amqpx.go:229 +0xf2 +github.com/jxsl13/amqpx.Close(...) + /home/behm015/Development/amqpx/amqpx.go:351 +github.com/jxsl13/amqpx_test.testBatchHandlerPauseAndResume(0xc00029a4e0) + /home/behm015/Development/amqpx/amqpx_test.go:751 +0x1331 +github.com/jxsl13/amqpx_test.TestBatchHandlerPauseAndResume(0x0?) 
+ /home/behm015/Development/amqpx/amqpx_test.go:539 +0x31 +testing.tRunner(0xc00029a4e0, 0x8c4010) + /usr/local/go/src/testing/testing.go:1595 +0x239 +created by testing.(*T).Run in goroutine 1 + /usr/local/go/src/testing/testing.go:1648 +0x82b + +goroutine 34 [syscall, 9 minutes]: +os/signal.signal_recv() + /usr/local/go/src/runtime/sigqueue.go:152 +0x29 +os/signal.loop() + /usr/local/go/src/os/signal/signal_unix.go:23 +0x1d +created by os/signal.Notify.func1.1 in goroutine 19 + /usr/local/go/src/os/signal/signal.go:151 +0x47 + +goroutine 33 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc00017e6e0, 0x1bf08eb00, 0xc0002d82a0) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 291 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04e68, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc00014a4a0, 0xc000495000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc00014a480, {0xc000495000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc00014a480, {0xc000495000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc0002da170, {0xc000495000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc0002d95c0, {0xc00023a159, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc0002d95c0}, {0xc00023a159, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) 
+ /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc000165f18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc0000566e0, {0x929fa0?, 0xc0002da170}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 41 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04d70, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc0001283a0, 0xc00029f000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc000128380, {0xc00029f000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc000128380, {0xc00029f000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc000158090, {0xc00029f000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc00014c780, {0xc00013c259, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc00014c780}, {0xc00013c259, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) 
+ /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc0000a9f18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc00017e2c0, {0x929fa0?, 0xc000158090}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 21 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc00017e2c0, 0x1bf08eb00, 0xc00008e660) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 58 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04c78, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc00034c120, 0xc000365000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc00034c100, {0xc000365000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc00034c100, {0xc000365000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc000346030, {0xc000365000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc000344240, {0xc0003cc6d9, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc000344240}, {0xc0003cc6d9, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) 
+ /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc00016bf18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc00017e160, {0x929fa0?, 0xc000346030}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 137 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc000056000, 0x1bf08eb00, 0xc00008e420) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 238 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc000056840, 0x1bf08eb00, 0xc00008f3e0) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 198 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc000056160, 0x1bf08eb00, 0xc000183b60) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 162 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04a88, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc000002120, 0xc0000d8000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) 
+ /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc000002100, {0xc0000d8000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc000002100, {0xc0000d8000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc000346048, {0xc0000d8000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc00008e240, {0xc0004db449, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc00008e240}, {0xc0004db449, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) + /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc0003ddf18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc000056000, {0x929fa0?, 0xc000346048}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 294 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04898, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc00014a620, 0xc0001c5000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) 
+ /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc00014a600, {0xc0001c5000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc00014a600, {0xc0001c5000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc0002da1d0, {0xc0001c5000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc0002d98c0, {0xc0003cc459, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc0002d98c0}, {0xc0003cc459, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) + /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc000169f18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc000056840, {0x929fa0?, 0xc0002da1d0}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 77 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc00017e160, 0x1bf08eb00, 0xc000183200) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 123 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04b80, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc000128520, 0xc00033d000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) 
+ /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc000128500, {0xc00033d000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc000128500, {0xc00033d000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc00017c110, {0xc00033d000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc000182e40, {0xc00052b5d9, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc000182e40}, {0xc00052b5d9, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) + /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc0003d9f18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc00017e6e0, {0x929fa0?, 0xc00017c110}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 212 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce04990, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc000128620, 0xc000241000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) 
+ /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc000128600, {0xc000241000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc000128600, {0xc000241000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc0003461b8, {0xc000241000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc00014cd80, {0xc000431469, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc00014cd80}, {0xc000431469, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) + /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc0003e3f18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc000056160, {0x929fa0?, 0xc0003461b8}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 19 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 308 [sync.Mutex.Lock]: +sync.runtime_SemacquireMutex(0x929960?, 0x0?, 0xc0004252c0?) 
+ /usr/local/go/src/runtime/sema.go:77 +0x25 +sync.(*Mutex).lockSlow(0xc0003c81d8) + /usr/local/go/src/sync/mutex.go:171 +0x213 +sync.(*Mutex).Lock(0xc0003c81d8) + /usr/local/go/src/sync/mutex.go:90 +0x55 +github.com/jxsl13/amqpx/pool.(*Connection).Recover(0xc0003c8160) + /home/behm015/Development/amqpx/pool/connection.go:300 +0x48 +github.com/jxsl13/amqpx/pool.(*Session).recover(0xc00014a100) + /home/behm015/Development/amqpx/pool/session.go:211 +0x46 +github.com/jxsl13/amqpx/pool.(*Session).Recover(0xc00014a100) + /home/behm015/Development/amqpx/pool/session.go:193 +0x89 +github.com/jxsl13/amqpx/pool.(*SessionPool).ReturnSession(0xc00014d260, 0xc00014a100, 0x1) + /home/behm015/Development/amqpx/pool/session_pool.go:155 +0x77 +github.com/jxsl13/amqpx/pool.(*Pool).ReturnSession(...) + /home/behm015/Development/amqpx/pool/pool.go:107 +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsume.func1() + /home/behm015/Development/amqpx/pool/subscriber.go:393 +0x22c +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsume(0xc000128780, 0xc00014c240) + /home/behm015/Development/amqpx/pool/subscriber.go:409 +0x6dc +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsumer(0xc000128780, 0xc00014c240, 0xc0001287e0) + /home/behm015/Development/amqpx/pool/subscriber.go:359 +0x3e5 +created by github.com/jxsl13/amqpx/pool.(*Subscriber).Start in goroutine 170 + /home/behm015/Development/amqpx/pool/subscriber.go:200 +0x5cf + +goroutine 223 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc0000562c0, 0x1bf08eb00, 0xc0002d8480) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 278 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc0000566e0, 0x1bf08eb00, 0xc0001820c0) + 
/home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 306 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce046a8, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc0001285a0, 0xc000262000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc000128580, {0xc000262000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc000128580, {0xc000262000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc0002da0d0, {0xc000262000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc00014cf00, {0xc0003cc819, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc00014cf00}, {0xc0003cc819, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) 
+ /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc0003dff18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc000056580, {0x929fa0?, 0xc0002da0d0}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 255 [IO wait]: +internal/poll.runtime_pollWait(0x7f8b9ce047a0, 0x72) + /usr/local/go/src/runtime/netpoll.go:343 +0x85 +internal/poll.(*pollDesc).wait(0xc000128120, 0xc0003ea000?, 0x0) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:84 +0xb1 +internal/poll.(*pollDesc).waitRead(...) + /usr/local/go/src/internal/poll/fd_poll_runtime.go:89 +internal/poll.(*FD).Read(0xc000128100, {0xc0003ea000, 0x1000, 0x1000}) + /usr/local/go/src/internal/poll/fd_unix.go:164 +0x3e5 +net.(*netFD).Read(0xc000128100, {0xc0003ea000, 0x1000, 0x1000}) + /usr/local/go/src/net/fd_posix.go:55 +0x4b +net.(*conn).Read(0xc0002da030, {0xc0003ea000, 0x1000, 0x1000}) + /usr/local/go/src/net/net.go:179 +0xad +bufio.(*Reader).Read(0xc0002d8420, {0xc00046d2a9, 0x7, 0x7}) + /usr/local/go/src/bufio/bufio.go:244 +0x4be +io.ReadAtLeast({0x929c40, 0xc0002d8420}, {0xc00046d2a9, 0x7, 0x7}, 0x7) + /usr/local/go/src/io/io.go:335 +0xd0 +io.ReadFull(...) 
+ /usr/local/go/src/io/io.go:354 +github.com/rabbitmq/amqp091-go.(*reader).ReadFrame(0xc0000abf18) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/read.go:49 +0x98 +github.com/rabbitmq/amqp091-go.(*Connection).reader(0xc0000562c0, {0x929fa0?, 0xc0002da030}) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:726 +0x2ab +created by github.com/rabbitmq/amqp091-go.Open in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:271 +0x67a + +goroutine 307 [select]: +github.com/rabbitmq/amqp091-go.(*Connection).heartbeater(0xc000056580, 0x1bf08eb00, 0xc000345da0) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:761 +0x26d +created by github.com/rabbitmq/amqp091-go.(*Connection).openTune in goroutine 170 + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:1016 +0xb8c + +goroutine 310 [sync.Mutex.Lock]: +sync.runtime_SemacquireMutex(0x4b3cce?, 0xd8?, 0x4a9c69?) + /usr/local/go/src/runtime/sema.go:77 +0x25 +sync.(*Mutex).lockSlow(0xc0003c81d8) + /usr/local/go/src/sync/mutex.go:171 +0x213 +sync.(*Mutex).Lock(0xc0003c81d8) + /usr/local/go/src/sync/mutex.go:90 +0x55 +github.com/jxsl13/amqpx/pool.(*Connection).channel(0xc0003c8160) + /home/behm015/Development/amqpx/pool/connection.go:356 +0x59 +github.com/jxsl13/amqpx/pool.(*Session).connect(0xc000128700) + /home/behm015/Development/amqpx/pool/session.go:164 +0x156 +github.com/jxsl13/amqpx/pool.(*Session).recover(0xc000128700) + /home/behm015/Development/amqpx/pool/session.go:219 +0x55 +github.com/jxsl13/amqpx/pool.(*Session).Recover(0xc000128700) + /home/behm015/Development/amqpx/pool/session.go:193 +0x89 +github.com/jxsl13/amqpx/pool.(*SessionPool).ReturnSession(0xc00014d260, 0xc000128700, 0x1) + /home/behm015/Development/amqpx/pool/session_pool.go:155 +0x77 +github.com/jxsl13/amqpx/pool.(*Pool).ReturnSession(...) 
+ /home/behm015/Development/amqpx/pool/pool.go:107 +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsume.func1() + /home/behm015/Development/amqpx/pool/subscriber.go:393 +0x22c +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsume(0xc000128780, 0xc00014c300) + /home/behm015/Development/amqpx/pool/subscriber.go:409 +0x6dc +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsumer(0xc000128780, 0xc00014c300, 0xc0001287e0) + /home/behm015/Development/amqpx/pool/subscriber.go:359 +0x3e5 +created by github.com/jxsl13/amqpx/pool.(*Subscriber).Start in goroutine 170 + /home/behm015/Development/amqpx/pool/subscriber.go:200 +0x5cf + +goroutine 309 [runnable]: +github.com/rabbitmq/amqp091-go.(*allocator).reserve(0xc000505fa0, 0x6d0) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/allocator.go:103 +0x105 +github.com/rabbitmq/amqp091-go.(*allocator).next(0xc000505fa0) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/allocator.go:84 +0x176 +github.com/rabbitmq/amqp091-go.(*Connection).allocateChannel(0xc000056580) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:819 +0xf6 +github.com/rabbitmq/amqp091-go.(*Connection).openChannel(0xc0003c81d8?) + /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:847 +0x33 +github.com/rabbitmq/amqp091-go.(*Connection).Channel(...) 
+ /home/behm015/go/pkg/mod/github.com/rabbitmq/amqp091-go@v1.9.0/connection.go:873 +github.com/jxsl13/amqpx/pool.(*Connection).channel(0xc0003c8160) + /home/behm015/Development/amqpx/pool/connection.go:358 +0xb6 +github.com/jxsl13/amqpx/pool.(*Session).connect(0xc00014a180) + /home/behm015/Development/amqpx/pool/session.go:164 +0x156 +github.com/jxsl13/amqpx/pool.(*Session).recover(0xc00014a180) + /home/behm015/Development/amqpx/pool/session.go:219 +0x55 +github.com/jxsl13/amqpx/pool.(*Session).Recover(0xc00014a180) + /home/behm015/Development/amqpx/pool/session.go:193 +0x89 +github.com/jxsl13/amqpx/pool.(*SessionPool).ReturnSession(0xc00014d260, 0xc00014a180, 0x1) + /home/behm015/Development/amqpx/pool/session_pool.go:155 +0x77 +github.com/jxsl13/amqpx/pool.(*Pool).ReturnSession(...) + /home/behm015/Development/amqpx/pool/pool.go:107 +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsume.func1() + /home/behm015/Development/amqpx/pool/subscriber.go:393 +0x22c +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsume(0xc000128780, 0xc00014c2a0) + /home/behm015/Development/amqpx/pool/subscriber.go:409 +0x6dc +github.com/jxsl13/amqpx/pool.(*Subscriber).batchConsumer(0xc000128780, 0xc00014c2a0, 0xc0001287e0) + /home/behm015/Development/amqpx/pool/subscriber.go:359 +0x3e5 +created by github.com/jxsl13/amqpx/pool.(*Subscriber).Start in goroutine 170 + /home/behm015/Development/amqpx/pool/subscriber.go:200 +0x5cf +FAIL github.com/jxsl13/amqpx 600.024s +FAIL \ No newline at end of file diff --git a/Makefile b/Makefile index 281401c..22ff27e 100644 --- a/Makefile +++ b/Makefile @@ -4,5 +4,8 @@ environment: docker-compose up -d +down: + docker-compose down + test: - go test -v -race -count=1 ./... \ No newline at end of file + go test -v -race -count=1 ./... 
diff --git a/README.md b/README.md index 3e49334..67768a3 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ - connection & session (channel) pooling - reconnect handling - batch processing +- pause/resume consumers - clean shutdown handling - sane defaults - resilience & robustness over performance by default (publisher & subscriber acks) @@ -37,6 +38,7 @@ import ( "os/signal" "github.com/jxsl13/amqpx" + "github.com/jxsl13/amqpx/pool" "github.com/jxsl13/amqpx/logging" ) @@ -44,21 +46,21 @@ func main() { ctx, cancel := signal.NotifyContext(context.Background()) defer cancel() - amqpx.RegisterTopologyCreator(func(t *amqpx.Topologer) error { + amqpx.RegisterTopologyCreator(func(t *pool.Topologer) error { // error handling omitted for brevity t.ExchangeDeclare("example-exchange", "topic") // durable exchange by default t.QueueDeclare("example-queue") // durable quorum queue by default t.QueueBind("example-queue", "route.name.v1.event", "example-exchange") return nil }) - amqpx.RegisterTopologyDeleter(func(t *amqpx.Topologer) error { + amqpx.RegisterTopologyDeleter(func(t *pool.Topologer) error { // error handling omitted for brevity t.QueueDelete("example-queue") t.ExchangeDelete("example-exchange") return nil }) - amqpx.RegisterHandler("example-queue", func(msg amqpx.Delivery) error { + amqpx.RegisterHandler("example-queue", func(msg pool.Delivery) error { fmt.Println("received message:", string(msg.Body)) fmt.Println("canceling context") cancel() @@ -73,7 +75,7 @@ func main() { ) defer amqpx.Close() - amqpx.Publish("example-exchange", "route.name.v1.event", amqpx.Publishing{ + amqpx.Publish("example-exchange", "route.name.v1.event", pool.Publishing{ ContentType: "application/json", Body: []byte("my test event"), }) @@ -95,6 +97,7 @@ import ( "os/signal" "github.com/jxsl13/amqpx" + "github.com/jxsl13/amqpx/pool" "github.com/jxsl13/amqpx/logging" ) @@ -113,18 +116,18 @@ func main() { ctx, cancel := signal.NotifyContext(context.Background()) defer cancel() - 
amqpx.RegisterTopologyCreator(func(t *amqpx.Topologer) error { + amqpx.RegisterTopologyCreator(func(t *pool.Topologer) error { // error handling omitted for brevity t.ExchangeDeclare("example-exchange", "topic", - amqpx.ExchangeDeclareOptions{ + pool.ExchangeDeclareOptions{ Durable: true, }, ) t.QueueDeclare("example-queue", - amqpx.QueueDeclareOptions{ + pool.QueueDeclareOptions{ Durable: true, - Args: amqpx.QuorumQueue, + Args: pool.QuorumQueue, }, ) t.QueueBind("example-queue", "route.name.v1.event", "example-exchange") @@ -139,7 +142,7 @@ func main() { amqpx.RegisterHandler("example-queue", ExampleConsumer(cancel), - amqpx.ConsumeOptions{ + pool.ConsumeOptions{ ConsumerTag: "example-queue-cunsumer", Exclusive: true, }, diff --git a/amqpx.go b/amqpx.go index 692cd5d..4d53a9c 100644 --- a/amqpx.go +++ b/amqpx.go @@ -1,13 +1,14 @@ package amqpx import ( + "context" + "errors" "fmt" "strings" "sync" "time" "github.com/jxsl13/amqpx/pool" - "github.com/rabbitmq/amqp091-go" ) var ( @@ -16,37 +17,7 @@ var ( ) type ( - Table = amqp091.Table - - Topologer = pool.Topologer - TopologyFunc func(*Topologer) error - - Delivery = amqp091.Delivery - HandlerFunc = func(Delivery) error - BatchHandlerFunc = func([]Delivery) error - - ConsumeOptions = pool.ConsumeOptions - Publishing = pool.Publishing - - ExchangeDeclareOptions = pool.ExchangeDeclareOptions - ExchangeDeleteOptions = pool.ExchangeDeleteOptions - ExchangeBindOptions = pool.ExchangeBindOptions - ExchangeUnbindOptions = pool.ExchangeUnbindOptions - - QueueDeclareOptions = pool.QueueDeclareOptions - QueueDeleteOptions = pool.QueueDeleteOptions - QueueBindOptions = pool.QueueBindOptions -) - -var ( - // QuorumQueue is the argument you need to pass in order to create a quorum queue. 
- QuorumQueue = pool.QuorumQueue -) - -const ( - // DeadLetterExchangeKey can be used in order to create a dead letter exchange - // https://www.rabbitmq.com/dlx.html - DeadLetterExchangeKey = "x-dead-letter-exchange" + TopologyFunc func(*pool.Topologer) error ) type AMQPX struct { @@ -55,12 +26,14 @@ type AMQPX struct { sub *pool.Subscriber mu sync.Mutex - handlers []pool.Handler - batchHandlers []pool.BatchHandler + handlers []*pool.Handler + batchHandlers []*pool.BatchHandler topologies []TopologyFunc topologyDeleters []TopologyFunc + closeTimeout time.Duration + startOnce sync.Once closeOnce sync.Once } @@ -71,13 +44,34 @@ func New() *AMQPX { pub: nil, sub: nil, - handlers: make([]pool.Handler, 0), - batchHandlers: make([]pool.BatchHandler, 0), + handlers: make([]*pool.Handler, 0), + batchHandlers: make([]*pool.BatchHandler, 0), topologies: make([]TopologyFunc, 0), topologyDeleters: make([]TopologyFunc, 0), } } +// Reset closes the current package and resets its state before it was initialized and started. +func (a *AMQPX) Reset() error { + a.mu.Lock() + defer a.mu.Unlock() + err := a.close() + + a.pubPool = nil + a.pub = nil + a.sub = nil + + a.handlers = make([]*pool.Handler, 0) + a.batchHandlers = make([]*pool.BatchHandler, 0) + + a.topologies = make([]TopologyFunc, 0) + a.topologyDeleters = make([]TopologyFunc, 0) + + a.startOnce = sync.Once{} + a.closeOnce = sync.Once{} + return err +} + // NewURL creates a new connection string for the NewSessionFactory // hostname: e.g. localhost // port: e.g. 5672 @@ -120,29 +114,24 @@ func (a *AMQPX) RegisterTopologyDeleter(finalizer TopologyFunc) { // RegisterHandler registers a handler function for a specific queue. 
// consumer can be set to a unique consumer name (if left empty, a unique name will be generated) -func (a *AMQPX) RegisterHandler(queue string, handlerFunc HandlerFunc, option ...ConsumeOptions) { - if handlerFunc == nil { - panic("handlerFunc must not be nil") - } - +func (a *AMQPX) RegisterHandler(queue string, handlerFunc pool.HandlerFunc, option ...pool.ConsumeOptions) *pool.Handler { a.mu.Lock() defer a.mu.Unlock() - o := ConsumeOptions{} + o := pool.ConsumeOptions{} if len(option) > 0 { o = option[0] } - a.handlers = append(a.handlers, pool.Handler{ - Queue: queue, - ConsumeOptions: o, - HandlerFunc: handlerFunc, - }) + handler := pool.NewHandler(queue, handlerFunc, o) + a.handlers = append(a.handlers, handler) + return handler } // RegisterBatchHandler registers a handler function for a specific queue that processes batches. // consumer can be set to a unique consumer name (if left empty, a unique name will be generated) -func (a *AMQPX) RegisterBatchHandler(queue string, maxBatchSize int, flushTimeout time.Duration, handlerFunc BatchHandlerFunc, option ...ConsumeOptions) { +func (a *AMQPX) RegisterBatchHandler(queue string, handlerFunc pool.BatchHandlerFunc, option ...pool.BatchHandlerOption) *pool.BatchHandler { + // maxBatchSize int, flushTimeout time.Duration, if handlerFunc == nil { panic("handlerFunc must not be nil") } @@ -150,18 +139,9 @@ func (a *AMQPX) RegisterBatchHandler(queue string, maxBatchSize int, flushTimeou a.mu.Lock() defer a.mu.Unlock() - o := ConsumeOptions{} - if len(option) > 0 { - o = option[0] - } - - a.batchHandlers = append(a.batchHandlers, pool.BatchHandler{ - Queue: queue, - MaxBatchSize: maxBatchSize, - FlushTimeout: flushTimeout, - ConsumeOptions: o, - HandlerFunc: handlerFunc, - }) + handler := pool.NewBatchHandler(queue, handlerFunc, option...) + a.batchHandlers = append(a.batchHandlers, handler) + return handler } // Start starts the subscriber and publisher pools. 
@@ -185,12 +165,17 @@ func (a *AMQPX) Start(connectUrl string, options ...Option) (err error) { PublisherConnections: 1, PublisherSessions: 10, SubscriberConnections: 1, + CloseTimeout: 15 * time.Second, } for _, o := range options { o(&option) } + // affects the topology deleter when close is called + // which stops deleting or reconnecting after the timeout + a.closeTimeout = option.CloseTimeout + // publisher and subscriber need to have different tcp connections (tcp pushback prevention) a.pubPool, err = pool.New( connectUrl, @@ -226,10 +211,13 @@ func (a *AMQPX) Start(connectUrl string, options ...Option) (err error) { // create subscriber pool in case handlers were registered requiredHandlers := len(a.handlers) + len(a.batchHandlers) if requiredHandlers > 0 { - sessions := requiredHandlers - connections := option.SubscriberConnections - if connections < 0 || sessions < connections { - connections = sessions + var ( + sessions = requiredHandlers + connections = requiredHandlers + ) + + if option.SubscriberConnections > connections { + connections = option.SubscriberConnections } // subscriber needs as many channels as there are handler functions @@ -247,14 +235,19 @@ func (a *AMQPX) Start(connectUrl string, options ...Option) (err error) { if err != nil { return } - a.sub = pool.NewSubscriber(subPool, pool.SubscriberWithAutoClosePool(true)) + a.sub = pool.NewSubscriber(subPool, + pool.SubscriberWithAutoClosePool(true), + ) for _, h := range a.handlers { a.sub.RegisterHandler(h) } for _, bh := range a.batchHandlers { a.sub.RegisterBatchHandler(bh) } - a.sub.Start() + err = a.sub.Start() + if err != nil { + return + } } }) return err @@ -277,16 +270,21 @@ func (a *AMQPX) close() (err error) { a.pub.Close() } - if a.pubPool != nil { - topologer := pool.NewTopologer(a.pubPool) + if a.pubPool != nil && len(a.topologyDeleters) > 0 { + ctx, cancel := context.WithTimeout(context.Background(), a.closeTimeout) + defer cancel() + + topologer := pool.NewTopologer( + 
a.pubPool, + pool.TopologerWithContext(ctx), + pool.TopologerWithTransientSessions(true), + ) for _, f := range a.topologyDeleters { - e := f(topologer) - if e != nil { - err = e - return - } + err = errors.Join(err, f(topologer)) } + } + if a.pubPool != nil { // finally close the publisher pool // which is also used for topology. a.pubPool.Close() @@ -296,7 +294,8 @@ func (a *AMQPX) close() (err error) { } // Publish a message to a specific exchange with a given routingKey. -func (a *AMQPX) Publish(exchange string, routingKey string, msg Publishing) error { +// You may set exchange to "" and routingKey to your queue name in order to publish directly to a queue. +func (a *AMQPX) Publish(exchange string, routingKey string, msg pool.Publishing) error { a.mu.Lock() defer a.mu.Unlock() if a.pub == nil { @@ -307,7 +306,7 @@ func (a *AMQPX) Publish(exchange string, routingKey string, msg Publishing) erro } // Get is only supposed to be used for testing, do not use get for polling any broker queues. -func (a *AMQPX) Get(queue string, autoAck bool) (msg *Delivery, ok bool, err error) { +func (a *AMQPX) Get(queue string, autoAck bool) (msg pool.Delivery, ok bool, err error) { a.mu.Lock() defer a.mu.Unlock() if a.pub == nil { @@ -318,24 +317,6 @@ func (a *AMQPX) Get(queue string, autoAck bool) (msg *Delivery, ok bool, err err return a.pub.Get(queue, autoAck) } -// Reset closes the current package and resets its state before it was initialized and started. -func (a *AMQPX) Reset() { - a.mu.Lock() - defer a.mu.Unlock() - a.close() - - a.pubPool = nil - a.pub = nil - a.sub = nil - - a.handlers = make([]pool.Handler, 0) - a.topologies = make([]TopologyFunc, 0) - a.topologyDeleters = make([]TopologyFunc, 0) - - a.startOnce = sync.Once{} - a.closeOnce = sync.Once{} -} - // RegisterTopology registers a topology creating function that is called upon // Start. The creation of topologie sis the first step before any publisher or subscriber is started. 
func RegisterTopologyCreator(topology TopologyFunc) { @@ -350,8 +331,16 @@ func RegisterTopologyDeleter(finalizer TopologyFunc) { // RegisterHandler registers a handler function for a specific queue. // consumer can be set to a unique consumer name (if left empty, a unique name will be generated) -func RegisterHandler(queue string, handlerFunc HandlerFunc, option ...ConsumeOptions) { - amqpx.RegisterHandler(queue, handlerFunc, option...) +// The returned handler can be used to pause message processing and resume paused processing. +// The processing must have been started with Start before it can be paused or resumed. +func RegisterHandler(queue string, handlerFunc pool.HandlerFunc, option ...pool.ConsumeOptions) *pool.Handler { + return amqpx.RegisterHandler(queue, handlerFunc, option...) +} + +// RegisterBatchHandler registers a handler function for a specific queue that processes batches. +// consumer can be set to a unique consumer name (if left empty, a unique name will be generated) +func RegisterBatchHandler(queue string, handlerFunc pool.BatchHandlerFunc, option ...pool.BatchHandlerOption) *pool.BatchHandler { + return amqpx.RegisterBatchHandler(queue, handlerFunc, option...) } // Start starts the subscriber and publisher pools. @@ -371,16 +360,17 @@ func Close() error { } // Publish a message to a specific exchange with a given routingKey. -func Publish(exchange string, routingKey string, msg Publishing) error { +// You may set exchange to "" and routingKey to your queue name in order to publish directly to a queue. +func Publish(exchange string, routingKey string, msg pool.Publishing) error { return amqpx.Publish(exchange, routingKey, msg) } // Get is only supposed to be used for testing, do not use get for polling any broker queues. 
-func Get(queue string, autoAck bool) (msg *Delivery, ok bool, err error) {
+func Get(queue string, autoAck bool) (msg pool.Delivery, ok bool, err error) {
 	return amqpx.Get(queue, autoAck)
 }
 
 // Reset closes the current package and resets its state before it was initialized and started.
-func Reset() {
-	amqpx.Reset()
+func Reset() error {
+	return amqpx.Reset()
 }
diff --git a/amqpx_options.go b/amqpx_options.go
index 34ba71c..0cdf1cf 100644
--- a/amqpx_options.go
+++ b/amqpx_options.go
@@ -17,6 +17,8 @@ type option struct {
 	PublisherConnections  int
 	PublisherSessions     int
 	SubscriberConnections int
+
+	CloseTimeout time.Duration
 }
 
 type Option func(*option)
@@ -142,3 +144,16 @@ func WithPoolOption(po pool.Option) Option {
 		o.PoolOptions = append(o.PoolOptions, po)
 	}
 }
+
+// WithCloseTimeout affects the duration that the topology deleter functions are allowed to delete topologies.
+// This timeout is especially interesting for containerized environments where containers may potentially be killed after
+// a specific timeout. So we want to cancel deletion operations before that hard kill comes into play.
+func WithCloseTimeout(timeout time.Duration) Option {
+	return func(o *option) {
+		if timeout <= 0 {
+			o.CloseTimeout = 15 * time.Second
+		} else {
+			o.CloseTimeout = timeout
+		}
+	}
+}
diff --git a/amqpx_test.go b/amqpx_test.go
index 0180263..68061b8 100644
--- a/amqpx_test.go
+++ b/amqpx_test.go
@@ -2,7 +2,9 @@ package amqpx_test
 
 import (
 	"context"
+	"fmt"
 	"os/signal"
+	"sync"
 	"syscall"
 	"testing"
 	"time"
@@ -11,109 +13,79 @@ import (
 	"github.com/jxsl13/amqpx/logging"
 	"github.com/jxsl13/amqpx/pool"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )
 
+var (
+	connectURL = amqpx.NewURL("localhost", 5672, "admin", "password")
+)
+
+// WARNING: Do not assert consumer counts, as those values are too flaky and break tests all over the place
 func TestMain(m *testing.M) {
-	goleak.VerifyTestMain(m)
+	goleak.VerifyTestMain(
+		m,
+		goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
+		goleak.IgnoreTopFunction("github.com/rabbitmq/amqp091-go.(*Connection).heartbeater"),
+		goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"),
+	)
 }
 
-func createTopology(t *amqpx.Topologer) error {
-	// documentation: https://www.cloudamqp.com/blog/part4-rabbitmq-for-beginners-exchanges-routing-keys-bindings.html#:~:text=The%20routing%20key%20is%20a%20message%20attribute%20added%20to%20the,routing%20key%20of%20the%20message.
- err := t.ExchangeDeclare("exchange-01", "topic") - if err != nil { - return err - } - - err = t.QueueDeclare("queue-01") - if err != nil { - return err - } - - err = t.QueueBind("queue-01", "event-01", "exchange-01") - if err != nil { - return err - } - - err = t.ExchangeDeclare("exchange-02", "topic") - if err != nil { - return err - } - - err = t.QueueDeclare("queue-02") - if err != nil { - return err - } - - err = t.QueueBind("queue-02", "event-02", "exchange-02") - if err != nil { - return err - } +func TestExchangeDeclarePassive(t *testing.T) { + defer amqpx.Reset() - err = t.ExchangeDeclare("exchange-03", "topic") - if err != nil { - return err - } + eName := "exchange-01" + var err error + amqpx.RegisterTopologyCreator(func(t *pool.Topologer) error { + return createExchange(eName, t) + }) - err = t.QueueDeclare("queue-03") - if err != nil { - return err - } + amqpx.RegisterTopologyDeleter(func(t *pool.Topologer) error { + return deleteExchange(eName, t) + }) - err = t.QueueBind("queue-03", "event-03", "exchange-03") - if err != nil { - return err - } - return nil + err = amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewTestLogger(t)), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(2), + ) + assert.NoError(t, err) } -func deleteTopology(t *amqpx.Topologer) error { - _, err := t.QueueDelete("queue-01") - if err != nil { - return err - } - - _, err = t.QueueDelete("queue-02") - if err != nil { - return err - } - - _, err = t.QueueDelete("queue-03") - if err != nil { - return err - } - - err = t.ExchangeDelete("exchange-01") - if err != nil { - return err - } +func TestQueueDeclarePassive(t *testing.T) { + defer amqpx.Reset() - err = t.ExchangeDelete("exchange-02") - if err != nil { - return err - } + qName := "queue-01" + var err error + amqpx.RegisterTopologyCreator(func(t *pool.Topologer) error { + return createQueue(qName, t) + }) - err = t.ExchangeDelete("exchange-03") - if err != nil { - return err - } + 
amqpx.RegisterTopologyDeleter(func(t *pool.Topologer) error { + return deleteQueue(qName, t) + }) - return nil + err = amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewTestLogger(t)), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(2), + ) + assert.NoError(t, err) } func TestAMQPXPub(t *testing.T) { - log := logging.NewTestLogger(t) defer amqpx.Reset() amqpx.RegisterTopologyCreator(createTopology) amqpx.RegisterTopologyDeleter(deleteTopology) err := amqpx.Start( - amqpx.NewURL("localhost", 5672, "admin", "password"), - amqpx.WithLogger(log), + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), amqpx.WithPublisherConnections(1), amqpx.WithPublisherSessions(2), - amqpx.WithPoolOption(pool.WithSlowClose(true)), // needed for goroutine leak tests ) if err != nil { assert.NoError(t, err) @@ -128,7 +100,7 @@ func TestAMQPXPub(t *testing.T) { event := "TestAMQPXPub - event content" // publish event to first queue - err = amqpx.Publish("exchange-01", "event-01", amqpx.Publishing{ + err = amqpx.Publish("exchange-01", "event-01", pool.Publishing{ ContentType: "application/json", Body: []byte(event), }) @@ -138,7 +110,7 @@ func TestAMQPXPub(t *testing.T) { } var ( - msg *amqpx.Delivery + msg pool.Delivery ok bool ) for i := 0; i < 20; i++ { @@ -154,8 +126,7 @@ func TestAMQPXPub(t *testing.T) { break } - if msg == nil || !ok { - assert.NotNil(t, msg) + if !ok { assert.True(t, ok) return } @@ -175,18 +146,17 @@ func TestAMQPXSubAndPub(t *testing.T) { eventContent := "TestAMQPXSubAndPub - event content" - amqpx.RegisterHandler("queue-01", func(msg amqpx.Delivery) error { + amqpx.RegisterHandler("queue-01", func(msg pool.Delivery) error { log.Info("subscriber of queue-01") cancel() return nil }) err := amqpx.Start( - amqpx.NewURL("localhost", 5672, "admin", "password"), - amqpx.WithLogger(log), + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), amqpx.WithPublisherConnections(1), amqpx.WithPublisherSessions(5), - 
amqpx.WithPoolOption(pool.WithSlowClose(true)), // needed for goroutine leaks tests ) if err != nil { assert.NoError(t, err) @@ -195,7 +165,7 @@ func TestAMQPXSubAndPub(t *testing.T) { // publish event to first queue - err = amqpx.Publish("exchange-01", "event-01", amqpx.Publishing{ + err = amqpx.Publish("exchange-01", "event-01", pool.Publishing{ ContentType: "application/json", Body: []byte(eventContent), }) @@ -224,27 +194,28 @@ func TestAMQPXSubAndPubMulti(t *testing.T) { eventContent := "TestAMQPXSubAndPub - event content" // publish -> queue-01 -> subscriber-01 -> queue-02 -> subscriber-02 -> queue-03 -> subscriber-03 -> cancel context - amqpx.RegisterHandler("queue-01", func(msg amqpx.Delivery) error { + amqpx.RegisterHandler("queue-01", func(msg pool.Delivery) error { log.Info("handler of subscriber-01") - err := amqpx.Publish("exchange-02", "event-02", amqpx.Publishing{ + err := amqpx.Publish("exchange-02", "event-02", pool.Publishing{ ContentType: msg.ContentType, Body: msg.Body, }) if err != nil { log.Error("subscriber-01:", err) + assert.NoError(t, err) } return nil }, - amqpx.ConsumeOptions{ConsumerTag: "subscriber-01"}, + pool.ConsumeOptions{ConsumerTag: "subscriber-01"}, ) - amqpx.RegisterHandler("queue-02", func(msg amqpx.Delivery) error { + amqpx.RegisterHandler("queue-02", func(msg pool.Delivery) error { log.Info("handler of subscriber-02") - err := amqpx.Publish("exchange-03", "event-03", amqpx.Publishing{ + err := amqpx.Publish("exchange-03", "event-03", pool.Publishing{ ContentType: msg.ContentType, Body: msg.Body, }) @@ -254,20 +225,19 @@ func TestAMQPXSubAndPubMulti(t *testing.T) { } return nil - }, amqpx.ConsumeOptions{ConsumerTag: "subscriber-02"}) + }, pool.ConsumeOptions{ConsumerTag: "subscriber-02"}) - amqpx.RegisterHandler("queue-03", func(msg amqpx.Delivery) error { + amqpx.RegisterHandler("queue-03", func(msg pool.Delivery) error { log.Info("handler of subscriber-03: canceling context!") cancel() return nil - }, 
amqpx.ConsumeOptions{ConsumerTag: "subscriber-03"}) + }, pool.ConsumeOptions{ConsumerTag: "subscriber-03"}) err := amqpx.Start( - amqpx.NewURL("localhost", 5672, "admin", "password"), + connectURL, amqpx.WithLogger(log), amqpx.WithPublisherConnections(1), amqpx.WithPublisherSessions(5), - amqpx.WithPoolOption(pool.WithSlowClose(true)), // needed for goroutine leak tests ) if err != nil { assert.NoError(t, err) @@ -276,7 +246,54 @@ func TestAMQPXSubAndPubMulti(t *testing.T) { // publish event to first queue - err = amqpx.Publish("exchange-01", "event-01", amqpx.Publishing{ + err = amqpx.Publish("exchange-01", "event-01", pool.Publishing{ + ContentType: "application/json", + Body: []byte(eventContent), + }) + if err != nil { + assert.NoError(t, err) + return + } + + // will be canceled when the event has reache dthe third handler + <-ctx.Done() + log.Info("context canceled, closing test.") + err = amqpx.Close() + assert.NoError(t, err) +} + +func TestAMQPXSubHandler(t *testing.T) { + log := logging.NewTestLogger(t) + defer amqpx.Reset() + + amqpx.RegisterTopologyCreator(createTopology) + amqpx.RegisterTopologyDeleter(deleteTopology) + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGINT) + defer cancel() + + eventContent := "TestAMQPXSubAndPub - event content" + + amqpx.RegisterHandler("queue-01", func(msg pool.Delivery) error { + log.Info("subscriber of queue-01") + cancel() + return nil + }) + + err := amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + ) + if err != nil { + assert.NoError(t, err) + return + } + + // publish event to first queue + + err = amqpx.Publish("exchange-01", "event-01", pool.Publishing{ ContentType: "application/json", Body: []byte(eventContent), }) @@ -291,3 +308,568 @@ func TestAMQPXSubAndPubMulti(t *testing.T) { err = amqpx.Close() assert.NoError(t, err) } + +func TestCreateDeleteTopology(t *testing.T) { + 
log := logging.NewTestLogger(t) + defer amqpx.Reset() + + amqpx.RegisterTopologyCreator(createTopology) + amqpx.RegisterTopologyDeleter(deleteTopology) + + err := amqpx.Start( + connectURL, + amqpx.WithLogger(log), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(2), + ) + assert.NoError(t, err) +} + +func TestPauseResumeHandlerNoProcessing(t *testing.T) { + var err error + queueName := "testPauseResumeHandler-01" + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGINT) + defer cancel() + + log := logging.NewTestLogger(t) + + amqp := amqpx.New() + amqp.RegisterTopologyCreator(func(t *pool.Topologer) error { + _, err := t.QueueDeclare(queueName) + if err != nil { + return err + } + return nil + }) + + amqp.RegisterTopologyDeleter(func(t *pool.Topologer) error { + _, err := t.QueueDelete(queueName) + if err != nil { + return err + } + return nil + }) + + handler := amqp.RegisterHandler(queueName, func(d pool.Delivery) error { + log.Info("received message") + return nil + }) + + err = amqp.Start(connectURL, + amqpx.WithLogger( + logging.NewNoOpLogger(), + ), + amqpx.WithContext(ctx), + ) + if err != nil { + assert.NoError(t, err) + return + } + defer func() { + err = amqp.Close() + assert.NoError(t, err) + }() + + for i := 0; i < 5; i++ { + t.Logf("iteration %d", i) + assertActive(t, handler, true) + + err = handler.Pause(context.Background()) + if err != nil { + assert.NoError(t, err) + return + } + + assertActive(t, handler, false) + + err = handler.Resume(context.Background()) + if err != nil { + assert.NoError(t, err) + return + } + + assertActive(t, handler, true) + } +} + +func TestHandlerPauseAndResume(t *testing.T) { + for i := 0; i < 10; i++ { + t.Logf("iteration %d", i) + testHandlerPauseAndResume(t) + } +} + +func testHandlerPauseAndResume(t *testing.T) { + var err error + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGINT) + defer cancel() + + log := 
logging.NewTestLogger(t) + defer func() { + assert.NoError(t, amqpx.Reset()) + }() + + options := []amqpx.Option{ + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + } + + amqpxPublish := amqpx.New() + amqpxPublish.RegisterTopologyCreator(createTopology) + + eventContent := "TestHandlerPauseAndResume - event content" + + var ( + publish = 500 + cnt = 0 + ) + + // step 1 - fill queue with messages + amqpx.RegisterTopologyDeleter(deleteTopology) + err = amqpxPublish.Start(connectURL, options...) + require.NoError(t, err) + + // fill queue with messages + for i := 0; i < publish; i++ { + err := amqpxPublish.Publish("exchange-01", "event-01", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: message number %d", eventContent, i)), + }) + if err != nil { + assert.NoError(t, err) + return + } + } + + // step 2 - process messages, pause, wait, resume, process rest, cancel context + handler01 := amqpx.RegisterHandler("queue-01", func(msg pool.Delivery) (err error) { + cnt++ + if cnt == publish/3 || cnt == publish/3*2 { + err = amqpx.Publish("exchange-02", "event-02", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: hit %d messages, toggling processing", eventContent, cnt)), + }) + assert.NoError(t, err) + } + + return nil + }) + + running := true + amqpx.RegisterHandler("queue-02", func(msg pool.Delivery) (err error) { + log.Infof("received toggle request: %s", string(msg.Body)) + queue := handler01.Queue() + + if running { + running = false + + assertActive(t, handler01, true) + + err = handler01.Pause(context.Background()) + assert.NoError(t, err) + + assertActive(t, handler01, false) + } else { + running = true + + assertActive(t, handler01, false) + + err = handler01.Resume(context.Background()) + assert.NoError(t, err) + log.Infof("resumed processing of %s", queue) + + assertActive(t, handler01, true) + + // trigger cancelation + err = 
amqpxPublish.Publish("exchange-03", "event-03", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: delayed toggle back", eventContent)), + }) + assert.NoError(t, err) + } + return nil + }) + + var once sync.Once + amqpx.RegisterHandler("queue-03", func(msg pool.Delivery) (err error) { + once.Do(func() { + + log.Info("pausing handler") + assertActive(t, handler01, true) + err = handler01.Pause(context.Background()) + if err != nil { + assert.NoError(t, err) + return + } + assertActive(t, handler01, false) + + go func() { + // delay cancelation (due to ack) + time.Sleep(3 * time.Second) + cancel() + }() + }) + return nil + }) + + assertActive(t, handler01, false) + + err = amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + ) + if err != nil { + assert.NoError(t, err) + return + } + + // will be canceled when the event has reache dthe third handler + <-ctx.Done() + log.Info("context canceled, closing test.") + assert.NoError(t, amqpx.Close()) + assertActive(t, handler01, false) +} + +func TestBatchHandlerPauseAndResume(t *testing.T) { + for i := 0; i < 10; i++ { + testBatchHandlerPauseAndResume(t) + } +} + +func testBatchHandlerPauseAndResume(t *testing.T) { + var err error + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGINT) + defer cancel() + + log := logging.NewTestLogger(t) + defer func() { + assert.NoError(t, amqpx.Reset()) + }() + + options := []amqpx.Option{ + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + } + + amqpxPublish := amqpx.New() + amqpxPublish.RegisterTopologyCreator(createTopology) + err = amqpxPublish.Start(connectURL, options...) 
+ require.NoError(t, err) + + eventContent := "TestBatchHandlerPauseAndResume - event content" + + var ( + publish = 500 + cnt = 0 + ) + + // step 1 - fill queue with messages + amqpx.RegisterTopologyDeleter(deleteTopology) + + // fill queue with messages + for i := 0; i < publish; i++ { + err := amqpxPublish.Publish("exchange-01", "event-01", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: message number %d", eventContent, i)), + }) + if err != nil { + assert.NoError(t, err) + return + } + } + + // step 2 - process messages, pause, wait, resume, process rest, cancel context + handler01 := amqpx.RegisterBatchHandler("queue-01", func(msgs []pool.Delivery) (err error) { + for _, msg := range msgs { + cnt++ + if cnt == publish/3 || cnt == publish/3*2 { + err = amqpx.Publish("exchange-02", "event-02", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: hit %d messages, toggling processing: %s", eventContent, cnt, string(msg.Body))), + }) + assert.NoError(t, err) + } + } + return nil + }) + + running := true + amqpx.RegisterBatchHandler("queue-02", func(msgs []pool.Delivery) (err error) { + queue := handler01.Queue() + + for _, msg := range msgs { + log.Infof("received toggle request: %s", string(msg.Body)) + if running { + running = false + + assertActive(t, handler01, true) + + err = handler01.Pause(context.Background()) + assert.NoError(t, err) + + assertActive(t, handler01, false) + } else { + running = true + + assertActive(t, handler01, false) + + err = handler01.Resume(context.Background()) + assert.NoError(t, err) + log.Infof("resumed processing of %s", queue) + + assertActive(t, handler01, true) + + // trigger cancelation + err = amqpxPublish.Publish("exchange-03", "event-03", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: delayed toggle back", eventContent)), + }) + assert.NoError(t, err) + } + } + return nil + }) + + var once sync.Once + 
amqpx.RegisterBatchHandler("queue-03", func(msgs []pool.Delivery) (err error) { + _ = msgs[0] + once.Do(func() { + + assertActive(t, handler01, true) + err = handler01.Pause(context.Background()) + if err != nil { + assert.NoError(t, err) + return + } + assertActive(t, handler01, false) + + go func() { + // delay cancelation (due to ack) + time.Sleep(3 * time.Second) + cancel() + }() + }) + + return nil + }) + + assertActive(t, handler01, false) + + err = amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + ) + if err != nil { + assert.NoError(t, err) + return + } + + // will be canceled when the event has reache dthe third handler + <-ctx.Done() + log.Info("context canceled, closing test.") + assert.NoError(t, amqpx.Close()) + assertActive(t, handler01, false) +} + +type handlerStats interface { + Queue() string + IsActive(ctx context.Context) (active bool, err error) +} + +func TestHandlerReset(t *testing.T) { + for i := 0; i < 5; i++ { + testHandlerReset(t) + } + t.Log("done") +} + +func testHandlerReset(t *testing.T) { + var err error + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGINT) + defer cancel() + + log := logging.NewTestLogger(t) + defer func() { + assert.NoError(t, amqpx.Reset()) + }() + + options := []amqpx.Option{ + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + } + + amqpxPublish := amqpx.New() + amqpxPublish.RegisterTopologyCreator(createTopology) + err = amqpxPublish.Start(connectURL, options...) 
+ require.NoError(t, err) + + eventContent := "TestBatchHandlerReset - event content" + + var ( + publish = 50 + cnt = 0 + ) + + // step 1 - fill queue with messages + amqpx.RegisterTopologyDeleter(deleteTopology) + + // fill queue with messages + for i := 0; i < publish; i++ { + err := amqpxPublish.Publish("exchange-01", "event-01", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: message number %d", eventContent, i)), + }) + if err != nil { + assert.NoError(t, err) + return + } + } + + done := make(chan struct{}) + // step 2 - process messages, pause, wait, resume, process rest, cancel context + handler01 := amqpx.RegisterHandler("queue-01", func(msgs pool.Delivery) (err error) { + cnt++ + if cnt == publish { + close(done) + } + return nil + }) + + assertActive(t, handler01, false) + + err = amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + ) + if err != nil { + assert.NoError(t, err) + return + } + + assertActive(t, handler01, true) + + // will be canceled when the event has reached the third handler + <-done + cancel() + + <-ctx.Done() + log.Info("context canceled, closing test.") + assert.NoError(t, amqpx.Close()) + + // after close + assertActive(t, handler01, false) +} + +func TestBatchHandlerReset(t *testing.T) { + for i := 0; i < 5; i++ { + testBatchHandlerReset(t) + } + t.Log("done") +} + +func testBatchHandlerReset(t *testing.T) { + var err error + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGINT) + defer cancel() + + log := logging.NewTestLogger(t) + defer func() { + assert.NoError(t, amqpx.Reset()) + }() + + options := []amqpx.Option{ + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + } + + amqpxPublish := amqpx.New() + amqpxPublish.RegisterTopologyCreator(createTopology) + err = amqpxPublish.Start(connectURL, 
options...) + require.NoError(t, err) + + eventContent := "TestBatchHandlerReset - event content" + + var ( + publish = 50 + cnt = 0 + ) + + // step 1 - fill queue with messages + amqpx.RegisterTopologyDeleter(deleteTopology) + + // fill queue with messages + for i := 0; i < publish; i++ { + err := amqpxPublish.Publish("exchange-01", "event-01", pool.Publishing{ + ContentType: "application/json", + Body: []byte(fmt.Sprintf("%s: message number %d", eventContent, i)), + }) + if err != nil { + assert.NoError(t, err) + return + } + } + + done := make(chan struct{}) + // step 2 - process messages, pause, wait, resume, process rest, cancel context + handler01 := amqpx.RegisterBatchHandler("queue-01", func(msgs []pool.Delivery) (err error) { + cnt += len(msgs) + + if cnt == publish { + close(done) + } + return nil + }) + + assertActive(t, handler01, false) + + err = amqpx.Start( + connectURL, + amqpx.WithLogger(logging.NewNoOpLogger()), + amqpx.WithPublisherConnections(1), + amqpx.WithPublisherSessions(5), + ) + if err != nil { + assert.NoError(t, err) + return + } + + assertActive(t, handler01, true) + + // will be canceled when the event has reached the third handler + <-done + cancel() + + <-ctx.Done() + log.Info("context canceled, closing test.") + assert.NoError(t, amqpx.Close()) + + // after close + assertActive(t, handler01, false) +} + +func assertActive(t *testing.T, handler handlerStats, expected bool) { + active, err := handler.IsActive(context.Background()) + if err != nil { + assert.NoError(t, err) + return + } + + if expected != active { + as := "active" + if !expected { + as = "inactive" + } + assert.Equalf(t, expected, active, "expected handler of queue %s to be %q", handler.Queue(), as) + return + } +} diff --git a/go.mod b/go.mod index c557ad0..f2a873d 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,20 @@ module github.com/jxsl13/amqpx -go 1.18 +go 1.20 require ( - github.com/Shopify/toxiproxy/v2 v2.6.0 + github.com/Shopify/toxiproxy/v2 v2.7.0 
github.com/Workiva/go-datastructures v1.1.1 github.com/rabbitmq/amqp091-go v1.9.0 github.com/stretchr/testify v1.8.4 - go.uber.org/goleak v1.2.1 + go.uber.org/goleak v1.3.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 7d6f7e8..b0b84af 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/Shopify/toxiproxy/v2 v2.6.0 h1:qAHKkHlGuB31epYq/nE7CJsdVVn8Nn88vBRuRhNWC9g= -github.com/Shopify/toxiproxy/v2 v2.6.0/go.mod h1:RQ4MED2Cw96l+VbfXq85MXYSwVyXoZvaZKkVznD+yrc= +github.com/Shopify/toxiproxy/v2 v2.7.0 h1:Zz2jdyqtYw1SpihfMWzLFGpOO92p9effjAkURG57ifc= +github.com/Shopify/toxiproxy/v2 v2.7.0/go.mod h1:k0V84e/dLQmVNuI6S0G7TpXCl611OSRYdptoxm0XTzA= github.com/Workiva/go-datastructures v1.1.1 h1:9G5u1UqKt6ABseAffHGNfbNQd7omRlWE5QaxNruzhE0= github.com/Workiva/go-datastructures v1.1.1/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -20,8 +20,9 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo= github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -32,8 +33,9 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= diff --git a/helpers_test.go b/helpers_test.go new file mode 100644 index 0000000..40eb281 --- /dev/null +++ b/helpers_test.go @@ -0,0 +1,170 @@ +package amqpx_test + +import ( + "errors" + "fmt" + + "github.com/jxsl13/amqpx/pool" +) + +func createTopology(t *pool.Topologer) (err error) { + // documentation: https://www.cloudamqp.com/blog/part4-rabbitmq-for-beginners-exchanges-routing-keys-bindings.html#:~:text=The%20routing%20key%20is%20a%20message%20attribute%20added%20to%20the,routing%20key%20of%20the%20message. 
+ + err = createExchange("exchange-01", t) + if err != nil { + return err + } + + err = createQueue("queue-01", t) + if err != nil { + return err + } + + err = t.QueueBind("queue-01", "event-01", "exchange-01") + if err != nil { + return err + } + + err = createExchange("exchange-02", t) + if err != nil { + return err + } + + err = createQueue("queue-02", t) + if err != nil { + return err + } + + err = t.QueueBind("queue-02", "event-02", "exchange-02") + if err != nil { + return err + } + + err = createExchange("exchange-03", t) + if err != nil { + return err + } + + err = createQueue("queue-03", t) + if err != nil { + return err + } + err = t.QueueBind("queue-03", "event-03", "exchange-03") + if err != nil { + return err + } + return nil +} + +func deleteTopology(t *pool.Topologer) (err error) { + + err = deleteQueue("queue-01", t) + if err != nil { + return err + } + + err = deleteQueue("queue-02", t) + if err != nil { + return err + } + + err = deleteQueue("queue-03", t) + if err != nil { + return err + } + + err = deleteExchange("exchange-01", t) + if err != nil { + return err + } + + err = deleteExchange("exchange-02", t) + if err != nil { + return err + } + + err = deleteExchange("exchange-03", t) + if err != nil { + return err + } + + return nil +} + +func createQueue(name string, t *pool.Topologer) (err error) { + _, err = t.QueueDeclarePassive(name) + if !errors.Is(err, pool.ErrNotFound) { + if err != nil { + return fmt.Errorf("queue %s was found even tho it should not exist: %w", name, err) + } + return fmt.Errorf("queue %s was found even tho it should not exist", name) + } + + _, err = t.QueueDeclare(name) + if err != nil { + return err + } + + _, err = t.QueueDeclarePassive(name) + if err != nil { + return fmt.Errorf("queue %s was not found even tho it should exist: %w", name, err) + } + return nil +} + +func deleteQueue(name string, t *pool.Topologer) (err error) { + _, err = t.QueueDeclarePassive(name) + if err != nil { + return fmt.Errorf("%q does 
not exist but is supposed to be deleted", name) + } + + _, err = t.QueueDelete(name) + if err != nil { + return err + } + + _, err = t.QueueDeclarePassive(name) + if err == nil { + return fmt.Errorf("%q still exists after deletion", name) + } + return nil +} + +func createExchange(name string, t *pool.Topologer) (err error) { + err = t.ExchangeDeclarePassive(name, pool.ExchangeKindTopic) + if !errors.Is(err, pool.ErrNotFound) { + if err != nil { + return fmt.Errorf("exchange %s was found even tho it should not exist: %w", name, err) + } + return fmt.Errorf("exchange %s was found even tho it should not exist", name) + } + + err = t.ExchangeDeclare(name, pool.ExchangeKindTopic) + if err != nil { + return err + } + + err = t.ExchangeDeclarePassive(name, pool.ExchangeKindTopic) + if err != nil { + return fmt.Errorf("exchange %s was not found even tho it should exist: %w", name, err) + } + return nil +} + +func deleteExchange(name string, t *pool.Topologer) (err error) { + err = t.ExchangeDeclarePassive(name, pool.ExchangeKindTopic) + if err != nil { + return fmt.Errorf("exchange %s was not found even tho it should exist: %w", name, err) + } + + err = t.ExchangeDelete(name) + if err != nil { + return err + } + + err = t.ExchangeDeclarePassive(name, pool.ExchangeKindTopic) + if !errors.Is(err, pool.ErrNotFound) { + return fmt.Errorf("exchange %s was found even tho it should not exist: %w", name, err) + } + return nil +} diff --git a/pool/connection.go b/pool/connection.go index efe04b0..f9b5fa2 100644 --- a/pool/connection.go +++ b/pool/connection.go @@ -39,8 +39,6 @@ type Connection struct { cancel context.CancelFunc log logging.Logger - - slowClose bool } // NewConnection creates a connection wrapper. 
@@ -54,7 +52,6 @@ func NewConnection(connectUrl, name string, options ...ConnectionOption) (*Conne ConnectionTimeout: 30 * time.Second, BackoffPolicy: newDefaultBackoffPolicy(time.Second, 15*time.Second), Ctx: context.Background(), - SlowClose: false, } // apply options @@ -96,8 +93,6 @@ func NewConnection(connectUrl, name string, options ...ConnectionOption) (*Conne log: option.Logger, lastConnLoss: time.Now(), - - slowClose: option.SlowClose, // for leak tests } err = conn.Connect() @@ -133,28 +128,7 @@ func (ch *Connection) Close() (err error) { ch.cancel() // close derived context if !ch.isClosed() { - // wait for dangling goroutines to timeout before closing. - // upon recovery the standard library still has some goroutines open - // that are only closed upon some tcp connection timeout. - // Those routinges poll the network. - awaitTimeout := time.Until(ch.lastConnLoss.Add(ch.conn.Config.Heartbeat)) - if ch.slowClose && awaitTimeout > 0 { - // in long running applications that were able to reestablish their connection - // this sleep should not affect their shutdown duration much. - // in short runing applications like the tests, shutdown takes longer, as we - // frequently kill the network connection in order to test the reconnects - // which requires to wait for the background network connections to timeout - // in order to prevent dangling goroutines from being killed. 
- time.Sleep(awaitTimeout) - } - return ch.conn.Close() // close internal channel - } else { - // same wait as above case - awaitTimeout := time.Until(ch.lastConnLoss.Add(ch.heartbeat)) - if ch.slowClose && awaitTimeout > 0 { - time.Sleep(awaitTimeout) - } } return nil diff --git a/pool/connection_options.go b/pool/connection_options.go index 4e4482c..badbc85 100644 --- a/pool/connection_options.go +++ b/pool/connection_options.go @@ -16,8 +16,6 @@ type connectionOption struct { BackoffPolicy BackoffFunc Ctx context.Context TLSConfig *tls.Config - - SlowClose bool // set to true for goleak tests } type ConnectionOption func(*connectionOption) @@ -81,13 +79,3 @@ func ConnectionWithTLS(config *tls.Config) ConnectionOption { co.TLSConfig = config } } - -// ConnectionWithSlowClose is only needed for integration tests. -// It waits for standard library tcp connection goroutines to properly timeout. -// So that we don't get false positives in our leak tests. -// Set to true in order to wait for dangling goroutines to timeout before closing the connection. 
-func ConnectionWithSlowClose(slowClose bool) ConnectionOption { - return func(co *connectionOption) { - co.SlowClose = slowClose - } -} diff --git a/pool/connection_pool.go b/pool/connection_pool.go index 8788b81..23a6b2e 100644 --- a/pool/connection_pool.go +++ b/pool/connection_pool.go @@ -32,8 +32,6 @@ type ConnectionPool struct { cancel context.CancelFunc log logging.Logger - - slowClose bool } // NewConnectionPool creates a new connection pool which has a maximum size it @@ -54,8 +52,7 @@ func NewConnectionPool(connectUrl string, numConns int, options ...ConnectionPoo ConnTimeout: 30 * time.Second, TLSConfig: nil, - Logger: logging.NewNoOpLogger(), - SlowClose: false, // for leak tests + Logger: logging.NewNoOpLogger(), } // apply options @@ -94,8 +91,6 @@ func newConnectionPoolFromOption(connectUrl string, option connectionPoolOption) cancel: cancel, log: option.Logger, - - slowClose: option.SlowClose, } cp.debug("initializing pool connections") @@ -139,7 +134,6 @@ func (cp *ConnectionPool) deriveCachedConnection(id int) (*Connection, error) { ConnectionWithTLS(cp.tls), ConnectionWithCached(true), ConnectionWithLogger(cp.log), - ConnectionWithSlowClose(cp.slowClose), ) } @@ -172,7 +166,6 @@ func (cp *ConnectionPool) GetTransientConnection(ctx context.Context) (*Connecti ConnectionWithHeartbeatInterval(cp.heartbeat), ConnectionWithTLS(cp.tls), ConnectionWithCached(false), - ConnectionWithSlowClose(cp.slowClose), ) if err == nil { return conn, nil diff --git a/pool/connection_pool_options.go b/pool/connection_pool_options.go index 223d571..a5d8f10 100644 --- a/pool/connection_pool_options.go +++ b/pool/connection_pool_options.go @@ -6,6 +6,8 @@ import ( "math/rand" "os" "path/filepath" + "runtime/debug" + "strings" "time" "github.com/jxsl13/amqpx/logging" @@ -22,17 +24,25 @@ type connectionPoolOption struct { TLSConfig *tls.Config Logger logging.Logger - - SlowClose bool // for leak tests } type ConnectionPoolOption func(*connectionPoolOption) func 
defaultAppName() string { + + if bi, ok := debug.ReadBuildInfo(); ok && bi.Path != "" { + parts := strings.Split(bi.Path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + } + + // fallback appNameWithExt := filepath.Base(os.Args[0]) ext := filepath.Ext(appNameWithExt) appNameWithoutExt := appNameWithExt[:len(appNameWithExt)-len(ext)] return appNameWithoutExt + } // ConnectionPoolWithLogger allows to set a custom logger. @@ -106,12 +116,21 @@ func ConnectionPoolWithTLS(config *tls.Config) ConnectionPoolOption { type BackoffFunc func(retry int) (sleep time.Duration) func newDefaultBackoffPolicy(min, max time.Duration) BackoffFunc { + r := rand.New(rand.NewSource(time.Now().Unix())) + + factor := time.Second + for _, scale := range []time.Duration{time.Hour, time.Minute, time.Second, time.Millisecond, time.Microsecond, time.Nanosecond} { + d := min.Truncate(scale) + if d > 0 { + factor = scale + break + } + } return func(retry int) (sleep time.Duration) { - r := rand.New(rand.NewSource(time.Now().Unix())) - wait := 2 << maxi(0, mini(32, retry)) * time.Second - jitter := time.Duration(r.Int63n(int64(wait) / 5)) // max 20% jitter + wait := 2 << maxi(0, mini(32, retry)) * factor + jitter := time.Duration(r.Int63n(int64(maxi(1, int(wait)/5)))) // max 20% jitter wait = min + wait + jitter if wait > max { wait = max @@ -133,12 +152,3 @@ func maxi(a, b int) int { } return b } - -// ConnectionPoolWithSlowCLose is onl yinteresting for integration tests of this library. -// We want to wait for dangling tcp standard library goroutines to timeout before we signal that -// the connection pool is closed. This option affects every connection in the connection pool. 
-func ConnectionPoolWithSlowClose(slowClose bool) ConnectionPoolOption { - return func(po *connectionPoolOption) { - po.SlowClose = slowClose - } -} diff --git a/pool/connection_pool_test.go b/pool/connection_pool_test.go index 959715d..c480cb4 100644 --- a/pool/connection_pool_test.go +++ b/pool/connection_pool_test.go @@ -12,7 +12,7 @@ import ( func TestNewConnectionPool(t *testing.T) { connections := 5 - p, err := pool.NewConnectionPool("amqp://admin:password@localhost:5672", connections, + p, err := pool.NewConnectionPool(connectURL, connections, pool.ConnectionPoolWithName("TestNewConnectionPool"), pool.ConnectionPoolWithLogger(logging.NewTestLogger(t)), ) @@ -42,7 +42,7 @@ func TestNewConnectionPool(t *testing.T) { func TestNewConnectionPoolDisconnect(t *testing.T) { connections := 100 - p, err := pool.NewConnectionPool("amqp://admin:password@localhost:5672", connections, + p, err := pool.NewConnectionPool(connectURL, connections, pool.ConnectionPoolWithName("TestNewConnectionPoolDisconnect"), pool.ConnectionPoolWithLogger(logging.NewTestLogger(t)), ) diff --git a/pool/connection_test.go b/pool/connection_test.go index b33d190..12d95de 100644 --- a/pool/connection_test.go +++ b/pool/connection_test.go @@ -14,10 +14,9 @@ import ( func TestNewSingleConnection(t *testing.T) { c, err := pool.NewConnection( - "amqp://admin:password@localhost:5672", + connectURL, "TestNewSingleConnection", pool.ConnectionWithLogger(logging.NewTestLogger(t)), - pool.ConnectionWithSlowClose(true), ) if err != nil { @@ -35,10 +34,9 @@ func TestNewSingleConnectionWithDisconnect(t *testing.T) { started() defer stopped() c, err := pool.NewConnection( - "amqp://admin:password@localhost:5672", + connectURL, "TestNewSingleConnection", pool.ConnectionWithLogger(logging.NewTestLogger(t)), - pool.ConnectionWithSlowClose(true), ) if err != nil { @@ -61,10 +59,9 @@ func TestNewConnection(t *testing.T) { defer wg.Done() c, err := pool.NewConnection( - "amqp://admin:password@localhost:5672", + 
connectURL, fmt.Sprintf("TestNewConnection-%d", id), pool.ConnectionWithLogger(logging.NewTestLogger(t)), - pool.ConnectionWithSlowClose(true), ) if err != nil { assert.NoError(t, err) @@ -98,10 +95,9 @@ func TestNewConnectionDisconnect(t *testing.T) { defer wg.Done() c, err := pool.NewConnection( - "amqp://admin:password@localhost:5672", + connectURL, fmt.Sprintf("TestNewConnectionDisconnect-%d", id), //pool.ConnectionWithLogger(logging.NewTestLogger(t)), - pool.ConnectionWithSlowClose(true), ) if err != nil { assert.NoError(t, err) diff --git a/pool/delivery.go b/pool/delivery.go new file mode 100644 index 0000000..8c30086 --- /dev/null +++ b/pool/delivery.go @@ -0,0 +1,43 @@ +package pool + +import "github.com/rabbitmq/amqp091-go" + +/* +Delivery captures the fields for a previously delivered message resident in +a queue to be delivered by the server to a consumer from Channel.Consume or +Channel.Get. + + type Delivery struct { + Acknowledger Acknowledger // the channel from which this delivery arrived + + Headers Table // Application or header exchange table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user - should be authenticated user + AppId string // application use - creating application id + + // Valid only with Channel.Consume + ConsumerTag string + + // Valid only with Channel.Get + MessageCount uint32 + + DeliveryTag uint64 + 
Redelivered bool + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + Body []byte + } +*/ +type Delivery = amqp091.Delivery diff --git a/pool/errors.go b/pool/errors.go index c602d12..dfb331c 100644 --- a/pool/errors.go +++ b/pool/errors.go @@ -1,6 +1,7 @@ package pool import ( + "context" "errors" "github.com/rabbitmq/amqp091-go" @@ -16,6 +17,10 @@ var ( errInvalidPoolSize = errors.New("invalid pool size") ErrPoolInitializationFailed = errors.New("pool initialization failed") ErrClosed = errors.New("closed") + + // ErrNotFound is returned by ExchangeDeclarePassive or QueueDeclarePassive in the case that + // the queue was not found. + ErrNotFound = errors.New("not found") ) var ( @@ -39,8 +44,7 @@ func recoverable(err error) bool { // invalid usage of the amqp protocol is not recoverable ae := &amqp091.Error{} - switch { - case errors.As(err, &ae): + if errors.As(err, &ae) { switch ae.Code { case notImplemented: return false @@ -57,6 +61,12 @@ func recoverable(err error) bool { } } + if errors.Is(err, context.Canceled) { + return false + } + + // TODO: errors.Is(err, context.DeadlineExceeded) also needed? + // every other unknown error is recoverable return true } diff --git a/pool/exchange.go b/pool/exchange.go new file mode 100644 index 0000000..32c961d --- /dev/null +++ b/pool/exchange.go @@ -0,0 +1,75 @@ +package pool + +type ExchangeKind string + +const ( + /* + The first RabbitMQ exchange type, the direct exchange, uses a message routing key to transport messages to queues. + The routing key is a message attribute that the producer adds to the message header. You can consider the routing + key to be an “address” that the exchange uses to determine how the message should be routed. A message is delivered + to the queue with the binding key that exactly matches the message’s routing key. + + The direct exchange’s default exchange is “amq. direct“, which AMQP brokers must offer for communication. 
+ As is shown in the figure, queue A (create_pdf_queue) is tied to a direct exchange (pdf_events) with the binding + key “pdf_create”. When a new message arrives at the direct exchange with the routing key “pdf_create”, the exchange + sends it to the queue where the binding key = routing key; which is queue A in this example (create_pdf_queue). + */ + ExchangeKindDirect ExchangeKind = "direct" + + /* + A fanout exchange, like direct and topic exchange, duplicates and routes a received message to any associated queues, + regardless of routing keys or pattern matching. Here, your provided keys will be entirely ignored. + + Fanout exchanges are useful when the same message needs to be passed to one or perhaps more queues with consumers who may + process the message differently. As shown in the image, a message received by the fanout exchange is copied and routed to all + three queues associated with the exchange. When something happens, such as a sporting event or weather forecast, all connected + mobile devices will be notified. For the fanout RabbitMQ exchange type, “amq.fanout” is the default exchange that must be provided + by AMQP brokers. + */ + ExchangeKindFanOut ExchangeKind = "fanout" + + /* + Topic RabbitMQ exchange type sends messages to queues depending on wildcard matches between the routing key and the queue binding’s routing pattern. + Messages are routed to one or more queues based on a pattern that matches a message routing key. A list of words separated by a period must be used + as the routing key (.). + + The routing patterns may include an asterisk (“*”) to match a word in a specified position of the routing key (for example, a routing pattern of + “agreements.*.*.b.*” only matches routing keys with “agreements” as the first word and “b” as the fourth word). + A pound symbol (“#”) denotes a match of zero or more words. + + In topic exchange, consumers indicate which topics are of interest to them. 
The consumer establishes a queue and binds it to the exchange using a + certain routing pattern. All messages with a routing key that matches the routing pattern are routed to the queue, where they will remain until + the consumer consumes them. For the topic RabbitMQ exchange type, “amq.topic” is the default topic exchange that AMQP brokers must provide for + message exchange. + */ + ExchangeKindTopic ExchangeKind = "topic" + + /* + A headers RabbitMQ exchange type is a message routing system that uses arguments with headers and optional values to route messages. + Header exchanges are identical to topic exchanges, except that instead of using routing keys, messages are routed based on header values. + If the value of the header equals the value of supply during binding, the message matches. + + In the binding between exchange and queue, a specific argument termed “x-match” indicates whether all headers must match or only one. + For the message to match, any common header between the message and the binding should match, or all of the headers referenced in the + binding must be present in the message. + + The “x-match” property has two possible values: “any” and “all,” with “all” being the default. A value of “all” indicates that all + header pairs (key, value) must match, whereas “any” indicates that at least one pair must match. Instead of a string, headers can be + built with a larger range of data types, such as integers or hashes. The headers exchange type (when used with the binding option “any”) + is useful for steering messages containing a subset of known (unordered) criteria. + + For the header RabbitMQ exchange type, “amq.headers” is the default topic exchange that AMQP brokers must supply. 
+ */ + ExchangeKindHeaders ExchangeKind = "headers" +) + +const ( + /* + ExchangeKeyDeadLetter can be used in order to create a dead letter exchange (reference: https://www.rabbitmq.com/dlx.html) + Some queue consumers may be unable to process certain alerts, and the queue itself may reject messages as a result of certain events. + For instance, a message is dropped if there is no matching queue for it. In that instance, Dead Letter Exchanges must be implemented + so that those messages can be saved and reprocessed later. The “Dead Letter Exchange” is an AMQP enhancement provided by RabbitMQ. + This exchange has the capability of capturing messages that are not deliverable. + */ + ExchangeKeyDeadLetter = "x-dead-letter-exchange" +) diff --git a/pool/helpers_context.go b/pool/helpers_context.go new file mode 100644 index 0000000..c79a90b --- /dev/null +++ b/pool/helpers_context.go @@ -0,0 +1,370 @@ +package pool + +import ( + "context" + "fmt" + "sync" +) + +func newCancelContext(parentCtx context.Context) *cancelContext { + ctx, cancel := context.WithCancel(parentCtx) + return &cancelContext{ + ctx: ctx, + cancel: cancel, + } +} + +type cancelContext struct { + mu sync.Mutex + closed bool + ctx context.Context + cancel context.CancelFunc +} + +func (c *cancelContext) CancelWithContext(ctx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.cancelWithContext(ctx) +} + +func (c *cancelContext) cancelWithContext(ctx context.Context) error { + + if ctx == nil { + panic("ctx is nil") + } + + if c.ctx == nil { + panic("c.ctx is nil") + } + + if c.closed { + return nil + } + + select { + case <-ctx.Done(): + // unexpectedly aborted cancelation + return ctx.Err() + case <-c.ctx.Done(): + // already canceled + return nil + default: + // cancel context + c.cancel() + c.closed = true + + // wait for the channel to be closed + select { + case <-ctx.Done(): + // unexpectedly aborted cancelation + return ctx.Err() + case <-c.ctx.Done(): + // finally canceled 
+ return nil + } + } +} + +func (c *cancelContext) Cancel() { + c.mu.Lock() + defer c.mu.Unlock() + + if c.ctx == nil { + panic("ctx is nil") + } + + if c.closed { + return + } + + select { + case <-c.ctx.Done(): + // already canceled + return + default: + c.cancel() + <-c.ctx.Done() + c.closed = true + } +} + +func (c *cancelContext) Context() context.Context { + c.mu.Lock() + defer c.mu.Unlock() + + return c.ctx +} + +func (c *cancelContext) Done() <-chan struct{} { + c.mu.Lock() + defer c.mu.Unlock() + + return c.ctx.Done() +} + +// Reset resets the cancel context to be active again +func (c *cancelContext) Reset(parentCtx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.closed { + // still active, nothing to reset + return nil + } + c.ctx, c.cancel = context.WithCancel(parentCtx) + c.closed = false + return nil +} + +type stateContext struct { + mu sync.Mutex + + parentCtx context.Context + + closed bool + + // canceled upon pausing + pausing *cancelContext + + // canceled upon resuming + resuming *cancelContext + + // back channel to handler + // called from consumer + paused *cancelContext + + // back channel to handler + // called from consumer + resumed *cancelContext +} + +func newStateContext(ctx context.Context) *stateContext { + sc := &stateContext{ + parentCtx: ctx, + pausing: newCancelContext(ctx), + resuming: newCancelContext(ctx), + paused: newCancelContext(ctx), + resumed: newCancelContext(ctx), + } + + sc.pausing.Cancel() + sc.paused.Cancel() + return sc +} + +// reset creates the initial state of the object +// initial state is the transitional state resuming (= startup and resuming after pause) +// the passed context is the parent context of all new contexts that spawn from this. +// After start has been called, all contexts are alive except for the resuming context which is canceled by default. 
+func (sc *stateContext) Start(ctx context.Context) (err error) { + sc.mu.Lock() + defer sc.mu.Unlock() + + defer func() { + if err != nil { + sc.closeUnguarded() + } + }() + + // override upon startup + sc.parentCtx = ctx + sc.closed = false + + // reset context + err = sc.pausing.Reset(sc.parentCtx) + if err != nil { + return err + } + err = sc.paused.Reset(sc.parentCtx) + if err != nil { + return err + } + err = sc.resuming.Reset(sc.parentCtx) + if err != nil { + return err + } + err = sc.resumed.Reset(sc.parentCtx) + if err != nil { + return err + } + + // cancel last context to indicate the running state + sc.resuming.Cancel() // called last + return nil +} + +func (sc *stateContext) Paused() { + // explicitly NO mutex lock + sc.paused.Cancel() +} + +func (sc *stateContext) Resumed() { + // explicitly NO mutex lock + sc.resumed.Cancel() +} + +func (sc *stateContext) Resuming() doner { + return sc.resuming +} + +func (sc *stateContext) Pausing() doner { + return sc.pausing +} + +func (sc *stateContext) Pause(ctx context.Context) error { + select { + case <-sc.paused.Done(): + // already paused + return nil + default: + // continue + } + + err := func() error { + sc.mu.Lock() + defer sc.mu.Unlock() + err := sc.resuming.Reset(sc.parentCtx) + if err != nil { + return fmt.Errorf("%w: %v", ErrPauseFailed, err) + } + err = sc.resumed.Reset(sc.parentCtx) + if err != nil { + return fmt.Errorf("%w: %v", ErrPauseFailed, err) + } + err = sc.pausing.CancelWithContext(ctx) // must be called last + if err != nil { + return fmt.Errorf("%w: %v", ErrPauseFailed, err) + } + return nil + }() + if err != nil { + return err + } + + select { + case <-sc.paused.Done(): + // wait until paused + return nil + case <-ctx.Done(): + return fmt.Errorf("%w: %v", ErrPauseFailed, ctx.Err()) + } +} + +// Resume allows to continue the processing of a queue after it has been paused using Pause +func (sc *stateContext) Resume(ctx context.Context) error { + select { + case <-sc.resumed.Done(): + //
already resumed + return nil + default: + // continue + } + + err := func() error { + sc.mu.Lock() + defer sc.mu.Unlock() + err := sc.pausing.Reset(sc.parentCtx) + if err != nil { + return fmt.Errorf("%w: %v", ErrResumeFailed, err) + } + err = sc.paused.Reset(sc.parentCtx) + if err != nil { + return fmt.Errorf("%w: %v", ErrResumeFailed, err) + } + + err = sc.resuming.CancelWithContext(sc.parentCtx) // must be called last + if err != nil { + return fmt.Errorf("%w: %v", ErrResumeFailed, err) + } + return nil + }() + if err != nil { + return err + } + + select { + case <-sc.resumed.Done(): + // wait until resumed + return nil + case <-ctx.Done(): + return fmt.Errorf("%w: %v", ErrResumeFailed, ctx.Err()) + } +} + +func (sc *stateContext) IsActive(ctx context.Context) (active bool, err error) { + closed := func() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.closed + }() + if closed { + return false, nil + } + + select { + case <-sc.resumed.Done(): + return true, nil + case <-sc.paused.Done(): + return false, nil + case <-ctx.Done(): + return false, fmt.Errorf("failed to check state: %w", ctx.Err()) + } +} + +func (sc *stateContext) AwaitResumed(ctx context.Context) (err error) { + closed := func() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.closed + }() + if closed { + return ErrClosed + } + + select { + case <-sc.resumed.Done(): + return nil + case <-ctx.Done(): + return fmt.Errorf("failed to check state: %w", ctx.Err()) + } +} + +func (sc *stateContext) AwaitPaused(ctx context.Context) (err error) { + closed := func() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.closed + }() + if closed { + return ErrClosed + } + + select { + case <-sc.paused.Done(): + return nil + case <-ctx.Done(): + return fmt.Errorf("failed to check state: %w", ctx.Err()) + } +} + +// close closes all active contexts +// in order to prevent dangling goroutines +// When closing you may want to use pause first and then close for the final cleanup +func (sc 
*stateContext) Close() { + sc.mu.Lock() + defer sc.mu.Unlock() + sc.closeUnguarded() +} + +func (sc *stateContext) closeUnguarded() { + sc.pausing.Cancel() + sc.paused.Cancel() + sc.resuming.Cancel() + sc.resumed.Cancel() + sc.closed = true +} + +type doner interface { + Done() <-chan struct{} + Context() context.Context +} diff --git a/pool/pool.go b/pool/pool.go index d84d950..bff25c9 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -6,11 +6,10 @@ import ( "time" "github.com/jxsl13/amqpx/logging" - "github.com/rabbitmq/amqp091-go" ) var ( - QuorumQueue = amqp091.Table{ + QuorumQueue = Table{ "x-queue-type": "quorum", } ) @@ -45,8 +44,6 @@ func New(connectUrl string, numConns, numSessions int, options ...Option) (*Pool TLSConfig: nil, Logger: logger, - - SlowClose: false, // needed for goroutine leak tests }, spo: sessionPoolOption{ Size: numSessions, @@ -88,6 +85,14 @@ func (p *Pool) GetSession() (*Session, error) { return p.sp.GetSession() } +// GetSessionCtx returns a new session from the pool, only returns an error upon shutdown or when the passed context was canceled. +func (p *Pool) GetSessionCtx(ctx context.Context) (*Session, error) { + if p.sp.ctx == ctx { + return p.sp.GetSession() + } + return p.sp.GetSessionCtx(ctx) +} + // GetTransientSession returns a new session which is decoupled from anyshutdown mechanism, thus // requiring a context for timeout handling. // The session does also use a transient connection which is closed when the transient session is closed. diff --git a/pool/pool_options.go b/pool/pool_options.go index a9f1161..7c42c56 100644 --- a/pool/pool_options.go +++ b/pool/pool_options.go @@ -75,14 +75,6 @@ func WithTLS(config *tls.Config) Option { } } -// WithSlowClose is only needed for connection pool and for integration tests. -// This option enables the waiting for goroutine timeouts for every connection it handles. 
-func WithSlowClose(slowClose bool) Option { - return func(po *poolOption) { - ConnectionPoolWithSlowClose(slowClose)(&po.cpo) - } -} - // WithBufferSize allows to configurethe size of // the confirmation, error & blocker buffers of all sessions func WithBufferSize(size int) Option { diff --git a/pool/pool_test.go b/pool/pool_test.go index 3ea8d31..d0ff8b6 100644 --- a/pool/pool_test.go +++ b/pool/pool_test.go @@ -5,21 +5,31 @@ import ( "testing" "time" + "github.com/jxsl13/amqpx" "github.com/jxsl13/amqpx/logging" "github.com/jxsl13/amqpx/pool" "github.com/stretchr/testify/assert" "go.uber.org/goleak" ) +var ( + connectURL = amqpx.NewURL("localhost", 5672, "admin", "password") +) + func TestMain(m *testing.M) { - goleak.VerifyTestMain(m) + goleak.VerifyTestMain( + m, + goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), + goleak.IgnoreTopFunction("github.com/rabbitmq/amqp091-go.(*Connection).heartbeater"), + goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), + ) } func TestNew(t *testing.T) { connections := 2 sessions := 10 - p, err := pool.New("amqp://admin:password@localhost:5672", connections, sessions, + p, err := pool.New(connectURL, connections, sessions, pool.WithName("TestNew"), pool.WithLogger(logging.NewTestLogger(t)), ) diff --git a/pool/private_test.go b/pool/private_test.go new file mode 100644 index 0000000..b56c702 --- /dev/null +++ b/pool/private_test.go @@ -0,0 +1,43 @@ +package pool + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestBackoffPolicy(t *testing.T) { + backoffMultiTest(t, 15, 3) + backoffMultiTest(t, 32, 4) + backoffMultiTest(t, 64, 5) +} + +func backoffMultiTest(t *testing.T, factor int, iterations int) { + backoffTest(t, time.Hour, time.Duration(factor)*time.Hour, iterations) + backoffTest(t, time.Minute, time.Duration(factor)*time.Minute, iterations) + backoffTest(t, time.Second, time.Duration(factor)*time.Second, iterations) + backoffTest(t, time.Millisecond, 
time.Duration(factor)*time.Millisecond, iterations) + backoffTest(t, time.Nanosecond, time.Duration(factor)*time.Nanosecond, iterations) +} + +func backoffTest(t *testing.T, min, max time.Duration, expectedIterations int) { + for i := 0; i < 5000; i++ { + backoff := newDefaultBackoffPolicy(min, max) + + previous := time.Duration(0) + iterations := 0 + for i := 0; i < 128; i++ { + sleep := backoff(i) + if sleep == max { + break + } + require.Less(t, previous, sleep) + require.LessOrEqual(t, sleep, max) + previous = sleep + iterations++ + } + + require.Equalf(t, expectedIterations, iterations, "expected that many iterations: min: %v max: %v", min, max) + } +} diff --git a/pool/publisher.go b/pool/publisher.go index ac033e1..4a0a87f 100644 --- a/pool/publisher.go +++ b/pool/publisher.go @@ -6,7 +6,6 @@ import ( "time" "github.com/jxsl13/amqpx/logging" - "github.com/rabbitmq/amqp091-go" ) type Publisher struct { @@ -68,6 +67,8 @@ func NewPublisher(p *Pool, options ...PublisherOption) *Publisher { return pub } +// Publish a message to a specific exchange with a given routingKey. +// You may set exchange to "" and routingKey to your queue name in order to publish directly to a queue. func (p *Publisher) Publish(exchange string, routingKey string, msg Publishing) error { for { @@ -128,10 +129,10 @@ func (p *Publisher) publish(exchange string, routingKey string, msg Publishing) } // Get is only supposed to be used for testing, do not use get for polling any broker queues.
-func (p *Publisher) Get(queue string, autoAck bool) (msg *amqp091.Delivery, ok bool, err error) { +func (p *Publisher) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) { s, err := p.pool.GetSession() if err != nil && errors.Is(err, ErrClosed) { - return nil, false, err + return Delivery{}, false, err } defer func() { // return session diff --git a/pool/publisher_test.go b/pool/publisher_test.go index 0c1e13e..36d881c 100644 --- a/pool/publisher_test.go +++ b/pool/publisher_test.go @@ -14,7 +14,7 @@ import ( func TestPublisher(t *testing.T) { connections := 1 sessions := 10 // publisher sessions + consumer sessions - p, err := pool.New("amqp://admin:password@localhost:5672", + p, err := pool.New(connectURL, connections, sessions, pool.WithName("TestPublisher"), @@ -42,7 +42,7 @@ func TestPublisher(t *testing.T) { } queueName := fmt.Sprintf("TestPublisher-Queue-%d", id) - err = s.QueueDeclare(queueName) + _, err = s.QueueDeclare(queueName) if err != nil { assert.NoError(t, err) return diff --git a/pool/queue.go b/pool/queue.go new file mode 100644 index 0000000..2c51a7c --- /dev/null +++ b/pool/queue.go @@ -0,0 +1,14 @@ +package pool + +import "github.com/rabbitmq/amqp091-go" + +/* + type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries + } + +Queue captures the current server state of the queue on the server returned from Channel.QueueDeclare or Channel.QueueInspect. 
+*/ +type Queue amqp091.Queue diff --git a/pool/session.go b/pool/session.go index 3a07e66..64b04b4 100644 --- a/pool/session.go +++ b/pool/session.go @@ -2,6 +2,7 @@ package pool import ( "context" + "errors" "fmt" "sync" "time" @@ -240,6 +241,10 @@ func (s *Session) AwaitConfirm(ctx context.Context, expectedTag uint64) error { select { case confirm, ok := <-s.confirms: if !ok { + err := s.error() + if err != nil { + return err + } return fmt.Errorf("confirms channel %w", ErrClosed) } if !confirm.Ack { @@ -270,7 +275,7 @@ func (s *Session) AwaitConfirm(ctx context.Context, expectedTag uint64) error { type Publishing struct { // Application or exchange specific fields, // the headers exchange will inspect this field. - Headers amqp091.Table + Headers Table // Properties ContentType string // MIME content type @@ -371,27 +376,21 @@ func (s *Session) retryPublish(f func() (uint64, error)) (tag uint64, err error) } // Get is only supposed to be used for testing purposes, do not us eit to poll the queue periodically. -func (s *Session) Get(queue string, autoAck bool) (msg *amqp091.Delivery, ok bool, err error) { +func (s *Session) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) { s.mu.Lock() defer s.mu.Unlock() - return s.retryGet(func() (*amqp091.Delivery, bool, error) { - m, ok, err := s.channel.Get(queue, autoAck) - return &m, ok, err - }) -} - -func (s *Session) retryGet(f func() (*amqp091.Delivery, bool, error)) (msg *amqp091.Delivery, ok bool, err error) { - for { - msg, ok, err := f() - if err == nil { - return msg, ok, nil - } - err = s.tryRecover(err) + err = s.retry(func() error { + msg, ok, err = s.channel.Get(queue, autoAck) if err != nil { - return nil, false, err + return err } + return nil + }) + if err != nil { + return Delivery{}, false, err } + return msg, ok, nil } // Nack rejects the message. @@ -431,7 +430,7 @@ type ConsumeOptions struct { // Optional arguments can be provided that have specific semantics for the queue or server. 
NoWait bool // Args are aditional implementation dependent parameters. - Args amqp091.Table + Args Table } // Consume immediately starts delivering queued messages. @@ -449,7 +448,7 @@ type ConsumeOptions struct { // Inflight messages, limited by Channel.Qos will be buffered until received from the returned chan. // When the Channel or Connection is closed, all buffered and inflight messages will be dropped. // When the consumer identifier tag is cancelled, all inflight messages will be delivered until the returned chan is closed. -func (s *Session) Consume(queue string, option ...ConsumeOptions) (<-chan amqp091.Delivery, error) { +func (s *Session) Consume(queue string, option ...ConsumeOptions) (<-chan Delivery, error) { s.mu.Lock() defer s.mu.Unlock() @@ -471,9 +470,13 @@ func (s *Session) Consume(queue string, option ...ConsumeOptions) (<-chan amqp09 } + var ( + c <-chan Delivery + err error + ) // retries to connect and attempts to start a consumer - c, err := s.consumeRetry(func() (<-chan amqp091.Delivery, error) { - return s.channel.Consume( + err = s.retry(func() error { + c, err = s.channel.Consume( queue, o.ConsumerTag, o.AutoAck, @@ -482,6 +485,10 @@ func (s *Session) Consume(queue string, option ...ConsumeOptions) (<-chan amqp09 o.NoWait, o.Args, ) + if err != nil { + return err + } + return nil }) if err != nil { return nil, err @@ -491,17 +498,70 @@ func (s *Session) Consume(queue string, option ...ConsumeOptions) (<-chan amqp09 return c, nil } -func (s *Session) consumeRetry(f func() (<-chan amqp091.Delivery, error)) (<-chan amqp091.Delivery, error) { - for { - c, err := f() - if err == nil { - return c, nil - } - err = s.tryRecover(err) +// Consume immediately starts delivering queued messages. +// +// Begin receiving on the returned chan Delivery before any other operation on the Connection or Channel. +// Continues deliveries to the returned chan Delivery until Channel.Cancel, Connection.Close, Channel.Close, or an AMQP exception occurs. 
+// Consumers must range over the chan to ensure all deliveries are received. +// +// Unreceived deliveries will block all methods on the same connection. +// All deliveries in AMQP must be acknowledged. +// It is expected of the consumer to call Delivery.Ack after it has successfully processed the delivery. +// +// If the consumer is cancelled or the channel or connection is closed any unacknowledged deliveries will be requeued at the end of the same queue. +// +// Inflight messages, limited by Channel.Qos will be buffered until received from the returned chan. +// When the Channel or Connection is closed, all buffered and inflight messages will be dropped. +// When the consumer identifier tag is cancelled, all inflight messages will be delivered until the returned chan is closed. +func (s *Session) ConsumeWithContext(ctx context.Context, queue string, option ...ConsumeOptions) (<-chan Delivery, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // defaults + o := ConsumeOptions{ + AutoAck: false, + Exclusive: false, + NoLocal: false, // not used by RabbitMQ + NoWait: false, + Args: nil, + } + if len(option) > 0 { + o = option[0] + } + + if o.ConsumerTag == "" { + // use our own consumer naming + o.ConsumerTag = s.Name() + + } + + var ( + c <-chan Delivery + err error + ) + // retries to connect and attempts to start a consumer + err = s.retry(func() error { + c, err = s.channel.ConsumeWithContext( + ctx, + queue, + o.ConsumerTag, + o.AutoAck, + o.Exclusive, + o.NoLocal, + o.NoWait, + o.Args, + ) if err != nil { - return nil, err + return err } + return nil + }) + if err != nil { + return nil, err } + s.consumers[o.ConsumerTag] = true + + return c, nil } func (s *Session) retry(f func() error) error { @@ -566,9 +626,9 @@ type ExchangeDeclareOptions struct { // The channel may be closed as a result of an error. Add a NotifyClose listener // to respond to any exceptions. 
NoWait bool - // Optional amqp091.Table of arguments that are specific to the server's implementation of + // Optional Table of arguments that are specific to the server's implementation of // the exchange can be sent for exchange types that require extra parameters. - Args amqp091.Table + Args Table } // ExchangeDeclare declares an exchange on the server. If the exchange does not @@ -588,7 +648,7 @@ type ExchangeDeclareOptions struct { // how messages are routed through it. Once an exchange is declared, its type // cannot be changed. The common types are "direct", "fanout", "topic" and // "headers". -func (s *Session) ExchangeDeclare(name string, kind string, option ...ExchangeDeclareOptions) error { +func (s *Session) ExchangeDeclare(name string, kind ExchangeKind, option ...ExchangeDeclareOptions) error { s.mu.Lock() defer s.mu.Unlock() @@ -607,7 +667,41 @@ func (s *Session) ExchangeDeclare(name string, kind string, option ...ExchangeDe return s.retry(func() error { return s.channel.ExchangeDeclare( name, - kind, + string(kind), + o.Durable, + o.AutoDelete, + o.Internal, + o.NoWait, + o.Args, + ) + }) +} + +// ExchangeDeclarePassive is functionally and parametrically equivalent to +// ExchangeDeclare, except that it sets the "passive" attribute to true. A passive +// exchange is assumed by RabbitMQ to already exist, and attempting to connect to a +// non-existent exchange will cause RabbitMQ to throw an exception. This function +// can be used to detect the existence of an exchange. 
+func (s *Session) ExchangeDeclarePassive(name string, kind ExchangeKind, option ...ExchangeDeclareOptions) error { + s.mu.Lock() + defer s.mu.Unlock() + + // sane defaults + o := ExchangeDeclareOptions{ + Durable: true, + AutoDelete: false, + Internal: false, + NoWait: false, + Args: nil, + } + if len(option) > 0 { + o = option[0] + } + + err := s.retry(func() error { + return s.channel.ExchangeDeclarePassive( + name, + string(kind), o.Durable, o.AutoDelete, o.Internal, @@ -615,6 +709,16 @@ func (s *Session) ExchangeDeclare(name string, kind string, option ...ExchangeDe o.Args, ) }) + + if err == nil { + return nil + } + + ae := &amqp091.Error{} + if errors.As(err, &ae) { + return fmt.Errorf("exchange %w: %w", ErrNotFound, ae) + } + return err } type ExchangeDeleteOptions struct { @@ -691,7 +795,7 @@ type QueueDeclareOptions struct { // NoWait bool // Args are additional properties you can set, like the queue type. - Args amqp091.Table + Args Table } // QueueDeclare declares a queue to hold messages and deliver to consumers. @@ -715,7 +819,7 @@ type QueueDeclareOptions struct { // // When the error return value is not nil, you can assume the queue could not be // declared with these parameters, and the channel will be closed. 
-func (s *Session) QueueDeclare(name string, option ...QueueDeclareOptions) error { +func (s *Session) QueueDeclare(name string, option ...QueueDeclareOptions) (Queue, error) { s.mu.Lock() defer s.mu.Unlock() @@ -729,9 +833,52 @@ func (s *Session) QueueDeclare(name string, option ...QueueDeclareOptions) error if len(option) > 0 { o = option[0] } + var ( + err error + queue amqp091.Queue + ) + err = s.retry(func() error { + queue, err = s.channel.QueueDeclare( + name, + o.Durable, + o.AutoDelete, + o.Exclusive, + o.NoWait, + o.Args, + ) + return err + }) + if err != nil { + return Queue{}, err + } - return s.retry(func() error { - _, err := s.channel.QueueDeclare( + return Queue(queue), nil +} + +// QueueDeclarePassive is functionally and parametrically equivalent to QueueDeclare, except that it sets the "passive" attribute to true. +// A passive queue is assumed by RabbitMQ to already exist, and attempting to connect to a non-existent queue will cause RabbitMQ to throw an exception. +// This function can be used to test for the existence of a queue. +func (s *Session) QueueDeclarePassive(name string, option ...QueueDeclareOptions) (Queue, error) { + s.mu.Lock() + defer s.mu.Unlock() + + o := QueueDeclareOptions{ + Durable: true, + AutoDelete: false, + Exclusive: false, + NoWait: false, + Args: QuorumQueue, + } + if len(option) > 0 { + o = option[0] + } + + var ( + err error + queue amqp091.Queue + ) + err = s.retry(func() error { + queue, err = s.channel.QueueDeclarePassive( name, o.Durable, o.AutoDelete, @@ -741,6 +888,16 @@ func (s *Session) QueueDeclare(name string, option ...QueueDeclareOptions) error ) return err }) + + if err == nil { + return Queue(queue), nil + } + + ae := &amqp091.Error{} + if errors.As(err, &ae) { + return Queue{}, fmt.Errorf("queue %w: %w", ErrNotFound, ae) + } + return Queue{}, err } // QueueDeleteOptions are options for deleting a queue. @@ -804,7 +961,7 @@ type QueueBindOptions struct { // closed with an error. 
NoWait bool // Additional implementation specific arguments - Args amqp091.Table + Args Table } // QueueBind binds an exchange to a queue so that publishings to the exchange will @@ -874,12 +1031,12 @@ func (s *Session) QueueBind(queueName string, routingKey string, exchange string // arguments. // It is possible to send and empty string for the exchange name which means to // unbind the queue from the default exchange. -func (s *Session) QueueUnbind(name string, routingKey string, exchange string, arg ...amqp091.Table) error { +func (s *Session) QueueUnbind(name string, routingKey string, exchange string, arg ...Table) error { s.mu.Lock() defer s.mu.Unlock() // default - var option amqp091.Table = nil + var option Table = nil if len(arg) > 0 { option = arg[0] } @@ -889,6 +1046,41 @@ func (s *Session) QueueUnbind(name string, routingKey string, exchange string, a }) } +type QueuePurgeOptions struct { + // If NoWait is true, do not wait for the server response and the number of messages purged will not be meaningful. + NoWait bool +} + +// QueuePurge removes all messages from the named queue which are not waiting to be acknowledged. +// Messages that have been delivered but have not yet been acknowledged will not be removed. +// When successful, returns the number of messages purged. +func (s *Session) QueuePurge(name string, options ...QueuePurgeOptions) (int, error) { + opt := QueuePurgeOptions{ + NoWait: false, + } + if len(options) > 0 { + opt = options[0] + } + + var ( + numPurgedMessages int = 0 + err error + ) + + err = s.retry(func() error { + numPurgedMessages, err = s.channel.QueuePurge(name, opt.NoWait) + if err != nil { + return err + } + return nil + }) + + if err != nil { + return 0, err + } + return numPurgedMessages, nil +} + type ExchangeBindOptions struct { // When NoWait is true, do not wait for the server to confirm the binding. If any // error occurs the channel will be closed. 
Add a listener to NotifyClose to @@ -896,7 +1088,7 @@ type ExchangeBindOptions struct { NoWait bool // Optional arguments specific to the exchanges bound can also be specified. - Args amqp091.Table + Args Table } // ExchangeBind binds an exchange to another exchange to create inter-exchange @@ -956,7 +1148,7 @@ type ExchangeUnbindOptions struct { // Optional arguments that are specific to the type of exchanges bound can also be // provided. These must match the same arguments specified in ExchangeBind to // identify the binding. - Args amqp091.Table + Args Table } // ExchangeUnbind unbinds the destination exchange from the source exchange on the @@ -1079,7 +1271,7 @@ flush: return confirms } -// Error returns the first error from the errors channel +// Error returns all errors from the errors channel // and flushes all other pending errors from the channel // In case that there are no errors, nil is returned. func (s *Session) Error() error { @@ -1095,19 +1287,14 @@ func (s *Session) error() error { ) for { select { - case <-s.catchShutdown(): - return fmt.Errorf("session %w", ErrClosed) case e, ok := <-s.errors: if !ok { - return amqp091.ErrClosed - } - // only overwrite with the first error - if err == nil { - err = e - } else { - // flush all other errors after the first one - continue + if err != nil { + return err + } + return fmt.Errorf("errors channel %w", ErrClosed) } + err = errors.Join(err, e) default: return err } diff --git a/pool/session_pool.go b/pool/session_pool.go index 3d026bd..f4313bb 100644 --- a/pool/session_pool.go +++ b/pool/session_pool.go @@ -104,6 +104,23 @@ func (sp *SessionPool) GetSession() (*Session, error) { } } +// GetSessionCtx gets a pooled session. +// blocks until a session is acquired from the pool, the session pool was closed or the passed context was canceled. 
+func (sp *SessionPool) GetSessionCtx(ctx context.Context) (*Session, error) { + select { + case <-sp.catchShutdown(): + return nil, ErrClosed + case <-ctx.Done(): + return nil, ctx.Err() + case session, ok := <-sp.sessions: + if !ok { + return nil, fmt.Errorf("failed to get session: %w", ErrClosed) + } + + return session, nil + } +} + // GetTransientSession returns a transient session. // This method may return an error when the context ha sbeen closed before a session could be obtained. // A transient session creates a transient connection under the hood. @@ -144,7 +161,9 @@ func (sp *SessionPool) ReturnSession(session *Session, erred bool) { } else { // healthy sessions may contain pending confirmation messages // cleanup confirmations from previous session usage - session.flushConfirms() + _ = session.flushConfirms() + // flush errors + _ = session.Error() } select { diff --git a/pool/session_pool_test.go b/pool/session_pool_test.go index 7ce0243..ec9a219 100644 --- a/pool/session_pool_test.go +++ b/pool/session_pool_test.go @@ -13,7 +13,7 @@ import ( func TestNewSessionPool(t *testing.T) { connections := 1 sessions := 10 - p, err := pool.NewConnectionPool("amqp://admin:password@localhost:5672", connections, + p, err := pool.NewConnectionPool(connectURL, connections, pool.ConnectionPoolWithName("TestNewConnectionPool"), pool.ConnectionPoolWithLogger(logging.NewTestLogger(t)), ) diff --git a/pool/session_test.go b/pool/session_test.go index 0aeca7c..0d481de 100644 --- a/pool/session_test.go +++ b/pool/session_test.go @@ -15,10 +15,9 @@ import ( func TestNewSession(t *testing.T) { c, err := pool.NewConnection( - "amqp://admin:password@localhost:5672", + connectURL, "TestNewSession", pool.ConnectionWithLogger(logging.NewTestLogger(t)), - pool.ConnectionWithSlowClose(true), ) if err != nil { assert.NoError(t, err) @@ -43,7 +42,7 @@ func TestNewSession(t *testing.T) { }() queueName := fmt.Sprintf("TestNewSession-Queue-%d", id) - err = s.QueueDeclare(queueName) + _, 
err = s.QueueDeclare(queueName) if err != nil { assert.NoError(t, err) return @@ -135,10 +134,9 @@ func TestNewSession(t *testing.T) { func TestNewSessionDisconnect(t *testing.T) { c, err := pool.NewConnection( - "amqp://admin:password@localhost:5672", + connectURL, "TestNewSessionDisconnect", pool.ConnectionWithLogger(logging.NewTestLogger(t)), - pool.ConnectionWithSlowClose(true), ) if err != nil { assert.NoError(t, err) @@ -208,7 +206,7 @@ func TestNewSessionDisconnect(t *testing.T) { started2() queueName := fmt.Sprintf("TestNewSession-Queue-%d", id) - err = s.QueueDeclare(queueName) + _, err = s.QueueDeclare(queueName) if err != nil { assert.NoError(t, err) return @@ -316,3 +314,59 @@ func TestNewSessionDisconnect(t *testing.T) { wg.Wait() time.Sleep(10 * time.Second) // await dangling io goroutines to timeout } + +func TestNewSessionQueueDeclarePassive(t *testing.T) { + var wg sync.WaitGroup + + defer func() { + wg.Wait() + time.Sleep(10 * time.Second) // await dangling io goroutines to timeout + }() + + c, err := pool.NewConnection( + connectURL, + "TestNewSessionQueueDeclarePassive", + pool.ConnectionWithLogger(logging.NewTestLogger(t)), + ) + if err != nil { + assert.NoError(t, err) + return + } + defer func() { + c.Close() // can be nil or error + }() + + session, err := pool.NewSession(c, fmt.Sprintf("TestNewSessionQueueDeclarePassive-%d", 1), pool.SessionWithConfirms(true)) + if err != nil { + assert.NoError(t, err) + return + } + defer func() { + assert.NoError(t, session.Close()) + }() + + for i := 0; i < 100; i++ { + qname := fmt.Sprintf("TestNewSessionQueueDeclarePassive-queue-%d", i) + q, err := session.QueueDeclare(qname) + if err != nil { + assert.NoError(t, err) + return + } + assert.Equalf(t, 0, q.Consumers, "expected 0 consumers when declaring a queue: %s", qname) + + // executed upon return + defer func() { + _, err := session.QueueDelete(qname) + assert.NoErrorf(t, err, "failed to delete queue: %s", qname) + }() + + q, err = 
session.QueueDeclarePassive(qname) + if err != nil { + assert.NoErrorf(t, err, "QueueDeclarePassive failed for queue: %s", qname) + return + } + + assert.Equalf(t, 0, q.Consumers, "queue should not have any consumers: %s", qname) + } + +} diff --git a/pool/subscriber.go b/pool/subscriber.go index b1714b2..b1de22c 100644 --- a/pool/subscriber.go +++ b/pool/subscriber.go @@ -8,7 +8,6 @@ import ( "time" "github.com/jxsl13/amqpx/logging" - "github.com/rabbitmq/amqp091-go" ) type Subscriber struct { @@ -17,8 +16,8 @@ type Subscriber struct { mu sync.Mutex started bool - handlers []Handler - batchHandlers []BatchHandler + handlers []*Handler + batchHandlers []*BatchHandler ctx context.Context cancel context.CancelFunc @@ -64,14 +63,14 @@ func NewSubscriber(p *Pool, options ...SubscriberOption) *Subscriber { o(&option) } + // decouple from parent in order to individually close the context ctx, cancel := context.WithCancel(option.Ctx) sub := &Subscriber{ pool: p, autoClosePool: option.AutoClosePool, - - ctx: ctx, - cancel: cancel, + ctx: ctx, + cancel: cancel, log: option.Logger, } @@ -80,35 +79,10 @@ func NewSubscriber(p *Pool, options ...SubscriberOption) *Subscriber { } // HandlerFunc is basically a handler for incoming messages/events. -type HandlerFunc func(amqp091.Delivery) error +type HandlerFunc func(Delivery) error // BatchHandlerFunc is a handler for incoming batches of messages/events -type BatchHandlerFunc func([]amqp091.Delivery) error - -// Handler is a struct that contains all parameters needed in order to register a handler function. -type Handler struct { - Queue string - ConsumeOptions - HandlerFunc HandlerFunc -} - -// BatchHandler is a struct that contains all parameter sneeded i order to register a batch handler function. 
-type BatchHandler struct { - Queue string - - // When <= 0, will be set to 50 - // Number of messages a batch may contain at most - // before processing is triggered - MaxBatchSize int - - // FlushTimeout is the duration that is waited for the next message from a queue before - // the batch is closed and passed for processing. - // This value should be less than 30m (which is the (n)ack timeout of RabbitMQ) - // when <= 0, will be set to 5s - FlushTimeout time.Duration - ConsumeOptions - HandlerFunc BatchHandlerFunc -} +type BatchHandlerFunc func([]Delivery) error // RegisterHandlerFunc registers a consumer function that starts a consumer upon subscriber startup. // The consumer is identified by a string that is unique and scoped for all consumers on this channel. @@ -129,7 +103,7 @@ type BatchHandler struct { // Inflight messages, limited by Channel.Qos will be buffered until received from the returned chan. // When the Channel or Connection is closed, all buffered and inflight messages will be dropped. // When the consumer identifier tag is cancelled, all inflight messages will be delivered until the returned chan is closed. -func (s *Subscriber) RegisterHandlerFunc(queue string, hf HandlerFunc, options ...ConsumeOptions) { +func (s *Subscriber) RegisterHandlerFunc(queue string, hf HandlerFunc, options ...ConsumeOptions) *Handler { option := ConsumeOptions{ ConsumerTag: "", AutoAck: false, @@ -142,28 +116,21 @@ func (s *Subscriber) RegisterHandlerFunc(queue string, hf HandlerFunc, options . 
option = options[0] } - s.RegisterHandler( - Handler{ - Queue: queue, - ConsumeOptions: option, - HandlerFunc: hf, - }, - ) + handler := NewHandler(queue, hf, option) + s.RegisterHandler(handler) + return handler } -func (s *Subscriber) RegisterHandler(handler Handler) { - if handler.HandlerFunc == nil { - panic("handler.HandlerFunc must not be nil") - } +func (s *Subscriber) RegisterHandler(handler *Handler) { s.mu.Lock() defer s.mu.Unlock() s.handlers = append(s.handlers, handler) - s.log.WithFields(withConsumerIfSet(handler.ConsumerTag, + s.log.WithFields(withConsumerIfSet(handler.ConsumeOptions().ConsumerTag, map[string]any{ "subscriber": s.pool.Name(), - "queue": handler.Queue, + "queue": handler.Queue(), })).Info("registered message handler") } @@ -173,149 +140,136 @@ func (s *Subscriber) RegisterHandler(handler Handler) { // and then you'd have to wait indefinitly for those 20 messages to be processed, as it might take a long time for another message to arrive in the queue. // This is where your flushTimeout comes into play. In order to wait at most for the period of flushTimeout until a new message arrives // before processing the batch in your handler function. -func (s *Subscriber) RegisterBatchHandlerFunc(queue string, maxBatchSize int, flushTimeout time.Duration, hf BatchHandlerFunc, options ...ConsumeOptions) { - - option := ConsumeOptions{ - ConsumerTag: "", - AutoAck: false, - Exclusive: false, - NoLocal: false, - NoWait: false, - Args: nil, - } - if len(options) > 0 { - option = options[0] - } - - s.RegisterBatchHandler( - BatchHandler{ - Queue: queue, - ConsumeOptions: option, - MaxBatchSize: maxBatchSize, - FlushTimeout: flushTimeout, - HandlerFunc: hf, - }, - ) +func (s *Subscriber) RegisterBatchHandlerFunc(queue string, hf BatchHandlerFunc, options ...BatchHandlerOption) *BatchHandler { + handler := NewBatchHandler(queue, hf, options...) 
+ s.RegisterBatchHandler(handler) + return handler } -func (s *Subscriber) RegisterBatchHandler(handler BatchHandler) { - if handler.HandlerFunc == nil { - panic("handler.HandlerFunc must not be nil") - } +// RegisterBatchHandler registers a custom handler that MIGHT not be closed in case that the subscriber is closed. +// The passed batch handler may be derived from a different parent context. +func (s *Subscriber) RegisterBatchHandler(handler *BatchHandler) { // TODO: do we want to introduce a BatchSizeLimit // which would keep track of accumulated payload memory limits // of all the messages of a batch and process the messages // in case we hit that memory limit within the current batch. - if handler.MaxBatchSize <= 0 { - handler.MaxBatchSize = 50 - } - - if handler.FlushTimeout <= 0 { - handler.FlushTimeout = 5 * time.Second - } - s.mu.Lock() defer s.mu.Unlock() s.batchHandlers = append(s.batchHandlers, handler) - s.log.WithFields(withConsumerIfSet(handler.ConsumerTag, + opts := handler.Config() + + s.log.WithFields(withConsumerIfSet(handler.ConsumeOptions().ConsumerTag, map[string]any{ "subscriber": s.pool.Name(), - "queue": handler.Queue, - "maxBatchSize": handler.MaxBatchSize, - "flushTimeout": handler.FlushTimeout.String(), - })).Info("registered batch handler") + "queue": opts.Queue, + "maxBatchSize": opts.MaxBatchSize, // TODO: optimize so that we don't call getters multiple times (mutex contention) + "flushTimeout": handler.FlushTimeout, + })).Info("registered batch message handler") } // Start starts the consumers for all registered handler functions // This method is not blocking. Use Wait() to wait for all routines to shut down // via context cancelation (e.g. 
via a signal) -func (s *Subscriber) Start() { +func (s *Subscriber) Start() (err error) { s.mu.Lock() defer s.mu.Unlock() - if s.started { panic("subscriber cannot be started more than once") } s.debugSimple("starting subscriber...") defer func() { - // after starting everything we want to set started to true - s.started = true - s.infoSimple("started") + if err != nil { + s.Close() + } else { + // after starting everything we want to set started to true + s.started = true + s.infoSimple("started") + } }() s.debugSimple(fmt.Sprintf("starting %d handler routine(s)", len(s.handlers))) for _, h := range s.handlers { s.wg.Add(1) go s.consumer(h, &s.wg) + err = h.awaitResumed(s.ctx) + if err != nil { + return fmt.Errorf("failed to start consumer for queue %s: %w", h.Queue(), err) + } } s.debugSimple(fmt.Sprintf("starting %d batch handler routine(s)", len(s.batchHandlers))) for _, bh := range s.batchHandlers { s.wg.Add(1) go s.batchConsumer(bh, &s.wg) - } -} - -func (s *Subscriber) consumer(h Handler, wg *sync.WaitGroup) { - defer wg.Done() - var err error - for { - err = s.consume(h) - if errors.Is(err, ErrClosed) { - return + err = bh.awaitResumed(s.ctx) + if err != nil { + return fmt.Errorf("failed to start batch consumer for queue %s: %w", bh.Queue(), err) } } + return nil } -func (s *Subscriber) batchConsumer(bh BatchHandler, wg *sync.WaitGroup) { +func (s *Subscriber) consumer(h *Handler, wg *sync.WaitGroup) { defer wg.Done() + defer h.close() var err error + // trigger initial startup + // channel below + opts, err := h.start(s.ctx) + if err != nil { + s.error(opts.ConsumerTag, opts.Queue, err, "failed to start consumer") + return + } + for { - err = s.batchConsume(bh) - if errors.Is(err, ErrClosed) { + select { + case <-s.catchShutdown(): return + case <-h.resuming().Done(): + err = s.consume(h) + if errors.Is(err, ErrClosed) { + return + } } } } -func (s *Subscriber) consume(h Handler) (err error) { - s.debugConsumer(h.ConsumerTag, "starting consumer...") +func 
(s *Subscriber) consume(h *Handler) (err error) { + opts, err := h.start(s.ctx) + if err != nil { + return err + } + defer h.paused() + + s.debugConsumer(opts.ConsumerTag, "starting consumer...") session, err := s.pool.GetSession() if err != nil { return err } defer func() { - if err == nil { - // no error - s.pool.ReturnSession(session, false) - s.infoConsumer(h.ConsumerTag, "closed") - } else if errors.Is(err, ErrClosed) { - // graceful shutdown - s.pool.ReturnSession(session, false) - s.infoConsumer(h.ConsumerTag, "closed") - } else { - // actual error - s.pool.ReturnSession(session, true) - s.warnConsumer(h.ConsumerTag, err, "closed unexpectedly") - } + // err evaluation upon defer + s.returnSession(h, session, err) }() // got a working session - delivery, err := session.Consume( - h.Queue, - h.ConsumeOptions, + delivery, err := session.ConsumeWithContext( + h.pausing().Context(), + opts.Queue, + opts.ConsumeOptions, ) if err != nil { return err } - s.infoConsumer(h.ConsumerTag, "started") + + h.resumed() + s.infoConsumer(opts.ConsumerTag, "started") for { select { case <-s.catchShutdown(): @@ -325,17 +279,17 @@ func (s *Subscriber) consume(h Handler) (err error) { return ErrDeliveryClosed } - s.infoHandler(h.ConsumerTag, msg.Exchange, msg.RoutingKey, h.Queue, "received message") - err = h.HandlerFunc(msg) - if h.AutoAck { + s.infoHandler(opts.ConsumerTag, msg.Exchange, msg.RoutingKey, opts.Queue, "received message") + err = opts.HandlerFunc(msg) + if opts.AutoAck { if err != nil { // we cannot really do anything to recover from a processing error in this case - s.errorHandler(h.ConsumerTag, msg.Exchange, msg.RoutingKey, h.Queue, fmt.Errorf("processing failed: dropping message: %w", err)) + s.errorHandler(opts.ConsumerTag, msg.Exchange, msg.RoutingKey, opts.Queue, fmt.Errorf("processing failed: dropping message: %w", err)) } else { - s.infoHandler(h.ConsumerTag, msg.Exchange, msg.RoutingKey, h.Queue, "processed message") + s.infoHandler(opts.ConsumerTag, 
msg.Exchange, msg.RoutingKey, opts.Queue, "processed message") } } else { - poolErr := s.ackPostHandle(h, msg.DeliveryTag, msg.Exchange, msg.RoutingKey, session, err) + poolErr := s.ackPostHandle(opts, msg.DeliveryTag, msg.Exchange, msg.RoutingKey, session, err) if poolErr != nil { return poolErr } @@ -345,7 +299,7 @@ func (s *Subscriber) consume(h Handler) (err error) { } // (n)ack delivery and signal that message was processed by the service -func (s *Subscriber) ackPostHandle(h Handler, deliveryTag uint64, exchange, routingKey string, session *Session, handlerErr error) (err error) { +func (s *Subscriber) ackPostHandle(opts HandlerConfig, deliveryTag uint64, exchange, routingKey string, session *Session, handlerErr error) (err error) { var ackErr error if handlerErr != nil { // requeue message if possible @@ -357,7 +311,7 @@ func (s *Subscriber) ackPostHandle(h Handler, deliveryTag uint64, exchange, rout // if (n)ack fails, we know that the connection died // potentially before processing already. 
if ackErr != nil { - s.warnHandler(h.ConsumerTag, exchange, routingKey, h.Queue, ackErr, "(n)ack failed") + s.warnHandler(opts.ConsumerTag, exchange, routingKey, opts.Queue, ackErr, "(n)ack failed") poolErr := session.Recover() if poolErr != nil { // only returns an error upon shutdown @@ -371,49 +325,73 @@ func (s *Subscriber) ackPostHandle(h Handler, deliveryTag uint64, exchange, rout // (n)acked successfully if handlerErr != nil { - s.infoHandler(h.ConsumerTag, exchange, routingKey, h.Queue, "nacked message") + s.infoHandler(opts.ConsumerTag, exchange, routingKey, opts.Queue, "nacked message") } else { - s.infoHandler(h.ConsumerTag, exchange, routingKey, h.Queue, "acked message") + s.infoHandler(opts.ConsumerTag, exchange, routingKey, opts.Queue, "acked message") } // successfully handled message return nil } -func (s *Subscriber) batchConsume(bh BatchHandler) (err error) { - s.debugConsumer(bh.ConsumerTag, "starting batch consumer...") +func (s *Subscriber) batchConsumer(h *BatchHandler, wg *sync.WaitGroup) { + defer wg.Done() + defer h.close() + + var err error + // initialize all handler contexts + // to be in state resuming + opts, err := h.start(s.ctx) + if err != nil { + s.error(opts.ConsumerTag, opts.Queue, err, "failed to start batch handler consumer") + return + } + + for { + select { + case <-s.catchShutdown(): + return + case <-h.resuming().Done(): + err = s.batchConsume(h) + if errors.Is(err, ErrClosed) { + return + } + } + } +} + +func (s *Subscriber) batchConsume(h *BatchHandler) (err error) { + opts, err := h.start(s.ctx) + if err != nil { + return err + } + defer h.paused() + + s.debugConsumer(opts.ConsumerTag, "starting batch consumer...") session, err := s.pool.GetSession() if err != nil { return err } defer func() { - if err == nil { - // no error - s.pool.ReturnSession(session, false) - s.infoConsumer(bh.ConsumerTag, "closed") - } else if errors.Is(err, ErrClosed) { - // graceful shutdown - s.pool.ReturnSession(session, false) - 
s.infoConsumer(bh.ConsumerTag, "closed") - } else { - // actual error - s.pool.ReturnSession(session, true) - s.warnConsumer(bh.ConsumerTag, err, "closed unexpectedly") - } + // err evaluation upon defer + s.returnSession(h, session, err) }() // got a working session - delivery, err := session.Consume( - bh.Queue, - bh.ConsumeOptions, + delivery, err := session.ConsumeWithContext( + h.pausing().Context(), + opts.Queue, + opts.ConsumeOptions, ) if err != nil { return err } - s.infoConsumer(bh.ConsumerTag, "started") + + h.resumed() + s.infoConsumer(opts.ConsumerTag, "started") // preallocate memory for batch - batch := make([]amqp091.Delivery, 0, bh.MaxBatchSize) + batch := make([]Delivery, 0, maxi(1, opts.MaxBatchSize)) defer func() { if len(batch) > 0 && errors.Is(err, ErrClosed) { // requeue all not yet processed messages in batch slice @@ -423,13 +401,13 @@ func (s *Subscriber) batchConsume(bh BatchHandler) (err error) { // There is no way to recover form this state in case an error is returned from the Nack call. 
nackErr := batch[len(batch)-1].Nack(true, true) if nackErr != nil { - s.warnBatchHandler(bh.ConsumerTag, bh.Queue, bh.MaxBatchSize, err, "failed to nack and requeue batch upon shutdown") + s.warnBatchHandler(opts.ConsumerTag, opts.Queue, opts.MaxBatchSize, err, "failed to nack and requeue batch upon shutdown") } } }() var ( - timer = time.NewTimer(bh.FlushTimeout) + timer = time.NewTimer(opts.FlushTimeout) drained = false ) defer closeTimer(timer, &drained) @@ -443,7 +421,7 @@ func (s *Subscriber) batchConsume(bh BatchHandler) (err error) { for { // reset the timer - resetTimer(timer, bh.FlushTimeout, &drained) + resetTimer(timer, opts.FlushTimeout, &drained) select { case <-s.catchShutdown(): @@ -453,7 +431,7 @@ func (s *Subscriber) batchConsume(bh BatchHandler) (err error) { return ErrDeliveryClosed } batch = append(batch, msg) - if len(batch) == bh.MaxBatchSize { + if len(batch) == opts.MaxBatchSize { break collectBatch } @@ -476,18 +454,18 @@ func (s *Subscriber) batchConsume(bh BatchHandler) (err error) { lastDeliveryTag = batch[len(batch)-1].DeliveryTag ) - s.infoBatchHandler(bh.ConsumerTag, bh.Queue, batchSize, "received batch") - err = bh.HandlerFunc(batch) + s.infoBatchHandler(opts.ConsumerTag, opts.Queue, batchSize, "received batch") + err = opts.HandlerFunc(batch) // no acks required - if bh.AutoAck { + if opts.AutoAck { if err != nil { // we cannot really do anything to recover from a processing error in this case - s.errorBatchHandler(bh.ConsumerTag, bh.Queue, batchSize, fmt.Errorf("processing failed: dropping batch: %w", err)) + s.errorBatchHandler(opts.ConsumerTag, opts.Queue, batchSize, fmt.Errorf("processing failed: dropping batch: %w", err)) } else { - s.infoBatchHandler(bh.ConsumerTag, bh.Queue, batchSize, "processed batch") + s.infoBatchHandler(opts.ConsumerTag, opts.Queue, batchSize, "processed batch") } } else { - poolErr := s.ackBatchPostHandle(bh, lastDeliveryTag, batchSize, session, err) + poolErr := s.ackBatchPostHandle(opts, 
lastDeliveryTag, batchSize, session, err) if poolErr != nil { return poolErr } @@ -495,7 +473,7 @@ func (s *Subscriber) batchConsume(bh BatchHandler) (err error) { } } -func (s *Subscriber) ackBatchPostHandle(bh BatchHandler, lastDeliveryTag uint64, currentBatchSize int, session *Session, handlerErr error) (err error) { +func (s *Subscriber) ackBatchPostHandle(opts BatchHandlerConfig, lastDeliveryTag uint64, currentBatchSize int, session *Session, handlerErr error) (err error) { var ackErr error // processing failed if handlerErr != nil { @@ -509,7 +487,7 @@ func (s *Subscriber) ackBatchPostHandle(bh BatchHandler, lastDeliveryTag uint64, // if (n)ack fails, we know that the connection died // potentially before processing already. if ackErr != nil { - s.warnBatchHandler(bh.ConsumerTag, bh.Queue, currentBatchSize, ackErr, "batch (n)ack failed") + s.warnBatchHandler(opts.ConsumerTag, opts.Queue, currentBatchSize, ackErr, "batch (n)ack failed") poolErr := session.Recover() if poolErr != nil { // only returns an error upon shutdown @@ -523,14 +501,44 @@ func (s *Subscriber) ackBatchPostHandle(bh BatchHandler, lastDeliveryTag uint64, // (n)acked successfully if handlerErr != nil { - s.infoBatchHandler(bh.ConsumerTag, bh.Queue, currentBatchSize, "nacked batch") + s.infoBatchHandler(opts.ConsumerTag, opts.Queue, currentBatchSize, "nacked batch") } else { - s.infoBatchHandler(bh.ConsumerTag, bh.Queue, currentBatchSize, "acked batch") + s.infoBatchHandler(opts.ConsumerTag, opts.Queue, currentBatchSize, "acked batch") } // successfully handled message return nil } +type handler interface { + ConsumeOptions() ConsumeOptions + Queue() string + pausing() doner +} + +func (s *Subscriber) returnSession(h handler, session *Session, err error) { + opts := h.ConsumeOptions() + + if errors.Is(err, ErrClosed) { + // graceful shutdown + s.pool.ReturnSession(session, false) + s.infoConsumer(opts.ConsumerTag, "closed") + return + } + + select { + case <-h.pausing().Done(): + // expected 
closing due to context cancelation + // cancel errors the underlying channel + // A canceled session is an erred session. + s.pool.ReturnSession(session, true) + s.infoConsumer(opts.ConsumerTag, "paused") + default: + // actual error + s.pool.ReturnSession(session, true) + s.warnConsumer(opts.ConsumerTag, err, "closed unexpectedly") + } +} + func (s *Subscriber) catchShutdown() <-chan struct{} { return s.ctx.Done() } @@ -629,3 +637,11 @@ func withConsumerIfSet(consumer string, m map[string]any) map[string]any { } return m } + +func (s *Subscriber) error(consumer, queue string, err error, a ...any) { + s.log.WithFields(withConsumerIfSet(consumer, map[string]any{ + "subscriber": s.pool.Name(), + "queue": queue, + "error": err, + })).Error(a...) +} diff --git a/pool/subscriber_batch_handler.go b/pool/subscriber_batch_handler.go new file mode 100644 index 0000000..52c59bf --- /dev/null +++ b/pool/subscriber_batch_handler.go @@ -0,0 +1,210 @@ +package pool + +import ( + "context" + "sync" + "time" +) + +const ( + defaultMaxBatchSize = 50 + defaultFlushTimeout = 5 * time.Second +) + +// NewBatchHandler creates a new batch handler which is primarily a combination of your passed +// handler function and the queue name from which the handler fetches messages and processes those. +// Additionally, the handler allows you to pause and resume processing from the provided queue. 
+func NewBatchHandler(queue string, hf BatchHandlerFunc, options ...BatchHandlerOption) *BatchHandler { + if hf == nil { + panic("handlerFunc must not be nil") + } + + // sane defaults + h := &BatchHandler{ + sc: newStateContext(context.Background()), + queue: queue, + handlerFunc: hf, + maxBatchSize: defaultMaxBatchSize, + flushTimeout: defaultFlushTimeout, + consumeOpts: ConsumeOptions{ + ConsumerTag: "", + AutoAck: false, + Exclusive: false, + NoLocal: false, + NoWait: false, + Args: nil, + }, + } + + for _, opt := range options { + opt(h) + } + + return h +} + +// BatchHandler is a struct that contains all parameters needed in order to register a batch handler function. +type BatchHandler struct { + mu sync.Mutex + queue string + handlerFunc BatchHandlerFunc + consumeOpts ConsumeOptions + + // When <= 0, will be set to 50 + // Number of messages a batch may contain at most + // before processing is triggered + maxBatchSize int + + // FlushTimeout is the duration that is waited for the next message from a queue before + // the batch is closed and passed for processing. + // This value should be less than 30m (which is the (n)ack timeout of RabbitMQ) + // when <= 0, will be set to 5s + flushTimeout time.Duration + + sc *stateContext +} + +// BatchHandlerConfig is a read only snapshot of the current handler's configuration. 
+type BatchHandlerConfig struct { + Queue string + ConsumeOptions + + HandlerFunc BatchHandlerFunc + MaxBatchSize int + FlushTimeout time.Duration +} + +func (h *BatchHandler) close() { + h.sc.Close() +} + +// reset creates the initial state of the object +// initial state is the transitional state resuming (= startup and resuming after pause) +// the passed context is the parent context of all new contexts that spawn from this +func (h *BatchHandler) start(ctx context.Context) (opts BatchHandlerConfig, err error) { + h.mu.Lock() + defer h.mu.Unlock() + opts = h.configUnguarded() + err = h.sc.Start(ctx) + return opts, err +} + +func (h *BatchHandler) Config() BatchHandlerConfig { + h.mu.Lock() + defer h.mu.Unlock() + return h.configUnguarded() +} + +func (h *BatchHandler) configUnguarded() BatchHandlerConfig { + return BatchHandlerConfig{ + Queue: h.queue, + HandlerFunc: h.handlerFunc, + MaxBatchSize: h.maxBatchSize, + FlushTimeout: h.flushTimeout, + ConsumeOptions: h.consumeOpts, + } +} + +// Pause allows to halt the processing of a queue after the processing has been started by the subscriber. 
+func (h *BatchHandler) Pause(ctx context.Context) error { + return h.sc.Pause(ctx) +} + +func (h *BatchHandler) pausing() doner { + return h.sc.Pausing() +} + +func (h *BatchHandler) paused() { + h.sc.Paused() +} + +// Resume allows to continue the processing of a queue after it has been paused using Pause +func (h *BatchHandler) Resume(ctx context.Context) error { + return h.sc.Resume(ctx) +} + +func (h *BatchHandler) resuming() doner { + return h.sc.Resuming() +} + +func (h *BatchHandler) resumed() { + h.sc.Resumed() +} + +func (h *BatchHandler) IsActive(ctx context.Context) (active bool, err error) { + return h.sc.IsActive(ctx) +} + +func (h *BatchHandler) awaitResumed(ctx context.Context) error { + return h.sc.AwaitResumed(ctx) +} + +func (h *BatchHandler) awaitPaused(ctx context.Context) error { + return h.sc.AwaitPaused(ctx) +} + +func (h *BatchHandler) Queue() string { + h.mu.Lock() + defer h.mu.Unlock() + return h.queue +} + +// SetQueue changes the current queue to another queue +// from which the handler consumes messages. +// The actual change is effective after pausing and resuming the handler. +func (h *BatchHandler) SetQueue(queue string) { + h.mu.Lock() + defer h.mu.Unlock() + h.queue = queue +} + +// SetHandlerFunc changes the current handler function to another +// handler function which processes messages.. +// The actual change is effective after pausing and resuming the handler. 
+func (h *BatchHandler) SetHandlerFunc(hf BatchHandlerFunc) { + h.mu.Lock() + defer h.mu.Unlock() + h.handlerFunc = hf +} + +func (h *BatchHandler) ConsumeOptions() ConsumeOptions { + h.mu.Lock() + defer h.mu.Unlock() + return h.consumeOpts +} + +func (h *BatchHandler) SetConsumeOptions(consumeOpts ConsumeOptions) { + h.mu.Lock() + defer h.mu.Unlock() + h.consumeOpts = consumeOpts +} + +func (h *BatchHandler) MaxBatchSize() int { + h.mu.Lock() + defer h.mu.Unlock() + return h.maxBatchSize +} + +func (h *BatchHandler) SetMaxBatchSize(maxBatchSize int) { + h.mu.Lock() + defer h.mu.Unlock() + if maxBatchSize <= 0 { + maxBatchSize = defaultMaxBatchSize + } + h.maxBatchSize = maxBatchSize +} + +func (h *BatchHandler) FlushTimeout() time.Duration { + h.mu.Lock() + defer h.mu.Unlock() + return h.flushTimeout +} + +func (h *BatchHandler) SetFlushTimeout(flushTimeout time.Duration) { + h.mu.Lock() + defer h.mu.Unlock() + if flushTimeout <= 0 { + flushTimeout = defaultFlushTimeout + } + h.flushTimeout = flushTimeout +} diff --git a/pool/subscriber_handler.go b/pool/subscriber_handler.go new file mode 100644 index 0000000..e903d48 --- /dev/null +++ b/pool/subscriber_handler.go @@ -0,0 +1,173 @@ +package pool + +import ( + "context" + "errors" + "sync" +) + +var ( + // ErrPauseFailed is returned by (Batch)Handler.Pause in case that the passed context is canceled + ErrPauseFailed = errors.New("failed to pause handler") + + // ErrResumeFailed is returned by (Batch)Handler.Resume in case that the passed context is canceled + ErrResumeFailed = errors.New("failed to resume handler") +) + +// NewHandler creates a new handler which is primarily a combination of your passed +// handler function and the queue name from which the handler fetches messages and processes those. +// Additionally, the handler allows you to pause and resume processing from the provided queue. 
+func NewHandler(queue string, hf HandlerFunc, option ...ConsumeOptions) *Handler { + if hf == nil { + panic("handlerFunc must not be nil") + } + copt := ConsumeOptions{ + ConsumerTag: "", + AutoAck: false, + Exclusive: false, + NoLocal: false, + NoWait: false, + Args: nil, + } + if len(option) > 0 { + copt = option[0] + } + + h := &Handler{ + queue: queue, + handlerFunc: hf, + consumeOpts: copt, + sc: newStateContext(context.Background()), + } + + return h +} + +// Handler is a struct that contains all parameters needed in order to register a handler function +// to the provided queue. Additionally, the handler allows you to pause and resume processing of messages. +type Handler struct { + mu sync.Mutex + queue string + handlerFunc HandlerFunc + consumeOpts ConsumeOptions + + // not guarded by mutex + sc *stateContext +} + +// HandlerConfig is a read only snapshot of the current handler's configuration. +// This internal data structure is used in the corresponding consumer. +type HandlerConfig struct { + Queue string + ConsumeOptions + + HandlerFunc HandlerFunc +} + +func (h *Handler) close() { + h.sc.Close() +} + +// reset creates the initial state of the object +// initial state is the transitional state resuming (= startup and resuming after pause) +// the passed context is the parent context of all new contexts that spawn from this. +// After start has been called, all contexts are alive except for the resuming context which is canceled by default. 
+func (h *Handler) start(ctx context.Context) (opts HandlerConfig, err error) { + h.mu.Lock() + defer h.mu.Unlock() + opts = h.configUnguarded() + err = h.sc.Start(ctx) + return opts, err + +} + +func (h *Handler) Config() HandlerConfig { + h.mu.Lock() + defer h.mu.Unlock() + return h.configUnguarded() +} + +func (h *Handler) configUnguarded() HandlerConfig { + return HandlerConfig{ + Queue: h.queue, + HandlerFunc: h.handlerFunc, + ConsumeOptions: h.consumeOpts, + } +} + +// Pause allows to halt the processing of a queue after the processing has been started by the subscriber. +func (h *Handler) Pause(ctx context.Context) error { + return h.sc.Pause(ctx) +} + +func (h *Handler) pausing() doner { + return h.sc.Pausing() +} + +func (h *Handler) paused() { + h.sc.Paused() +} + +// Resume allows to continue the processing of a queue after it has been paused using Pause +func (h *Handler) Resume(ctx context.Context) error { + return h.sc.Resume(ctx) +} + +func (h *Handler) resuming() doner { + return h.sc.Resuming() +} + +func (h *Handler) resumed() { + h.sc.Resumed() +} + +func (h *Handler) IsActive(ctx context.Context) (active bool, err error) { + return h.sc.IsActive(ctx) +} + +func (h *Handler) awaitResumed(ctx context.Context) error { + return h.sc.AwaitResumed(ctx) +} + +func (h *Handler) awaitPaused(ctx context.Context) error { + return h.sc.AwaitPaused(ctx) +} + +func (h *Handler) Queue() string { + h.mu.Lock() + defer h.mu.Unlock() + return h.queue +} + +// SetQueue changes the current queue to another queue +// from which the handler consumes messages. +// The actual change is effective after pausing and resuming the handler. +func (h *Handler) SetQueue(queue string) { + h.mu.Lock() + defer h.mu.Unlock() + h.queue = queue +} + +// SetHandlerFunc changes the current handler function to another +// handler function which processes messages.. +// The actual change is effective after pausing and resuming the handler. 
+func (h *Handler) SetHandlerFunc(hf HandlerFunc) { + h.mu.Lock() + defer h.mu.Unlock() + h.handlerFunc = hf +} + +func (h *Handler) ConsumeOptions() ConsumeOptions { + h.mu.Lock() + defer h.mu.Unlock() + return h.consumeOpts +} + +// SetConsumeOptions changes the current consume options to another +// set of consume options which is used when consuming messages. +// The actual change is effective after pausing and resuming the handler. +func (h *Handler) SetConsumeOptions(consumeOpts ConsumeOptions) { + h.mu.Lock() + defer h.mu.Unlock() + h.consumeOpts = consumeOpts +} diff --git a/pool/subscriber_handler_options.go b/pool/subscriber_handler_options.go new file mode 100644 index 0000000..eb6ce22 --- /dev/null +++ b/pool/subscriber_handler_options.go @@ -0,0 +1,31 @@ +package pool + +import "time" + +type BatchHandlerOption func(*BatchHandler) + +func WithMaxBatchSize(size int) BatchHandlerOption { + return func(bh *BatchHandler) { + if size <= 0 { + bh.maxBatchSize = defaultMaxBatchSize + } else { + bh.maxBatchSize = size + } + } +} + +func WithBatchFlushTimeout(d time.Duration) BatchHandlerOption { + return func(bh *BatchHandler) { + if d <= 0 { + bh.flushTimeout = defaultFlushTimeout + } else { + bh.flushTimeout = d + } + } +} + +func WithBatchConsumeOptions(opts ConsumeOptions) BatchHandlerOption { + return func(bh *BatchHandler) { + bh.consumeOpts = opts + } +} diff --git a/pool/subscriber_test.go b/pool/subscriber_test.go index d5302f7..986ad4c 100644 --- a/pool/subscriber_test.go +++ b/pool/subscriber_test.go @@ -9,14 +9,13 @@ import ( "github.com/jxsl13/amqpx/logging" "github.com/jxsl13/amqpx/pool" - "github.com/rabbitmq/amqp091-go" "github.com/stretchr/testify/assert" ) func TestSubscriber(t *testing.T) { sessions := 2 // publisher sessions + consumer sessions - p, err := pool.New("amqp://admin:password@localhost:5672", 1, sessions, pool.WithConfirms(true), pool.WithLogger(logging.NewTestLogger(t))) + p, err := pool.New(connectURL, 1, sessions, pool.WithConfirms(true), 
pool.WithLogger(logging.NewTestLogger(t))) if err != nil { assert.NoError(t, err) return @@ -39,7 +38,7 @@ func TestSubscriber(t *testing.T) { defer p.ReturnSession(ts, false) queueName := fmt.Sprintf("TestSubscriber-Queue-%d", id) - err = ts.QueueDeclare(queueName) + _, err = ts.QueueDeclare(queueName) if err != nil { assert.NoError(t, err) return @@ -79,7 +78,7 @@ func TestSubscriber(t *testing.T) { defer sub.Close() sub.RegisterHandlerFunc(queueName, - func(msg amqp091.Delivery) error { + func(msg pool.Delivery) error { // handler func receivedMsg := string(msg.Body) @@ -124,7 +123,7 @@ func TestBatchSubscriber(t *testing.T) { numMessages = 50 batchTimeout = 10 * time.Second // keep this at a higher number for slow machines ) - p, err := pool.New("amqp://admin:password@localhost:5672", 1, sessions, pool.WithConfirms(true), pool.WithLogger(logging.NewTestLogger(t))) + p, err := pool.New(connectURL, 1, sessions, pool.WithConfirms(true), pool.WithLogger(logging.NewTestLogger(t))) if err != nil { assert.NoError(t, err) return @@ -147,7 +146,7 @@ func TestBatchSubscriber(t *testing.T) { defer p.ReturnSession(ts, false) queueName := fmt.Sprintf("TestBatchSubscriber-Queue-%d", id) - err = ts.QueueDeclare(queueName) + _, err = ts.QueueDeclare(queueName) if err != nil { assert.NoError(t, err) return @@ -202,8 +201,8 @@ func TestBatchSubscriber(t *testing.T) { batchCount := 0 messageCount := 0 - sub.RegisterBatchHandlerFunc(queueName, batchSize, batchTimeout, - func(msgs []amqp091.Delivery) error { + sub.RegisterBatchHandlerFunc(queueName, + func(msgs []pool.Delivery) error { log := logging.NewTestLogger(t) assert.Equal(t, batchSize, len(msgs)) @@ -221,10 +220,12 @@ func TestBatchSubscriber(t *testing.T) { } return nil }, - pool.ConsumeOptions{ + pool.WithMaxBatchSize(batchSize), + pool.WithBatchFlushTimeout(batchTimeout), + pool.WithBatchConsumeOptions(pool.ConsumeOptions{ ConsumerTag: fmt.Sprintf("Consumer-%s", queueName), Exclusive: true, - }, + }), ) sub.Start() diff 
--git a/pool/table.go b/pool/table.go new file mode 100644 index 0000000..3bb53cb --- /dev/null +++ b/pool/table.go @@ -0,0 +1,38 @@ +package pool + +import "github.com/rabbitmq/amqp091-go" + +/* + Table is a dynamic map of arguments that may be passed additionally to functions. + + type Table map[string]interface{} + +Table stores user supplied fields of the following types: + + bool + byte + int8 + float32 + float64 + int + int16 + int32 + int64 + nil + string + time.Time + amqp.Decimal + amqp.Table + []byte + []interface{} - containing above types + +Functions taking a table will immediately fail when the table contains a value of an unsupported type. + +The caller must be specific in which precision of integer it wishes to +encode. + +Use a type assertion when reading values from a table for type conversion. + +RabbitMQ expects int32 for integer values. +*/ +type Table = amqp091.Table diff --git a/pool/topologer.go b/pool/topologer.go index 517a00c..f900092 100644 --- a/pool/topologer.go +++ b/pool/topologer.go @@ -4,13 +4,14 @@ import ( "context" "github.com/jxsl13/amqpx/logging" - "github.com/rabbitmq/amqp091-go" ) type Topologer struct { pool *Pool - log logging.Logger + transientOnly bool + log logging.Logger + ctx context.Context } func NewTopologer(p *Pool, options ...TopologerOption) *Topologer { @@ -20,6 +21,7 @@ func NewTopologer(p *Pool, options ...TopologerOption) *Topologer { option := topologerOption{ Logger: p.sp.log, // derive logger from session pool + Ctx: p.Context(), } for _, o := range options { @@ -29,15 +31,19 @@ func NewTopologer(p *Pool, options ...TopologerOption) *Topologer { top := &Topologer{ pool: p, log: option.Logger, + ctx: option.Ctx, } return top } +// TODO: it should be possible to pass a custom context in here so that we can define +// timeouts, especially for a topology deleter which operates on a closed context and needs a new one. 
func (t *Topologer) getSession() (*Session, error) { - if t.pool.SessionPoolSize() == 0 { - return t.pool.GetTransientSession(context.Background()) + + if t.transientOnly || t.pool.SessionPoolSize() == 0 { + return t.pool.GetTransientSession(t.ctx) } - return t.pool.GetSession() + return t.pool.GetSessionCtx(t.ctx) } // ExchangeDeclare declares an exchange on the server. If the exchange does not @@ -56,7 +62,7 @@ func (t *Topologer) getSession() (*Session, error) { // how messages are routed through it. Once an exchange is declared, its type // cannot be changed. The common types are "direct", "fanout", "topic" and // "headers". -func (t *Topologer) ExchangeDeclare(name string, kind string, option ...ExchangeDeclareOptions) (err error) { +func (t *Topologer) ExchangeDeclare(name string, kind ExchangeKind, option ...ExchangeDeclareOptions) (err error) { s, err := t.getSession() if err != nil { return err @@ -71,6 +77,26 @@ func (t *Topologer) ExchangeDeclare(name string, kind string, option ...Exchange return s.ExchangeDeclare(name, kind, option...) } +// ExchangeDeclarePassive is functionally and parametrically equivalent to +// ExchangeDeclare, except that it sets the "passive" attribute to true. A passive +// exchange is assumed by RabbitMQ to already exist, and attempting to connect to a +// non-existent exchange will cause RabbitMQ to throw an exception. This function +// can be used to detect the existence of an exchange. +func (t *Topologer) ExchangeDeclarePassive(name string, kind ExchangeKind, option ...ExchangeDeclareOptions) (err error) { + s, err := t.getSession() + if err != nil { + return err + } + defer func() { + if err != nil { + t.pool.ReturnSession(s, true) + } else { + t.pool.ReturnSession(s, false) + } + }() + return s.ExchangeDeclarePassive(name, kind, option...) +} + // ExchangeDelete removes the named exchange from the server. When an exchange is // deleted all queue bindings on the exchange are also deleted. 
If this exchange // does not exist, the channel will be closed with an error. @@ -108,10 +134,10 @@ func (t *Topologer) ExchangeDelete(name string, option ...ExchangeDeleteOptions) // // The queue name may be empty, in which case the server will generate a unique name // which will be returned in the Name field of Queue struct. -func (t *Topologer) QueueDeclare(name string, option ...QueueDeclareOptions) (err error) { +func (t *Topologer) QueueDeclare(name string, option ...QueueDeclareOptions) (queue Queue, err error) { s, err := t.getSession() if err != nil { - return err + return Queue{}, err } defer func() { if err != nil { @@ -123,6 +149,42 @@ func (t *Topologer) QueueDeclare(name string, option ...QueueDeclareOptions) (er return s.QueueDeclare(name, option...) } +// QueueDeclarePassive is functionally and parametrically equivalent to QueueDeclare, except that it sets the "passive" attribute to true. +// A passive queue is assumed by RabbitMQ to already exist, and attempting to connect to a non-existent queue will cause RabbitMQ to throw an exception. +// This function can be used to test for the existence of a queue. +func (t *Topologer) QueueDeclarePassive(name string, option ...QueueDeclareOptions) (queue Queue, err error) { + s, err := t.getSession() + if err != nil { + return Queue{}, err + } + defer func() { + if err != nil { + t.pool.ReturnSession(s, true) + } else { + t.pool.ReturnSession(s, false) + } + }() + return s.QueueDeclarePassive(name, option...) +} + +// QueuePurge removes all messages from the named queue which are not waiting to be acknowledged. +// Messages that have been delivered but have not yet been acknowledged will not be removed. +// When successful, returns the number of messages purged. 
+func (t *Topologer) QueuePurge(name string, options ...QueuePurgeOptions) (int, error) { + s, err := t.getSession() + if err != nil { + return 0, err + } + defer func() { + if err != nil { + t.pool.ReturnSession(s, true) + } else { + t.pool.ReturnSession(s, false) + } + }() + return s.QueuePurge(name, options...) +} + // QueueDelete removes the queue from the server including all bindings then // purges the messages based on server configuration, returning the number of // messages purged. @@ -192,7 +254,7 @@ func (t *Topologer) QueueBind(name string, routingKey string, exchange string, o // It is possible to send and empty string for the exchange name which means to // unbind the queue from the default exchange. -func (t *Topologer) QueueUnbind(name string, routingKey string, exchange string, args ...amqp091.Table) (err error) { +func (t *Topologer) QueueUnbind(name string, routingKey string, exchange string, args ...Table) (err error) { s, err := t.getSession() if err != nil { return err diff --git a/pool/topologer_options.go b/pool/topologer_options.go index fe69150..d528cb3 100644 --- a/pool/topologer_options.go +++ b/pool/topologer_options.go @@ -1,11 +1,15 @@ package pool import ( + "context" + "github.com/jxsl13/amqpx/logging" ) type topologerOption struct { - Logger logging.Logger + TransientOnly bool + Logger logging.Logger + Ctx context.Context } type TopologerOption func(*topologerOption) @@ -15,3 +19,15 @@ func TopologerWithLogger(logger logging.Logger) TopologerOption { co.Logger = logger } } + +func TopologerWithTransientSessions(transientOnly bool) TopologerOption { + return func(co *topologerOption) { + co.TransientOnly = transientOnly + } +} + +func TopologerWithContext(ctx context.Context) TopologerOption { + return func(co *topologerOption) { + co.Ctx = ctx + } +}