
Commit 6f1419f

feature: tarpit() filter to keep the connection to the client open until the client closes it (zalando#2760)
1 parent: c6deeb8

5 files changed, +196 −0 lines

docs/reference/filters.md (+18)

@@ -598,6 +598,24 @@ the response path.

Same as [chunks filter](#chunks), but on the request path and not on
the response path.

### tarpit

The tarpit filter discards the request and responds with a never-ending
stream of chunked response payloads. The goal is to consume the client
connection without letting the client know what is happening.

Parameters:

* time duration (time.Duration)

Example:

```
* -> tarpit("1s") -> <shunt>;
```

The example sends a chunk of response payload every second.

### absorb

The absorb filter reads and discards the payload of the incoming requests.
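As a side note on the documented behavior: from the client's perspective the route above returns status 200 with chunked transfer encoding, and the body then drips roughly one byte per configured interval. A minimal sketch of observing this from a Go client (the address, path, and one-second interval are placeholder assumptions, not part of this change):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Assumes a skipper instance on localhost:9090 serving the documented
	// route `* -> tarpit("1s") -> <shunt>;` (address and interval are
	// placeholders).
	rsp, err := http.Get("http://localhost:9090/")
	if err != nil {
		panic(err)
	}
	defer rsp.Body.Close()

	buf := make([]byte, 1)
	for i := 0; i < 3; i++ {
		start := time.Now()
		n, err := rsp.Body.Read(buf) // each read waits for the next chunk, roughly 1s
		fmt.Printf("read %d byte(s) after %v, err = %v\n", n, time.Since(start), err)
	}
}
```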

filters/builtin/builtin.go (+1)

@@ -176,6 +176,7 @@ func Filters() []filters.Spec {
	diag.NewBackendLatency(),
	diag.NewBackendBandwidth(),
	diag.NewBackendChunks(),
	diag.NewTarpit(),
	diag.NewAbsorb(),
	diag.NewAbsorbSilent(),
	diag.NewLogHeader(),
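Adding diag.NewTarpit() to Filters() is what puts tarpit into skipper's default set of built-in filters. For a setup that assembles its own registry instead of using the builtin list, a hedged sketch (the package and function names here are illustrative; filters.Registry and Register are used the same way in the test below):

```go
package custom

import (
	"github.com/zalando/skipper/filters"
	"github.com/zalando/skipper/filters/diag"
)

// customRegistry returns a registry that contains only the tarpit filter,
// so route definitions can reference tarpit("...") by name.
func customRegistry() filters.Registry {
	r := filters.Registry{}
	r.Register(diag.NewTarpit())
	return r
}
```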

filters/diag/tarpit.go (new file, +59)

package diag

import (
	"net/http"
	"time"

	"github.com/zalando/skipper/filters"
)

type tarpitSpec struct{}

type tarpit struct {
	d time.Duration
}

func NewTarpit() filters.Spec {
	return &tarpitSpec{}
}

func (t *tarpitSpec) Name() string {
	return filters.TarpitName
}

func (t *tarpitSpec) CreateFilter(args []interface{}) (filters.Filter, error) {
	if len(args) != 1 {
		return nil, filters.ErrInvalidFilterParameters
	}
	s, ok := args[0].(string)
	if !ok {
		return nil, filters.ErrInvalidFilterParameters
	}

	d, err := time.ParseDuration(s)
	if err != nil {
		return nil, filters.ErrInvalidFilterParameters
	}

	return &tarpit{d: d}, nil
}

func (t *tarpit) Request(ctx filters.FilterContext) {
	// Serve the response directly from the filter: the body is a reader
	// that never returns EOF, so the connection stays open and the
	// payload is sent as a never-ending chunked stream.
	ctx.Serve(&http.Response{StatusCode: http.StatusOK, Body: &slowBlockingReader{d: t.d}})
}

func (*tarpit) Response(filters.FilterContext) {}

type slowBlockingReader struct {
	d time.Duration
}

// Read blocks for the configured duration and then yields a single space,
// producing one tiny chunk per interval without ever signalling EOF.
func (r *slowBlockingReader) Read(p []byte) (int, error) {
	time.Sleep(r.d)
	n := copy(p, []byte(" "))
	return n, nil
}

func (r *slowBlockingReader) Close() error {
	return nil
}
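The mechanism relies on the served *http.Response carrying no Content-Length: the body reader never returns io.EOF, so the payload goes out as an endless chunked stream while each Read call stalls for the configured duration. A rough standalone sketch of the same effect using plain net/http, outside skipper (handler, port, and interval are illustrative; in the actual filter the proxy layer does the streaming):

```go
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		// No Content-Length is set, so Go's HTTP server responds with
		// chunked transfer encoding and streams whatever is written.
		f, _ := w.(http.Flusher)
		for {
			time.Sleep(time.Second)
			if _, err := w.Write([]byte(" ")); err != nil {
				return // the client finally gave up
			}
			if f != nil {
				f.Flush() // push the one-byte chunk onto the wire
			}
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Whether each one-byte write reaches the wire immediately depends on flushing, hence the explicit http.Flusher in the sketch.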

filters/diag/tarpit_test.go (new file, +117)

package diag

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/zalando/skipper/eskip"
	"github.com/zalando/skipper/filters"
	"github.com/zalando/skipper/proxy/proxytest"
)

func TestTarpit(t *testing.T) {
	for _, tt := range []struct {
		name          string
		args          []interface{}
		status        int
		clientTimeout time.Duration
		want          error
	}{
		{
			name: "test no args return error",
			want: filters.ErrInvalidFilterParameters,
		},
		{
			name: "test wrong arg return error",
			args: []interface{}{"no-time-duration"},
			want: filters.ErrInvalidFilterParameters,
		},
		{
			name: "test no string arg return error",
			args: []interface{}{0x0a},
			want: filters.ErrInvalidFilterParameters,
		},
		{
			name: "test wrong number of args return error",
			args: []interface{}{"10s", "10ms"},
			want: filters.ErrInvalidFilterParameters,
		},
		{
			name:          "test 10ms and 1s client timeout",
			args:          []interface{}{"10ms"},
			clientTimeout: time.Second,
			want:          nil,
		},
		{
			name:          "test 1s and 1s client timeout",
			args:          []interface{}{"1s"},
			clientTimeout: time.Second,
			want:          nil,
		},
		{
			name:          "test 1s and 100ms client timeout",
			args:          []interface{}{"100ms"},
			clientTimeout: time.Second,
			want:          nil,
		},
		{
			name:          "test 1s and 3s client timeout",
			args:          []interface{}{"1s"},
			clientTimeout: 3 * time.Second,
			want:          nil,
		}} {
		t.Run(tt.name, func(t *testing.T) {
			backend := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
			}))
			defer backend.Close()

			spec := NewTarpit()
			_, err := spec.CreateFilter(tt.args)
			switch err {
			case tt.want:
				// ok
				if err != nil {
					return
				}
			default:
				t.Fatal(err)
			}

			fr := filters.Registry{}
			fr.Register(spec)
			sargs := make([]string, 0, len(tt.args))
			for _, e := range tt.args {
				sargs = append(sargs, e.(string))
			}
			doc := fmt.Sprintf(`r: * -> tarpit("%s") -> "%s";`, strings.Join(sargs, ","), backend.URL)
			r := eskip.MustParse(doc)
			p := proxytest.New(fr, r...)
			defer p.Close()

			N := 1
			for i := 0; i < N; i++ {
				ctx, done := context.WithTimeout(context.Background(), tt.clientTimeout)
				defer done()
				req, err := http.NewRequestWithContext(ctx, "GET", p.URL, nil)
				if err != nil {
					t.Fatalf("Failed to create request: %v", err)
				}

				rsp, err := p.Client().Do(req)
				if err != nil {
					t.Fatalf("Failed to get response: %v", err)
				}

				if rsp.StatusCode != 200 {
					t.Fatalf("Failed to get status code 200 got: %d", rsp.StatusCode)
				}
			}
		})
	}
}

filters/filters.go (+1)

@@ -254,6 +254,7 @@ const (
	BackendLatencyName = "backendLatency"
	BackendBandwidthName = "backendBandwidth"
	BackendChunksName = "backendChunks"
	TarpitName = "tarpit"
	AbsorbName = "absorb"
	AbsorbSilentName = "absorbSilent"
	UniformRequestLatencyName = "uniformRequestLatency"
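The exported constant is the name that route definitions use for the filter. A small hedged sketch of referencing it when building a filter reference programmatically (assumes eskip.Filter's Name and Args fields; the package and function names are made up for illustration):

```go
package custom

import (
	"github.com/zalando/skipper/eskip"
	"github.com/zalando/skipper/filters"
)

// tarpitRef builds the same filter reference as writing tarpit("1s") in a
// route, using the exported name constant instead of a string literal.
func tarpitRef() *eskip.Filter {
	return &eskip.Filter{
		Name: filters.TarpitName,
		Args: []interface{}{"1s"},
	}
}
```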
