#!/usr/bin/python
#
# memleak   Trace and display outstanding allocations to detect
#           memory leaks in user-mode processes and the kernel.
#
# USAGE: memleak [-h] [-p PID] [-t] [-a] [-o OLDER] [-c COMMAND]
#                [--combined-only] [--wa-missing-free] [-s SAMPLE_RATE]
#                [-T TOP] [-z MIN_SIZE] [-Z MAX_SIZE] [-O OBJ]
#                [interval] [count]
#
# Licensed under the Apache License, Version 2.0 (the "License")
# Copyright (C) 2016 Sasha Goldshtein.
from bcc import BPF
from time import sleep
from datetime import datetime
import resource
import argparse
import subprocess
import os
import sys

class Allocation(object):
    def __init__(self, stack, size):
        self.stack = stack
        self.count = 1
        self.size = size

    def update(self, size):
        self.count += 1
        self.size += size
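
# Helper: returns a lazy line iterator over the child's combined
# stdout/stderr (reads until EOF). Note that it is currently unused in
# this script.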
def run_command_get_output(command):
    p = subprocess.Popen(command.split(),
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return iter(p.stdout.readline, b'')

def run_command_get_pid(command):
    p = subprocess.Popen(command.split())
    return p.pid
examples = """
EXAMPLES:
./memleak -p $(pidof allocs)
Trace allocations and display a summary of "leaked" (outstanding)
allocations every 5 seconds
./memleak -p $(pidof allocs) -t
Trace allocations and display each individual allocator function call
./memleak -ap $(pidof allocs) 10
Trace allocations and display allocated addresses, sizes, and stacks
every 10 seconds for outstanding allocations
./memleak -c "./allocs"
Run the specified command and trace its allocations
./memleak
Trace allocations in kernel mode and display a summary of outstanding
allocations every 5 seconds
./memleak -o 60000
Trace allocations in kernel mode and display a summary of outstanding
allocations that are at least one minute (60 seconds) old
./memleak -s 5
Trace roughly every 5th allocation, to reduce overhead
"""
description = """
Trace outstanding memory allocations that weren't freed.
Supports both user-mode allocations made with libc functions and kernel-mode
allocations made with kmalloc/kmem_cache_alloc/get_free_pages and corresponding
memory release functions.
"""

parser = argparse.ArgumentParser(description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=examples)
parser.add_argument("-p", "--pid", type=int, default=-1,
        help="the PID to trace; if not specified, trace kernel allocs")
parser.add_argument("-t", "--trace", action="store_true",
        help="print trace messages for each alloc/free call")
parser.add_argument("interval", nargs="?", default=5, type=int,
        help="interval in seconds to print outstanding allocations")
parser.add_argument("count", nargs="?", type=int,
        help="number of times to print the report before exiting")
parser.add_argument("-a", "--show-allocs", default=False, action="store_true",
        help="show allocation addresses and sizes as well as call stacks")
parser.add_argument("-o", "--older", default=500, type=int,
        help="prune allocations younger than this age in milliseconds")
parser.add_argument("-c", "--command",
        help="execute and trace the specified command")
parser.add_argument("--combined-only", default=False, action="store_true",
        help="show combined allocation statistics only")
parser.add_argument("--wa-missing-free", default=False, action="store_true",
        help="workaround to alleviate misjudgments when free is missing")
parser.add_argument("-s", "--sample-rate", default=1, type=int,
        help="sample every N-th allocation to decrease the overhead")
parser.add_argument("-T", "--top", type=int, default=10,
        help="display only this many top allocating stacks (by size)")
parser.add_argument("-z", "--min-size", type=int,
        help="capture only allocations larger than this size")
parser.add_argument("-Z", "--max-size", type=int,
        help="capture only allocations smaller than this size")
parser.add_argument("-O", "--obj", type=str, default="c",
        help="attach to allocator functions in the specified object")
parser.add_argument("--ebpf", action="store_true",
        help=argparse.SUPPRESS)
parser.add_argument("--percpu", default=False, action="store_true",
        help="trace percpu allocations")
args = parser.parse_args()
pid = args.pid
command = args.command
kernel_trace = (pid == -1 and command is None)
trace_all = args.trace
interval = args.interval
min_age_ns = 1e6 * args.older
sample_every_n = args.sample_rate
num_prints = args.count
top_stacks = args.top
min_size = args.min_size
max_size = args.max_size
obj = args.obj

if min_size is not None and max_size is not None and min_size > max_size:
    print("min_size (-z) can't be greater than max_size (-Z)")
    exit(1)

if command is not None:
    print("Executing '%s' and tracing the resulting process." % command)
    pid = run_command_get_pid(command)
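    # Caveat: the child process starts running as soon as it is spawned,
    # before the uprobes below are attached, so allocations made during
    # very early startup can be missed.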
bpf_source = """
#include <uapi/linux/ptrace.h>
struct alloc_info_t {
u64 size;
u64 timestamp_ns;
int stack_id;
};
struct combined_alloc_info_t {
u64 total_size;
u64 number_of_allocs;
};
BPF_HASH(sizes, u64);
BPF_HASH(allocs, u64, struct alloc_info_t, 1000000);
BPF_HASH(memptrs, u64, u64);
BPF_STACK_TRACE(stack_traces, 10240);
BPF_HASH(combined_allocs, u64, struct combined_alloc_info_t, 10240);
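
/* Map roles:
 *   sizes           - in-flight request size, keyed by pid_tgid; written
 *                     at alloc entry, consumed at alloc exit
 *   allocs          - outstanding allocations, keyed by returned address
 *   memptrs         - saved out-parameter pointer for posix_memalign
 *   stack_traces    - shared storage for captured stack ids
 *   combined_allocs - per-stack running totals for --combined-only
 */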

static inline void update_statistics_add(u64 stack_id, u64 sz) {
        struct combined_alloc_info_t *existing_cinfo;
        struct combined_alloc_info_t cinfo = {0};

        existing_cinfo = combined_allocs.lookup(&stack_id);
        if (existing_cinfo != 0)
                cinfo = *existing_cinfo;

        cinfo.total_size += sz;
        cinfo.number_of_allocs += 1;

        combined_allocs.update(&stack_id, &cinfo);
}
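
/* Note: the lookup/update pair above is not atomic, so concurrent
 * allocations on different CPUs that share a stack id can race; the
 * combined statistics are therefore approximate. */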

static inline void update_statistics_del(u64 stack_id, u64 sz) {
        struct combined_alloc_info_t *existing_cinfo;
        struct combined_alloc_info_t cinfo = {0};

        existing_cinfo = combined_allocs.lookup(&stack_id);
        if (existing_cinfo != 0)
                cinfo = *existing_cinfo;

        if (sz >= cinfo.total_size)
                cinfo.total_size = 0;
        else
                cinfo.total_size -= sz;

        if (cinfo.number_of_allocs > 0)
                cinfo.number_of_allocs -= 1;

        combined_allocs.update(&stack_id, &cinfo);
}
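
/* The clamping in update_statistics_del is defensive: because the
 * updates are not atomic, a lost update could otherwise make the
 * unsigned counters wrap around on subtraction. */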

static inline int gen_alloc_enter(struct pt_regs *ctx, size_t size) {
        SIZE_FILTER
        if (SAMPLE_EVERY_N > 1) {
                u64 ts = bpf_ktime_get_ns();
                if (ts % SAMPLE_EVERY_N != 0)
                        return 0;
        }

        u64 pid = bpf_get_current_pid_tgid();
        u64 size64 = size;
        sizes.update(&pid, &size64);

        if (SHOULD_PRINT)
                bpf_trace_printk("alloc entered, size = %u\\n", size);
        return 0;
}
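
/* Sampling (-s N) gates on the nanosecond timestamp modulo N as a cheap
 * pseudo-random filter, so with N > 1 the reported outstanding totals
 * undercount real usage by roughly a factor of N. */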

static inline int gen_alloc_exit2(struct pt_regs *ctx, u64 address) {
        u64 pid = bpf_get_current_pid_tgid();
        u64* size64 = sizes.lookup(&pid);
        struct alloc_info_t info = {0};

        if (size64 == 0)
                return 0; // missed alloc entry

        info.size = *size64;
        sizes.delete(&pid);

        if (address != 0) {
                info.timestamp_ns = bpf_ktime_get_ns();
                info.stack_id = stack_traces.get_stackid(ctx, STACK_FLAGS);
                allocs.update(&address, &info);
                update_statistics_add(info.stack_id, info.size);
        }

        if (SHOULD_PRINT) {
                bpf_trace_printk("alloc exited, size = %lu, result = %lx\\n",
                                 info.size, address);
        }
        return 0;
}

static inline int gen_alloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit2(ctx, PT_REGS_RC(ctx));
}
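
/* Entry and exit probes are paired through the sizes map, keyed by the
 * current thread's pid/tgid; a missed exit simply leaves a stale entry
 * that the same thread's next allocation overwrites. */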

static inline int gen_free_enter(struct pt_regs *ctx, void *address) {
        u64 addr = (u64)address;
        struct alloc_info_t *info = allocs.lookup(&addr);
        if (info == 0)
                return 0;

        allocs.delete(&addr);
        update_statistics_del(info->stack_id, info->size);

        if (SHOULD_PRINT) {
                bpf_trace_printk("free entered, address = %lx, size = %lu\\n",
                                 address, info->size);
        }
        return 0;
}

int malloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int malloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int free_enter(struct pt_regs *ctx, void *address) {
        return gen_free_enter(ctx, address);
}

int calloc_enter(struct pt_regs *ctx, size_t nmemb, size_t size) {
        return gen_alloc_enter(ctx, nmemb * size);
}

int calloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int realloc_enter(struct pt_regs *ctx, void *ptr, size_t size) {
        gen_free_enter(ctx, ptr);
        return gen_alloc_enter(ctx, size);
}

int realloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}
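
/* realloc is modeled as a free of the old pointer followed by a fresh
 * allocation of the new size, so a realloc that returns the same address
 * is still re-recorded at that address with the new size. */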

int posix_memalign_enter(struct pt_regs *ctx, void **memptr, size_t alignment,
                         size_t size) {
        u64 memptr64 = (u64)(size_t)memptr;
        u64 pid = bpf_get_current_pid_tgid();

        memptrs.update(&pid, &memptr64);
        return gen_alloc_enter(ctx, size);
}

int posix_memalign_exit(struct pt_regs *ctx) {
        u64 pid = bpf_get_current_pid_tgid();
        u64 *memptr64 = memptrs.lookup(&pid);
        void *addr;

        if (memptr64 == 0)
                return 0;

        memptrs.delete(&pid);

        if (bpf_probe_read_user(&addr, sizeof(void*), (void*)(size_t)*memptr64))
                return 0;

        u64 addr64 = (u64)(size_t)addr;
        return gen_alloc_exit2(ctx, addr64);
}
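
/* Unlike the other allocators, posix_memalign returns its result through
 * an out-parameter, so the pointer-to-pointer is stashed in memptrs on
 * entry and dereferenced with bpf_probe_read_user on exit. */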

int aligned_alloc_enter(struct pt_regs *ctx, size_t alignment, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int aligned_alloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int valloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int valloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int memalign_enter(struct pt_regs *ctx, size_t alignment, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int memalign_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}

int pvalloc_enter(struct pt_regs *ctx, size_t size) {
        return gen_alloc_enter(ctx, size);
}

int pvalloc_exit(struct pt_regs *ctx) {
        return gen_alloc_exit(ctx);
}
"""
bpf_source_kernel = """
TRACEPOINT_PROBE(kmem, kmalloc) {
if (WORKAROUND_MISSING_FREE)
gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}
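
/* With --wa-missing-free, an allocation that returns an address we still
 * consider outstanding is treated as an implicit free first; this
 * compensates for kernel free paths that lack a tracepoint. */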

TRACEPOINT_PROBE(kmem, kmalloc_node) {
        if (WORKAROUND_MISSING_FREE)
                gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kfree) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_alloc) {
        if (WORKAROUND_MISSING_FREE)
                gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_alloc_node) {
        if (WORKAROUND_MISSING_FREE)
                gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
        gen_alloc_enter((struct pt_regs *)args, args->bytes_alloc);
        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}

TRACEPOINT_PROBE(kmem, kmem_cache_free) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}

TRACEPOINT_PROBE(kmem, mm_page_alloc) {
        gen_alloc_enter((struct pt_regs *)args, PAGE_SIZE << args->order);
        return gen_alloc_exit2((struct pt_regs *)args, args->pfn);
}

TRACEPOINT_PROBE(kmem, mm_page_free) {
        return gen_free_enter((struct pt_regs *)args, (void *)args->pfn);
}
"""
bpf_source_percpu = """
TRACEPOINT_PROBE(percpu, percpu_alloc_percpu) {
gen_alloc_enter((struct pt_regs *)args, args->size);
return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->ptr);
}
TRACEPOINT_PROBE(percpu, percpu_free_percpu) {
return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
}
"""

if kernel_trace:
    if args.percpu:
        bpf_source += bpf_source_percpu
    else:
        bpf_source += bpf_source_kernel

if kernel_trace:
    bpf_source = bpf_source.replace("WORKAROUND_MISSING_FREE",
                                    "1" if args.wa_missing_free else "0")

bpf_source = bpf_source.replace("SHOULD_PRINT", "1" if trace_all else "0")
bpf_source = bpf_source.replace("SAMPLE_EVERY_N", str(sample_every_n))
bpf_source = bpf_source.replace("PAGE_SIZE", str(resource.getpagesize()))
size_filter = ""
if min_size is not None and max_size is not None:
size_filter = "if (size < %d || size > %d) return 0;" % \
(min_size, max_size)
elif min_size is not None:
size_filter = "if (size < %d) return 0;" % min_size
elif max_size is not None:
size_filter = "if (size > %d) return 0;" % max_size
bpf_source = bpf_source.replace("SIZE_FILTER", size_filter)
stack_flags = "0"
if not kernel_trace:
stack_flags += "|BPF_F_USER_STACK"
bpf_source = bpf_source.replace("STACK_FLAGS", stack_flags)

if args.ebpf:
    print(bpf_source)
    exit()

bpf = BPF(text=bpf_source)

if not kernel_trace:
    print("Attaching to pid %d, Ctrl+C to quit." % pid)

    def attach_probes(sym, fn_prefix=None, can_fail=False):
        if fn_prefix is None:
            fn_prefix = sym
        try:
            bpf.attach_uprobe(name=obj, sym=sym,
                              fn_name=fn_prefix + "_enter",
                              pid=pid)
            bpf.attach_uretprobe(name=obj, sym=sym,
                                 fn_name=fn_prefix + "_exit",
                                 pid=pid)
        except Exception:
            if can_fail:
                return
            else:
                raise
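
    # Each allocator function gets a uprobe/uretprobe pair: the entry
    # probe records the requested size, the return probe records the
    # address at which it was served.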
attach_probes("malloc")
attach_probes("calloc")
attach_probes("realloc")
attach_probes("posix_memalign")
attach_probes("valloc", can_fail=True) # failed on Android, is deprecated in libc.so from bionic directory
attach_probes("memalign")
attach_probes("pvalloc", can_fail=True) # failed on Android, is deprecated in libc.so from bionic directory
attach_probes("aligned_alloc", can_fail=True) # added in C11
bpf.attach_uprobe(name=obj, sym="free", fn_name="free_enter",
pid=pid)
else:
    print("Attaching to kernel allocators, Ctrl+C to quit.")

    # No probe attaching here. Allocations are counted by attaching to
    # tracepoints.
    #
    # Memory allocations in the Linux kernel are not limited to malloc/free
    # equivalents: it is also common to allocate one or more whole pages.
    # The page allocator has two interfaces, one working with page frame
    # numbers (PFNs) and the other with page addresses, and pages can be
    # allocated through one interface and freed through the other. Kernel
    # code can easily convert between PFNs and addresses, but doing the
    # same from an eBPF kprobe is hard without fragile hacks.
    #
    # Fortunately, Linux exposes tracepoints for memory allocations, which
    # eBPF programs can instrument. The tracepoint for page allocations
    # exposes the PFN for both allocator interfaces, so there is no need
    # to guess which allocation corresponds to which free.

def print_outstanding():
    print("[%s] Top %d stacks with outstanding allocations:" %
          (datetime.now().strftime("%H:%M:%S"), top_stacks))
    alloc_info = {}
    allocs = bpf["allocs"]
    stack_traces = bpf["stack_traces"]
    for address, info in sorted(allocs.items(), key=lambda a: a[1].size):
        if BPF.monotonic_time() - min_age_ns < info.timestamp_ns:
            continue
        if info.stack_id < 0:
            continue
        if info.stack_id in alloc_info:
            alloc_info[info.stack_id].update(info.size)
        else:
            stack = list(stack_traces.walk(info.stack_id))
            combined = []
            for addr in stack:
                combined.append(bpf.sym(addr, pid,
                                        show_module=True, show_offset=True))
            alloc_info[info.stack_id] = Allocation(combined, info.size)
        if args.show_allocs:
            print("\taddr = %x size = %s" %
                  (address.value, info.size))
    to_show = sorted(alloc_info.values(), key=lambda a: a.size)[-top_stacks:]
    for alloc in to_show:
        print("\t%d bytes in %d allocations from stack\n\t\t%s" %
              (alloc.size, alloc.count,
               b"\n\t\t".join(alloc.stack).decode("ascii")))

def print_outstanding_combined():
    stack_traces = bpf["stack_traces"]
    stacks = sorted(bpf["combined_allocs"].items(),
                    key=lambda a: -a[1].total_size)
    cnt = 1
    entries = []
    for stack_id, info in stacks:
        try:
            trace = []
            for addr in stack_traces.walk(stack_id.value):
                sym = bpf.sym(addr, pid,
                              show_module=True,
                              show_offset=True)
                trace.append(sym)
            # bpf.sym() returns bytes on Python 3, so join as bytes and
            # decode, matching print_outstanding() above
            trace = b"\n\t\t".join(trace).decode("ascii")
        except KeyError:
            trace = "stack information lost"

        entry = ("\t%d bytes in %d allocations from stack\n\t\t%s" %
                 (info.total_size, info.number_of_allocs, trace))
        entries.append(entry)

        cnt += 1
        if cnt > top_stacks:
            break

    print("[%s] Top %d stacks with outstanding allocations:" %
          (datetime.now().strftime("%H:%M:%S"), top_stacks))
    print('\n'.join(reversed(entries)))
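
# Unlike print_outstanding(), the combined report reads per-stack totals
# that were aggregated in kernel space, which keeps reporting cheap even
# when a very large number of individual allocations is outstanding.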

count_so_far = 0
while True:
    if trace_all:
        print(bpf.trace_fields())
    else:
        try:
            sleep(interval)
        except KeyboardInterrupt:
            exit()
        if args.combined_only:
            print_outstanding_combined()
        else:
            print_outstanding()
        sys.stdout.flush()
        count_so_far += 1
        if num_prints is not None and count_so_far >= num_prints:
            exit()