forked from iovisor/bcc
-
Notifications
You must be signed in to change notification settings - Fork 0
/
cachestat.py
executable file
·176 lines (151 loc) · 4.6 KB
/
cachestat.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
#!/usr/bin/python
#
# cachestat Count cache kernel function calls.
# For Linux, uses BCC, eBPF. See .c file.
#
# USAGE: cachestat
# Taken from funccount by Brendan Gregg
# This is a rewrite of cachestat from perf to bcc
# https://github.com/brendangregg/perf-tools/blob/master/fs/cachestat
#
# Copyright (c) 2016 Allan McAleavy.
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 09-Sep-2015 Brendan Gregg Created this.
# 06-Nov-2015 Allan McAleavy
# 13-Jan-2016 Allan McAleavy run pep8 against program
# 02-Feb-2019 Brendan Gregg Column shuffle, bring back %ratio
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
import signal
import re
from sys import argv
# signal handler
def signal_ignore(signum, frame):
    """SIGINT handler installed during cleanup: emit a newline, do nothing else.

    Installed once the main loop decides to exit, so that a second Ctrl-C
    while detaching does not raise KeyboardInterrupt mid-teardown.

    The first parameter is named ``signum`` (not ``signal``) so it does not
    shadow the imported ``signal`` module.
    """
    print()
# Function to gather data from /proc/meminfo
# return dictionary for quicker lookup of both values
def get_meminfo(path='/proc/meminfo'):
    """Parse a meminfo-style file into a dict of integer values.

    Each input line has the form ``Name:   12345 kB`` (the unit may be
    absent, e.g. for HugePages counters); only the leading integer is kept.

    Args:
        path: file to parse; defaults to the live ``/proc/meminfo`` so
            existing callers are unaffected.

    Returns:
        dict mapping field name (str) to its integer value (usually kB).
    """
    result = dict()
    # 'with' guarantees the descriptor is closed even if parsing raises
    # (the original leaked the handle until garbage collection).
    with open(path) as f:
        for line in f:
            k = line.split(':', 3)
            v = k[1].split()
            result[k[0]] = int(v[0])
    return result
# Global per-interval counters (reset after every printed line):
#   mpa  = mark_page_accessed calls       mbd  = mark_buffer_dirty calls
#   apcl = add_to_page_cache_lru calls    apd  = account_page_dirtied calls
mpa = mbd = apcl = apd = 0
total = misses = hits = 0
debug = 0

# command-line arguments
parser = argparse.ArgumentParser(
    description="Count cache kernel function calls",
    formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-T", "--timestamp", action="store_true",
                    help="include timestamp on output")
parser.add_argument("interval", nargs="?", default=1,
                    help="output interval, in seconds")
parser.add_argument("count", nargs="?", default=-1,
                    help="number of outputs")
parser.add_argument("--ebpf", action="store_true",
                    help=argparse.SUPPRESS)
args = parser.parse_args()

count = int(args.count)        # -1 (the default) means run forever
tstamp = args.timestamp
interval = int(args.interval)
# BPF program: one generic kprobe handler that counts invocations per
# instruction pointer, so a single program can be attached to every
# traced kernel function and later distinguished by ksym(ip).
bpf_text = """
#include <uapi/linux/ptrace.h>
struct key_t {
    u64 ip;
};
BPF_HASH(counts, struct key_t);
int do_count(struct pt_regs *ctx) {
    struct key_t key = {};
    u64 ip;
    key.ip = PT_REGS_IP(ctx);
    counts.increment(key); // update counter
    return 0;
}
"""

# Debug aid: dump the program source; with --ebpf, dump and stop.
if debug or args.ebpf:
    print(bpf_text)
if args.ebpf:
    exit()
# Compile and load the BPF program, then attach the shared counting
# handler to each page-cache-related kernel function.
b = BPF(text=bpf_text)
for kernel_fn in ("add_to_page_cache_lru",
                  "mark_page_accessed",
                  "account_page_dirtied",
                  "mark_buffer_dirty"):
    b.attach_kprobe(event=kernel_fn, fn_name="do_count")

# column header (optional leading TIME column)
if tstamp:
    print("%-8s " % "TIME", end="")
print("%8s %8s %8s %8s %12s %10s" %
      ("HITS", "MISSES", "DIRTIES", "HITRATIO", "BUFFERS_MB", "CACHED_MB"))
# Main loop: sleep one interval, fold the kprobe counts into hit/miss
# statistics, print one line, reset, repeat until count is exhausted or
# the user interrupts.
loop = 0
exiting = 0
while True:
    if count > 0:
        loop += 1
        if loop > count:
            exit()

    try:
        sleep(interval)
    except KeyboardInterrupt:
        exiting = 1
        # as cleanup can take many seconds, trap Ctrl-C:
        signal.signal(signal.SIGINT, signal_ignore)

    # Resolve each counted instruction pointer to a symbol and pick out
    # the four functions of interest. Prefix match (rather than exact
    # equality) tolerates compiler-mangled suffixes such as ".isra".
    counts = b["counts"]
    for key, val in sorted(counts.items(), key=lambda kv: kv[1].value):
        fname = b.ksym(key.ip)
        n = max(0, val.value)
        if fname.startswith(b"mark_page_accessed"):
            mpa = n
        if fname.startswith(b"mark_buffer_dirty"):
            mbd = n
        if fname.startswith(b"add_to_page_cache_lru"):
            apcl = n
        if fname.startswith(b"account_page_dirtied"):
            apd = n

    # total  = cache accesses excluding dirties
    # misses = LRU insertions caused by read misses (not by dirtying)
    total = max(0, mpa - mbd)
    misses = max(0, apcl - apd)
    hits = total - misses
    # If hits are < 0, then its possible misses are overestimated
    # due to possibly page cache read ahead adding more pages than
    # needed. In this case just assume misses as total and reset hits.
    if hits < 0:
        misses = total
        hits = 0
    ratio = float(hits) / total if total > 0 else 0

    if debug:
        print("%d %d %d %d %d %d %d\n" %
              (mpa, mbd, apcl, apd, total, misses, hits))

    counts.clear()

    # memory stats come straight from /proc/meminfo (kB -> MB)
    mem = get_meminfo()
    cached = int(mem["Cached"]) / 1024
    buff = int(mem["Buffers"]) / 1024

    if tstamp:
        print("%-8s " % strftime("%H:%M:%S"), end="")
    print("%8d %8d %8d %7.2f%% %12.0f %10.0f" %
          (hits, misses, mbd, 100 * ratio, buff, cached))

    # reset every per-interval counter before the next sample
    mpa = mbd = apcl = apd = total = misses = hits = cached = buff = 0

    if exiting:
        print("Detaching...")
        exit()