/*
* Copyright (c) 2013 Intel Corporation
* Author: Andi Kleen
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that: (1) source code distributions
* retain the above copyright notice and this paragraph in its entirety, (2)
* distributions including binary code include the above copyright notice and
* this paragraph in its entirety in the documentation or other materials
* provided with the distribution
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/**
* DOC: A simple perf library to manage the perf ring buffer
*
 * This library provides a simple wrapping layer for the perf
 * mmap ring buffer, which lets a user program access perf events
 * with zero copies.
*/
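/*
 * Example: a minimal sketch of the intended call sequence; the attribute
 * values below (cycle sampling with a period of 100000) are illustrative
 * assumptions, not requirements of this library.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.size = sizeof(struct perf_event_attr),
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_type = PERF_SAMPLE_IP,
 *		.sample_period = 100000,
 *		.disabled = 1,
 *	};
 *	struct perf_fd pfd;
 *
 *	if (perf_fd_open(&pfd, &attr, 8) < 0)	// 2^8 pages of ring buffer
 *		return -1;
 *	perf_enable(&pfd);
 *	// ... run the workload to be measured ...
 *	perf_disable(&pfd);
 *	// drain the buffer with perf_iter_init()/perf_buffer_read();
 *	// see the example after perf_iter_continue() below
 *	perf_fd_close(&pfd);
 */
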
#include <linux/perf_event.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>	/* ffs() */
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "jevents.h"
#include "util.h"
#include "perf-iter.h"

/**
* perf_iter_init - Initialize iterator for perf ring buffer
* @iter: Iterator to initialize.
* @pfd: perf_fd from perf_fd_open() to use with the iterator.
*
* Needs to be called first to start walking a perf buffer.
*/
void perf_iter_init(struct perf_iter *iter, struct perf_fd *pfd)
{
	int pagesize = sysconf(_SC_PAGESIZE);
	int page_shift = ffs(pagesize) - 1;

	iter->mpage = pfd->mpage;
	iter->bufsize = (1ULL << (pfd->buf_size_shift + page_shift));
	iter->ring_buffer_mask = iter->bufsize - 1;
	iter->cur = iter->mpage->data_tail & iter->ring_buffer_mask;
	/* Kernel only changes head */
	iter->raw_head = iter->mpage->data_head;
	iter->avail = iter->raw_head - iter->mpage->data_tail;
	iter->head = iter->raw_head & iter->ring_buffer_mask;
	/* Order the read of data_head before any reads of the ring data */
	mb();
	/* The data area starts one page after the control page */
	iter->data = (char *)(iter->mpage) + pagesize;
}

/**
 * perf_buffer_read - Read the next event from a perf ring iterator.
 * @iter: Iterator to read from
 * @buffer: Temporary buffer to use for wrapped events
 * @bufsize: Size of the temporary buffer
 *
 * Return the next available perf_event_header in the ring buffer.
 * Normally this is zero-copy, but an event that wraps around the end
 * of the ring buffer is copied into the supplied temporary buffer,
 * and a pointer into that buffer is returned instead.
 *
 * Return: NULL when no event is available or a wrapped event does not
 * fit into @buffer, otherwise a pointer to the next perf_event_header.
 */
struct perf_event_header *perf_buffer_read(struct perf_iter *iter, void *buffer, int bufsize)
{
	struct perf_event_header *hdr = (struct perf_event_header *)(iter->data + iter->cur);
	u64 left = iter->bufsize - iter->cur;

	if (left >= sizeof(hdr->size) && hdr->size <= left) {
		/* Fast path: the event is contiguous, return it zero-copy */
		iter->cur += hdr->size;
		iter->avail -= hdr->size;
		return hdr;
	} else {
		/*
		 * The event wraps around the end of the buffer. This case is
		 * untested in this example. Assumes hdr->size itself is
		 * always contiguous.
		 */
		if (left) {
			if (hdr->size > bufsize)
				return NULL;
			/* Copy the part before the wrap point */
			memcpy(buffer, hdr, left);
		} else {
			/* cur is exactly at the end: the event starts at the beginning */
			hdr = (struct perf_event_header *)iter->data;
			if (hdr->size > bufsize)
				return NULL;
		}
		/* Copy the remainder from the start of the data area */
		memcpy((char *)buffer + left, iter->data, hdr->size - left);
		iter->cur = hdr->size - left;
		iter->avail -= hdr->size;
		return buffer;
	}
}

/**
 * perf_iter_continue - Allow the kernel to log over our data.
 * @iter: Iterator.
 *
 * Tell the kernel we are finished with the data, so it can
 * continue logging into the buffer.
 */
void perf_iter_continue(struct perf_iter *iter)
{
	/* Finish all reads of the data before the kernel may overwrite it */
	mb();
	iter->mpage->data_tail = iter->raw_head;
}

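/*
 * Example: one way to drain a stopped buffer (a sketch; assumes "pfd" was
 * set up as in the example at the top and collection was stopped with
 * perf_disable() first; decoding of the sample payload is omitted):
 *
 *	struct perf_iter iter;
 *	char buf[256];	// scratch space for events that wrap
 *
 *	perf_iter_init(&iter, &pfd);
 *	while (iter.avail > 0) {
 *		struct perf_event_header *hdr =
 *			perf_buffer_read(&iter, buf, sizeof(buf));
 *		if (!hdr)
 *			break;	// buffer too small for a wrapped event
 *		if (hdr->type == PERF_RECORD_SAMPLE)
 *			;	// decode according to attr.sample_type
 *	}
 *	perf_iter_continue(&iter);
 */
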
/**
 * perf_mmap_size - Compute the mmap size for a perf ring buffer.
 * @buf_size_shift: log2 of the buffer size in pages.
 *
 * The mapping is 2^buf_size_shift data pages plus one control page
 * (the struct perf_event_mmap_page) in front.
 */
unsigned perf_mmap_size(int buf_size_shift)
{
	return ((1U << buf_size_shift) + 1) * sysconf(_SC_PAGESIZE);
}

/**
 * perf_fd_open - Open a perf event with ring buffer for the current thread
 * @p: perf_fd to initialize
 * @attr: perf event attribute to use
 * @buf_size_shift: log2 of the buffer size in pages.
 *
 * Return: -1 on error, otherwise 0.
 */
int perf_fd_open(struct perf_fd *p, struct perf_event_attr *attr, int buf_size_shift)
{
return perf_fd_open_other(p, attr, buf_size_shift, 0, -1);
}
/**
 * perf_fd_open_other - Open a perf event with ring buffer for another thread or CPU
 * @p: perf_fd to initialize
 * @attr: perf event attribute to use
 * @buf_size_shift: log2 of the buffer size in pages.
 * @pid: pid/tid to trace, 0 for the current thread, or -1 for any
 * @cpu: cpu to trace, or -1 for any.
 *
 * Return: -1 on error, otherwise 0.
 */
int perf_fd_open_other(struct perf_fd *p, struct perf_event_attr *attr, int buf_size_shift,
int pid, int cpu)
{
p->pfd = perf_event_open(attr, pid, cpu, -1, 0);
if (p->pfd < 0)
return -1;
struct perf_event_mmap_page *mpage;
mpage = mmap(NULL, perf_mmap_size(buf_size_shift),
PROT_READ|PROT_WRITE, MAP_SHARED,
p->pfd, 0);
	if (mpage == MAP_FAILED) {
		close(p->pfd);
		return -1;
	}
p->mpage = mpage;
p->buf_size_shift = buf_size_shift;
return 0;
}
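
/*
 * Example: monitoring a specific CPU system-wide (a sketch; "attr" as in
 * the example at the top, CPU 0 chosen arbitrarily; pid -1 with an
 * explicit CPU typically requires elevated perf_event_paranoid
 * privileges):
 *
 *	struct perf_fd pfd;
 *
 *	if (perf_fd_open_other(&pfd, &attr, 8, -1, 0) < 0)
 *		return -1;
 */
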
/**
* perf_fd_close - Close perf_fd
* @p: pfd to close.
*/
void perf_fd_close(struct perf_fd *p)
{
munmap(p->mpage, perf_mmap_size(p->buf_size_shift));
close(p->pfd);
p->mpage = NULL;
}
/**
 * perf_enable - Start perf collection on pfd
 * @p: perf fd
 *
 * Return: -1 for error, otherwise 0.
 */
int perf_enable(struct perf_fd *p)
{
return ioctl(p->pfd, PERF_EVENT_IOC_ENABLE, 0);
}
/**
 * perf_disable - Stop perf collection on pfd
 * @p: perf fd
 *
 * Return: -1 for error, otherwise 0.
 */
int perf_disable(struct perf_fd *p)
{
return ioctl(p->pfd, PERF_EVENT_IOC_DISABLE, 0);
}