1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
|
/* -*- c++ -*- */
/*
* Copyright 2004,2009,2010,2013 Free Software Foundation, Inc.
*
* This file is part of GNU Radio
*
* SPDX-License-Identifier: GPL-3.0-or-later
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "vmcircbuf.h"
#include <gnuradio/buffer.h>
#include <gnuradio/integer_math.h>
#include <gnuradio/math.h>
#include <assert.h>
#include <algorithm>
#include <iostream>
#include <stdexcept>
namespace gr {
// Debugging statistics: counts of live gr::buffer / gr::buffer_reader
// instances, reported by buffer_ncurrently_allocated() and
// buffer_reader_ncurrently_allocated().
static long s_buffer_count = 0; // counts for debugging storage mgmt
static long s_buffer_reader_count = 0;
/* ----------------------------------------------------------------------------
Notes on storage management
Pretty much all the fundamental classes are now using the
shared_ptr stuff for automatic reference counting. To ensure that
no mistakes are made, we make the constructors for classes private,
and then provide a free factory function that returns a smart
pointer to the desired class.
gr::buffer and gr::buffer_reader are no exceptions. However, they
both want pointers to each other, and unless we do something, we'll
never delete any of them because of the circular structure.
They'll always have a reference count of at least one. We could
use std::weak_ptr's from gr::buffer to gr::buffer_reader but that
introduces its own problems. (gr::buffer_reader's destructor needs
to call gr::buffer::drop_reader, but has no easy way to get a
shared_ptr to itself.)
Instead, we solve this problem by having gr::buffer hold a raw
pointer to gr::buffer_reader in its d_reader vector.
gr::buffer_reader's destructor calls gr::buffer::drop_reader, so
we're never left with a dangling pointer. gr::buffer_reader still
has a shared_ptr to the buffer ensuring that the buffer doesn't go
away under it. However, when the reference count of a
gr::buffer_reader goes to zero, we can successfully reclaim it.
---------------------------------------------------------------------------- */
/*
* Compute the minimum number of buffer items that work (i.e.,
* address space wrap-around works). To work is to satisfy this
* constraint for integer buffer_size and k:
*
* type_size * nitems == k * page_size
*/
/*!
 * \brief Smallest item count whose byte size is a whole number of pages.
 *
 * Returns the least nitems such that type_size * nitems is an exact
 * multiple of page_size, i.e. page_size / gcd(type_size, page_size).
 */
static inline long minimum_buffer_items(long type_size, long page_size)
{
    const long common = GR_GCD(type_size, page_size);
    return page_size / common;
}
/*!
 * \brief Construct a circular buffer holding at least \p nitems items of
 * \p sizeof_item bytes each; \p link is the block that writes into it.
 *
 * Throws std::bad_alloc if the underlying VM circular buffer cannot be
 * mapped.
 */
buffer::buffer(int nitems, size_t sizeof_item, block_sptr link)
    : d_base(0),
      d_bufsize(0),
      d_max_reader_delay(0),
      d_sizeof_item(sizeof_item),
      d_link(link),
      d_write_index(0),
      d_abs_write_offset(0),
      d_done(false),
      d_last_min_items_read(0)
{
    gr::configure_default_loggers(d_logger, d_debug_logger, "buffer");
    // allocate_buffer() fills in d_base and d_bufsize, rounding nitems up
    // to satisfy the platform's mapping granularity.
    if (!allocate_buffer(nitems, sizeof_item))
        throw std::bad_alloc();
    s_buffer_count++; // debugging statistic; see buffer_ncurrently_allocated()
}
/*!
 * \brief Factory: create a gr::buffer and return a smart pointer to it.
 *
 * The constructor is never called directly, so every buffer's lifetime is
 * managed through the shared_ptr (see the storage-management notes above).
 */
buffer_sptr make_buffer(int nitems, size_t sizeof_item, block_sptr link)
{
    return buffer_sptr(new buffer(nitems, sizeof_item, link));
}
buffer::~buffer()
{
    // Every reader holds a shared_ptr to this buffer and unregisters
    // itself in its own destructor, so none can still be attached here.
    assert(d_readers.size() == 0);
    s_buffer_count--; // debugging statistic
}
/*!
 * \brief Allocate the VM circular buffer backing storage.
 *
 * Sets d_vmcircbuf, d_base and d_bufsize. \p nitems is rounded up so
 * that the buffer's byte size is a whole number of allocation-granularity
 * units (required for the address-space wrap-around trick to work).
 *
 * \returns true iff successful.
 */
bool buffer::allocate_buffer(int nitems, size_t sizeof_item)
{
    const int orig_nitems = nitems;

    // Any buffersize we come up with must be a multiple of min_nitems.
    int granularity = gr::vmcircbuf_sysconfig::granularity();
    int min_nitems = minimum_buffer_items(sizeof_item, granularity);

    // Round-up nitems to a multiple of min_nitems.
    if (nitems % min_nitems != 0)
        nitems = ((nitems / min_nitems) + 1) * min_nitems;

    // If we rounded-up a whole bunch, give the user a heads up.
    // This only happens if sizeof_item is not a power of two.
    // NOTE: message assembly uses std::ostringstream for consistency with
    // the error path below (and avoids the boost::format dependency, which
    // is not among this file's includes).
    if (nitems > 2 * orig_nitems && nitems * static_cast<int>(sizeof_item) > granularity) {
        std::ostringstream msg;
        msg << "allocate_buffer: tried to allocate " << orig_nitems << " items of size "
            << sizeof_item << ". Due to alignment requirements " << nitems
            << " were allocated. If this isn't OK, consider padding"
               " your structure to a power-of-two bytes."
               " On this platform, our allocation granularity is "
            << granularity << " bytes.";
        GR_LOG_WARN(d_logger, msg.str());
    }

    d_bufsize = nitems;
    d_vmcircbuf.reset(gr::vmcircbuf_sysconfig::make(d_bufsize * d_sizeof_item));
    if (!d_vmcircbuf) {
        std::ostringstream msg;
        msg << "gr::buffer::allocate_buffer: failed to allocate buffer of size "
            << d_bufsize * d_sizeof_item / 1024 << " KB";
        GR_LOG_ERROR(d_logger, msg.str());
        return false;
    }

    d_base = static_cast<char*>(d_vmcircbuf->pointer_to_first_copy());
    return true;
}
int buffer::space_available()
{
    // With no readers attached, the entire ring minus the one sentinel
    // slot is writable (see comment at the bottom for the -1).
    if (d_readers.empty())
        return d_bufsize - 1;

    // Find the reader that is furthest behind (most unread data) and the
    // smallest absolute read offset across all readers.
    int most_data = d_readers[0]->items_available();
    uint64_t min_items_read = d_readers[0]->nitems_read();
    for (size_t n = 1; n < d_readers.size(); n++) {
        most_data = std::max(most_data, d_readers[n]->items_available());
        min_items_read = std::min(min_items_read, d_readers[n]->nitems_read());
    }

    // Whenever the slowest reader has advanced, drop tags that are now
    // too old for any reader to see.
    if (min_items_read != d_last_min_items_read) {
        prune_tags(d_last_min_items_read);
        d_last_min_items_read = min_items_read;
    }

    // The -1 ensures that the case d_write_index == d_read_index is
    // unambiguous. It indicates that there is no data for the reader.
    return d_bufsize - most_data - 1;
}
void* buffer::write_pointer() { return &d_base[d_write_index * d_sizeof_item]; }
// Advance the circular write index by nitems and bump the absolute
// (monotonically increasing) write offset. Takes the buffer mutex.
void buffer::update_write_pointer(int nitems)
{
    gr::thread::scoped_lock guard(*mutex());
    d_write_index = index_add(d_write_index, nitems);
    d_abs_write_offset += nitems;
}
// Mark the buffer as done (writer finished) or not. Takes the buffer mutex.
void buffer::set_done(bool done)
{
    gr::thread::scoped_lock guard(*mutex());
    d_done = done;
}
/*!
 * \brief Factory: attach a new reader to \p buf.
 *
 * The reader's start index is placed nzero_preload items behind the
 * current write index, so it sees nzero_preload items as immediately
 * available. \p delay is the reader's declared sample delay.
 * Throws std::invalid_argument if nzero_preload is negative.
 */
buffer_reader_sptr
buffer_add_reader(buffer_sptr buf, int nzero_preload, block_sptr link, int delay)
{
    if (nzero_preload < 0)
        throw std::invalid_argument("buffer_add_reader: nzero_preload must be >= 0");

    buffer_reader_sptr r(
        new buffer_reader(buf, buf->index_sub(buf->d_write_index, nzero_preload), link));
    r->declare_sample_delay(delay);
    // The buffer stores only a raw pointer back to the reader; the
    // reader's destructor removes it via drop_reader(), which breaks the
    // otherwise-circular ownership (see storage-management notes above).
    buf->d_readers.push_back(r.get());

    return r;
}
// Unregister a reader's raw pointer; called from ~buffer_reader().
void buffer::drop_reader(buffer_reader* reader)
{
    auto pos = std::find(d_readers.begin(), d_readers.end(), reader);
    if (pos == d_readers.end()) // reader was never registered with us
        throw std::invalid_argument("buffer::drop_reader");

    d_readers.erase(pos);
}
// Record a stream tag, keyed by its absolute item offset. Multiple tags
// may share the same offset (multimap). Takes the buffer mutex.
void buffer::add_item_tag(const tag_t& tag)
{
    gr::thread::scoped_lock guard(*mutex());
    d_item_tags.emplace(tag.offset, tag);
}
// Mark every stored tag equal to \p tag as deleted for reader \p id.
// Tags are not physically erased; each reader id is appended to the
// tag's marked_deleted list so other readers still see the tag.
void buffer::remove_item_tag(const tag_t& tag, long id)
{
    gr::thread::scoped_lock guard(*mutex());
    auto range = d_item_tags.equal_range(tag.offset);
    for (auto it = range.first; it != range.second; ++it) {
        if (it->second == tag)
            it->second.marked_deleted.push_back(id);
    }
}
// Erase tags that are too old for any reader to ever see again.
void buffer::prune_tags(uint64_t max_time)
{
    /* NOTE: this function _should_ lock the mutex before editing
       d_item_tags. In practice, this function is only called at
       runtime by min_available_space in block_executor.cc, which
       locks the mutex itself.

       If this function is used elsewhere, remember to lock the
       buffer's mutex first, a la the scoped_lock:
           gr::thread::scoped_lock guard(*mutex());
    */

    // d_item_tags is sorted by offset, so we can stop at the first tag
    // that is still recent enough to keep: every later tag is newer.
    // multimap::erase(iterator) returns the next valid iterator, so no
    // save-then-increment dance is needed.
    auto itr = d_item_tags.begin();
    while (itr != d_item_tags.end()) {
        const uint64_t item_time = itr->second.offset;
        if (item_time + d_max_reader_delay + bufsize() < max_time)
            itr = d_item_tags.erase(itr);
        else
            break;
    }
}
long buffer_ncurrently_allocated() { return s_buffer_count; }
// ----------------------------------------------------------------------------
/*!
 * \brief Construct a reader positioned at \p read_index within \p buffer.
 *
 * The shared_ptr to the buffer keeps it alive for the reader's lifetime;
 * registration of the back-pointer happens in buffer_add_reader().
 */
buffer_reader::buffer_reader(buffer_sptr buffer, unsigned int read_index, block_sptr link)
    : d_buffer(buffer),
      d_read_index(read_index),
      d_abs_read_offset(0),
      d_link(link),
      d_attr_delay(0)
{
    s_buffer_reader_count++; // debugging statistic
}
buffer_reader::~buffer_reader()
{
    // Remove the buffer's raw back-pointer to us before it dangles.
    d_buffer->drop_reader(this);
    s_buffer_reader_count--; // debugging statistic
}
// Record this reader's sample delay and widen the buffer's worst-case
// reader delay if ours is the new maximum (used when pruning old tags).
void buffer_reader::declare_sample_delay(unsigned delay)
{
    d_attr_delay = delay;
    if (delay > d_buffer->d_max_reader_delay)
        d_buffer->d_max_reader_delay = delay;
}
unsigned buffer_reader::sample_delay() const { return d_attr_delay; }
// Number of items written but not yet consumed by this reader
// (circular-index subtraction: write index minus read index).
int buffer_reader::items_available() const
{
    return d_buffer->index_sub(d_buffer->d_write_index, d_read_index);
}
// Address of the next readable item for this reader.
const void* buffer_reader::read_pointer()
{
    return d_buffer->d_base + d_read_index * d_buffer->d_sizeof_item;
}
// Advance the circular read index by nitems and bump the absolute
// (monotonically increasing) read offset. Takes the buffer mutex.
void buffer_reader::update_read_pointer(int nitems)
{
    gr::thread::scoped_lock guard(*mutex());
    d_read_index = d_buffer->index_add(d_read_index, nitems);
    d_abs_read_offset += nitems;
}
/*!
 * \brief Collect into \p v all tags whose delay-shifted offset lies in
 * [abs_start, abs_end), skipping tags already marked deleted for
 * reader \p id.
 *
 * The reader's declared sample delay shifts tag positions: a tag stored
 * at buffer offset t is reported to this reader at t + d_attr_delay.
 * \p v is cleared before being filled. Takes the buffer mutex.
 */
void buffer_reader::get_tags_in_range(std::vector<tag_t>& v,
                                      uint64_t abs_start,
                                      uint64_t abs_end,
                                      long id)
{
    gr::thread::scoped_lock guard(*mutex());

    // Search the buffer's tag map in un-shifted coordinates.
    uint64_t lower_bound = abs_start - d_attr_delay;
    // check for underflow and if so saturate at 0
    if (lower_bound > abs_start)
        lower_bound = 0;
    uint64_t upper_bound = abs_end - d_attr_delay;
    // check for underflow and if so saturate at 0
    if (upper_bound > abs_end)
        upper_bound = 0;

    v.clear();
    std::multimap<uint64_t, tag_t>::iterator itr =
        d_buffer->get_tags_lower_bound(lower_bound);
    std::multimap<uint64_t, tag_t>::iterator itr_end =
        d_buffer->get_tags_upper_bound(upper_bound);

    uint64_t item_time;
    while (itr != itr_end) {
        // Position of the tag as seen by this (delayed) reader.
        item_time = (*itr).second.offset + d_attr_delay;
        if ((item_time >= abs_start) && (item_time < abs_end)) {
            std::vector<long>::iterator id_itr;
            id_itr = std::find(
                itr->second.marked_deleted.begin(), itr->second.marked_deleted.end(), id);
            // If id is not in the vector of marked blocks
            if (id_itr == itr->second.marked_deleted.end()) {
                // Copy the tag, shift its offset into reader coordinates,
                // and strip the per-reader deletion markers from the copy
                // handed to the caller.
                tag_t t = (*itr).second;
                t.offset += d_attr_delay;
                v.push_back(t);
                v.back().marked_deleted.clear();
            }
        }
        itr++;
    }
}
long buffer_reader_ncurrently_allocated() { return s_buffer_reader_count; }
} /* namespace gr */
|