author     Johnathan Corgan <johnathan@corganlabs.com>    2016-10-13 14:46:18 -0700
committer  Johnathan Corgan <johnathan@corganlabs.com>    2016-10-13 14:46:18 -0700
commit     d806c4f416302f09292bbc5fb96647382368424d (patch)
tree       b06bfa90b23ad4c96eba6e7eacfe747a47363d25
parent     d2a3192ec7c8143315002c123a20ccd63a9f737d (diff)
parent     15a8a39c9acb1eca7ed2fb29868eaaa1e1e3d5a9 (diff)
Merge branch 'master' into next
-rw-r--r--  gr-audio/lib/windows/windows_sink.cc   |  22
-rw-r--r--  gr-audio/lib/windows/windows_sink.h    |  14
-rw-r--r--  gr-audio/lib/windows/windows_source.cc | 455
-rw-r--r--  gr-audio/lib/windows/windows_source.h  |  37
-rw-r--r--  gr-fec/lib/cc_decoder_impl.cc          |   3
-rw-r--r--  gr-uhd/apps/uhd_app.py                 |  24
6 files changed, 379 insertions, 176 deletions
diff --git a/gr-audio/lib/windows/windows_sink.cc b/gr-audio/lib/windows/windows_sink.cc
index 4ec798b0ba..52456dbdc4 100644
--- a/gr-audio/lib/windows/windows_sink.cc
+++ b/gr-audio/lib/windows/windows_sink.cc
@@ -46,10 +46,10 @@ namespace gr {
sink::sptr
windows_sink_fcn(int sampling_rate,
const std::string &device_name,
- bool)
+ bool ok_to_block)
{
return sink::sptr
- (new windows_sink(sampling_rate, device_name));
+ (new windows_sink(sampling_rate, device_name, ok_to_block));
}
static const double CHUNK_TIME = prefs::singleton()->get_double("audio_windows", "period_time", 0.1); // 100 ms (below 3ms distortion will likely occur regardless of number of buffers, will likely be a higher limit on slower machines)
@@ -63,13 +63,13 @@ namespace gr {
return (default_device == "default" ? "WAVE_MAPPER" : default_device);
}
- windows_sink::windows_sink(int sampling_freq, const std::string device_name)
+ windows_sink::windows_sink(int sampling_freq, const std::string device_name, bool ok_to_block)
: sync_block("audio_windows_sink",
io_signature::make(1, 2, sizeof(float)),
io_signature::make(0, 0, 0)),
d_sampling_freq(sampling_freq),
d_device_name(device_name.empty() ? default_device_name() : device_name),
- d_fd(-1), d_buffers(0), d_chunk_size(0)
+ d_fd(-1), d_buffers(0), d_chunk_size(0), d_ok_to_block(ok_to_block)
{
/* Initialize the WAVEFORMATEX for 16-bit, 44KHz, stereo */
wave_format.wFormatTag = WAVE_FORMAT_PCM;
@@ -154,10 +154,20 @@ namespace gr {
}
}
if (!chosen_header) {
- WaitForSingleObject(d_wave_write_event, 100);
- printf("aO");
+ if (!d_ok_to_block)
+ {
+ // drop the input data, print warning, and return control.
+ printf("aO");
+ return noutput_items;
+ }
+ else {
+ WaitForSingleObject(d_wave_write_event, 100);
+ }
}
if (c++ > 10) {
+        // If we have waited for about a second, something else is seriously wrong,
+        // so just fail and print some debugging information about the status
+        // of the buffers.
for (int i = 0; i < nPeriods; i++) {
printf("%d: %d\n", i, d_buffers[i]->dwFlags);
}
diff --git a/gr-audio/lib/windows/windows_sink.h b/gr-audio/lib/windows/windows_sink.h
index 2bfdbd318d..de905c68fd 100644
--- a/gr-audio/lib/windows/windows_sink.h
+++ b/gr-audio/lib/windows/windows_sink.h
@@ -49,22 +49,24 @@ namespace gr {
int d_fd;
LPWAVEHDR *d_buffers;
DWORD d_chunk_size;
- DWORD d_buffer_size;
+ DWORD d_buffer_size;
+ bool d_ok_to_block;
HWAVEOUT d_h_waveout;
HANDLE d_wave_write_event;
- WAVEFORMATEX wave_format;
+ WAVEFORMATEX wave_format;
protected:
int string_to_int(const std::string & s);
int open_waveout_device(void);
int write_waveout(LPWAVEHDR lp_wave_hdr);
- MMRESULT is_format_supported(LPWAVEFORMATEX pwfx, UINT uDeviceID);
- bool is_number(const std::string& s);
- UINT find_device(std::string szDeviceName);
+ MMRESULT is_format_supported(LPWAVEFORMATEX pwfx, UINT uDeviceID);
+ bool is_number(const std::string& s);
+ UINT find_device(std::string szDeviceName);
public:
windows_sink(int sampling_freq,
- const std::string device_name = "");
+ const std::string device_name,
+ bool ok_to_block);
~windows_sink();
int work(int noutput_items,
diff --git a/gr-audio/lib/windows/windows_source.cc b/gr-audio/lib/windows/windows_source.cc
index 02c9311517..f458fa474e 100644
--- a/gr-audio/lib/windows/windows_source.cc
+++ b/gr-audio/lib/windows/windows_source.cc
@@ -26,9 +26,6 @@
#include "audio_registry.h"
#include <windows_source.h>
-#include <gnuradio/io_signature.h>
-//include <sys/soundcard.h>
-//include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -36,160 +33,306 @@
#include <stdio.h>
#include <iostream>
#include <stdexcept>
+#include <sstream>
+#include <gnuradio/io_signature.h>
+#include <gnuradio/prefs.h>
+#include <gnuradio/logger.h>
+
+#include "boost/lexical_cast.hpp"
+
namespace gr {
- namespace audio {
-
- source::sptr
- windows_source_fcn(int sampling_rate,
- const std::string &device_name,
- bool)
- {
- return source::sptr
- (new windows_source(sampling_rate, device_name));
- }
-
- static const double CHUNK_TIME = 0.005; // 5 ms
-
- // FIXME these should query some kind of user preference
-
- static std::string
- default_device_name()
- {
- return "/dev/dsp";
- }
-
- windows_source::windows_source(int sampling_freq,
- const std::string device_name)
- : sync_block("audio_windows_source",
- io_signature::make(0, 0, 0),
- io_signature::make(1, 2, sizeof(float))),
- d_sampling_freq(sampling_freq),
- d_device_name(device_name.empty() ? default_device_name() : device_name),
- d_fd(-1), d_buffer(0), d_chunk_size(0)
- {
- //FIXME TODO implement me
-#if 0
- if((d_fd = open(d_device_name.c_str(), O_RDONLY)) < 0) {
- fprintf(stderr, "audio_windows_source: ");
- perror(d_device_name.c_str());
- throw std::runtime_error("audio_windows_source");
- }
-
- d_chunk_size = (int)(d_sampling_freq * CHUNK_TIME);
- set_output_multiple(d_chunk_size);
-
- d_buffer = new short[d_chunk_size * 2];
-
- int format = AFMT_S16_NE;
- int orig_format = format;
- if(ioctl(d_fd, SNDCTL_DSP_SETFMT, &format) < 0) {
- std::cerr << "audio_windows_source: " << d_device_name
- << " ioctl failed\n";
- perror(d_device_name.c_str());
- throw std::runtime_error("audio_windows_source");
- }
-
- if(format != orig_format) {
- fprintf(stderr, "audio_windows_source: unable to support format %d\n",
- orig_format);
- fprintf(stderr, " card requested %d instead.\n", format);
- }
-
- // set to stereo no matter what. Some hardware only does stereo
- int channels = 2;
- if(ioctl(d_fd, SNDCTL_DSP_CHANNELS, &channels) < 0 || channels != 2) {
- perror("audio_windows_source: could not set STEREO mode");
- throw std::runtime_error("audio_windows_source");
- }
-
- // set sampling freq
- int sf = sampling_freq;
- if(ioctl(d_fd, SNDCTL_DSP_SPEED, &sf) < 0) {
- std::cerr << "audio_windows_source: "
- << d_device_name << ": invalid sampling_freq "
- << sampling_freq << "\n";
- sampling_freq = 8000;
- if(ioctl(d_fd, SNDCTL_DSP_SPEED, &sf) < 0) {
- std::cerr << "audio_windows_source: failed to set sampling_freq to 8000\n";
- throw std::runtime_error ("audio_windows_source");
- }
- }
-#endif
- }
-
- windows_source::~windows_source()
- {
- /*close(d_fd);
- delete [] d_buffer;
- */
- }
-
- int
- windows_source::work(int noutput_items,
- gr_vector_const_void_star & input_items,
- gr_vector_void_star & output_items)
- {
- //FIXME TODO implement me
-#if 0
- float *f0 = (float*)output_items[0];
- float *f1 = (float*)output_items[1]; // will be invalid if this is mono output
-
- const int shorts_per_item = 2; // L + R
- const int bytes_per_item = shorts_per_item * sizeof(short);
-
- // To minimize latency, never return more than CHUNK_TIME
- // worth of samples per call to work.
- // FIXME, we need an API to set this value
-
- noutput_items = std::min(noutput_items, d_chunk_size);
-
- int base = 0;
- int ntogo = noutput_items;
-
- while(ntogo > 0) {
- int nbytes = std::min(ntogo, d_chunk_size) * bytes_per_item;
- int result_nbytes = read(d_fd, d_buffer, nbytes);
-
- if(result_nbytes < 0) {
- perror("audio_windows_source");
- return -1; // say we're done
- }
-
- if((result_nbytes & (bytes_per_item - 1)) != 0) {
- fprintf(stderr, "audio_windows_source: internal error.\n");
- throw std::runtime_error("internal error");
- }
-
- int result_nitems = result_nbytes / bytes_per_item;
-
- // now unpack samples into output streams
- switch(output_items.size()) {
- case 1: // mono output
- for(int i = 0; i < result_nitems; i++) {
- f0[base + i] = d_buffer[2 * i + 0] * (1.0 / 32767);
- }
- break;
-
- case 2: // stereo output
- for(int i = 0; i < result_nitems; i++) {
- f0[base + i] = d_buffer[2 * i + 0] * (1.0 / 32767);
- f1[base + i] = d_buffer[2 * i + 1] * (1.0 / 32767);
- }
- break;
-
- default:
- assert(0);
- }
-
- ntogo -= result_nitems;
- base += result_nitems;
- }
-
- return noutput_items - ntogo;
-#endif
- return -1; // EOF
- }
+ namespace audio {
+
+      // Currently this audio source only supports a single 16-bit input channel, so a stereo input will likely be mixed down to mono by the wave mapper.
+
+ source::sptr
+ windows_source_fcn(int sampling_rate,
+ const std::string &device_name,
+ bool)
+ {
+ return source::sptr
+ (new windows_source(sampling_rate, device_name));
+ }
+
+      static const double CHUNK_TIME = prefs::singleton()->get_double("audio_windows", "period_time", 0.1); // 100 ms (below 3 ms, distortion will likely occur regardless of the number of buffers; the limit will likely be higher on slower machines)
+      static const int nPeriods = prefs::singleton()->get_long("audio_windows", "nperiods", 4); // 4 should be more than enough with a normal chunk time (2 will likely work as well); at 3 ms chunks, 10 was enough on a fast machine
+ static const bool verbose = prefs::singleton()->get_bool("audio_windows", "verbose", false);
+ static const std::string default_device = prefs::singleton()->get_string("audio_windows", "standard_input_device", "default");
+
+ static std::string
+ default_device_name()
+ {
+ return (default_device == "default" ? "WAVE_MAPPER" : default_device);
+ }
+
+ windows_source::windows_source(int sampling_freq,
+ const std::string device_name)
+ : sync_block("audio_windows_source",
+ io_signature::make(0, 0, 0),
+ io_signature::make(1, 1, sizeof(float))),
+ d_sampling_freq(sampling_freq),
+ d_device_name(device_name.empty() ? default_device_name() : device_name),
+ d_fd(-1), lp_buffers(0), d_chunk_size(0)
+ {
+ /* Initialize the WAVEFORMATEX for 16-bit, mono */
+ wave_format.wFormatTag = WAVE_FORMAT_PCM;
+ wave_format.nChannels = 1; // changing this will require adjustments to the work routine.
+ wave_format.wBitsPerSample = 16; // changing this will necessitate changing buffer type from short.
+        wave_format.nSamplesPerSec = d_sampling_freq; // defined by the flowgraph settings, but note that the microphone will likely have a native sample rate
+        // that the audio system resamples to your desired rate, so check where the cutoff ends up, or check your control panel
+ wave_format.nBlockAlign =
+ wave_format.nChannels * (wave_format.wBitsPerSample / 8);
+ wave_format.nAvgBytesPerSec =
+ wave_format.nSamplesPerSec * wave_format.nBlockAlign;
+ wave_format.cbSize = 0;
+
+ d_chunk_size = (int)(d_sampling_freq * CHUNK_TIME); // Samples per chunk
+ set_output_multiple(d_chunk_size);
+ d_buffer_size = d_chunk_size * wave_format.nChannels * (wave_format.wBitsPerSample / 8); // room for 16-bit audio on one channel.
+
+ if (open_wavein_device() < 0) {
+ perror("audio_windows_source:open_wavein_device() failed\n");
+ throw
+ std::runtime_error("audio_windows_source:open_wavein_device() failed");
+ }
+ else if (verbose) {
+ GR_LOG_INFO(logger, "Opened windows wavein device");
+ }
+ lp_buffers = new LPWAVEHDR[nPeriods];
+ for (int i = 0; i < nPeriods; i++)
+ {
+ lp_buffers[i] = new WAVEHDR;
+ LPWAVEHDR lp_buffer = lp_buffers[i];
+ lp_buffer->dwLoops = 0L;
+ lp_buffer->dwFlags = 0;
+ lp_buffer->dwBufferLength = d_buffer_size;
+ lp_buffer->lpData = new CHAR[d_buffer_size];
+ MMRESULT w_result =
+ waveInPrepareHeader(d_h_wavein, lp_buffer, sizeof(WAVEHDR));
+ if (w_result != 0) {
+ perror("audio_windows_source: Failed to waveInPrepareHeader");
+ throw
+ std::runtime_error("audio_windows_source:open_wavein_device() failed");
+ }
+ waveInAddBuffer(d_h_wavein, lp_buffer, sizeof(WAVEHDR));
+ }
+ waveInStart(d_h_wavein);
+ if (verbose) GR_LOG_INFO(logger, boost::format("Initialized %1% %2%ms audio buffers, total memory used: %3$0.2fkB") % (nPeriods) % (CHUNK_TIME * 1000) % ((d_buffer_size * nPeriods) / 1024.0));
+ }
+
+ windows_source::~windows_source()
+ {
+ // stop playback and set all buffers to DONE.
+ waveInReset(d_h_wavein);
+ // Now we can deallocate the buffers
+ for (int i = 0; i < nPeriods; i++)
+ {
+ if (lp_buffers[i]->dwFlags & (WHDR_DONE | WHDR_PREPARED)) {
+ waveInUnprepareHeader(d_h_wavein, lp_buffers[i], sizeof(WAVEHDR));
+ }
+          delete[] lp_buffers[i]->lpData;
+ }
+ /* Free the callback Event */
+ waveInClose(d_h_wavein);
+ delete[] lp_buffers;
+ }
+
+ int
+ windows_source::work(int noutput_items,
+ gr_vector_const_void_star & input_items,
+ gr_vector_void_star & output_items)
+ {
+ float *f0, *f1;
+ DWORD dw_items = 0;
+
+ while (!buffer_queue.empty())
+ {
+ // Pull the next incoming buffer off the queue
+ LPWAVEHDR next_header = buffer_queue.front();
+
+ // Convert and calculate the number of samples (might not be full)
+ short *lp_buffer = (short *)next_header->lpData;
+ DWORD buffer_length = next_header->dwBytesRecorded / sizeof(short);
+
+ if (buffer_length + dw_items > noutput_items * output_items.size()) {
+              // There is not enough output buffer space to send the whole input buffer,
+              // so leave it in the queue rather than tracking how much of it we sent.
+              // In theory we should never reach this code, because the buffers should all be
+              // sized the same.
+ return dw_items;
+ }
+ else {
+ switch (output_items.size()) {
+ case 1: // mono output
+ f0 = (float*)output_items[0];
+
+ for (int j = 0; j < buffer_length; j++) {
+ f0[dw_items + j] = (float)(lp_buffer[j]) / 32767.0;
+ }
+ dw_items += buffer_length;
+ break;
+ case 2: // stereo output (interleaved in the buffer)
+ f0 = (float*)output_items[0];
+ f1 = (float*)output_items[1];
+
+ for (int j = 0; j < buffer_length / 2; j++) {
+ f0[dw_items + j] = (float)(lp_buffer[2 * j + 0]) / 32767.0;
+ f1[dw_items + j] = (float)(lp_buffer[2 * j + 1]) / 32767.0;
+ }
+ dw_items += buffer_length / 2;
+ }
+ buffer_queue.pop();
+
+ // Recycle the buffer
+ next_header->dwFlags = 0;
+ waveInPrepareHeader(d_h_wavein, next_header, sizeof(WAVEHDR));
+ waveInAddBuffer(d_h_wavein, next_header, sizeof(WAVEHDR));
+ }
+ }
+ return dw_items;
+ }
+
+ int
+ windows_source::string_to_int(const std::string & s)
+ {
+ int i;
+ std::istringstream(s) >> i;
+ return i;
+ }
+
+ MMRESULT windows_source::is_format_supported(LPWAVEFORMATEX pwfx, UINT uDeviceID)
+ {
+ return (waveInOpen(
+ NULL, // ptr can be NULL for query
+ uDeviceID, // the device identifier
+ pwfx, // defines requested format
+ NULL, // no callback
+ NULL, // no instance data
+ WAVE_FORMAT_QUERY)); // query only, do not open device
+ }
+
+ bool windows_source::is_number(const std::string& s)
+ {
+ std::string::const_iterator it = s.begin();
+ while (it != s.end() && std::isdigit(*it)) ++it;
+ return !s.empty() && it == s.end();
+ }
+
+ UINT windows_source::find_device(std::string szDeviceName)
+ {
+ UINT result = -1;
+ UINT num_devices = waveInGetNumDevs();
+ if (num_devices > 0) {
+            // was the device name passed as a number?
+ if (is_number(szDeviceName))
+ {
+              // a number, so it must be referencing a device ID (these increment from zero)
+ UINT num = std::stoul(szDeviceName);
+ if (num < num_devices) {
+ result = num;
+ }
+ else {
+ GR_LOG_INFO(logger, boost::format("Warning: waveIn deviceID %d was not found, defaulting to WAVE_MAPPER") % num);
+ result = WAVE_MAPPER;
+ }
+
+ }
+ else {
+ // device name passed as string
+ for (UINT i = 0; i < num_devices; i++)
+ {
+ WAVEINCAPS woc;
+ if (waveInGetDevCaps(i, &woc, sizeof(woc)) != MMSYSERR_NOERROR)
+ {
+              perror("Error: Could not retrieve wave in device capabilities for device");
+ return -1;
+ }
+ if (woc.szPname == szDeviceName)
+ {
+ result = i;
+ }
+ if (verbose) GR_LOG_INFO(logger, boost::format("WaveIn Device %d: %s") % i % woc.szPname);
+ }
+ if (result == -1) {
+ GR_LOG_INFO(logger, boost::format("Warning: waveIn device '%s' was not found, defaulting to WAVE_MAPPER") % szDeviceName);
+ result = WAVE_MAPPER;
+ }
+ }
+ }
+ else {
+ perror("Error: No WaveIn devices present or accessible");
+ }
+ return result;
+ }
+
+ int
+ windows_source::open_wavein_device(void)
+ {
+ UINT u_device_id;
+ unsigned long result;
+
+      /** Identifier of the waveform-audio input device to open. It
+          can be either a device identifier or a handle of an open
+          waveform-audio input device. You can use the following flag
+          instead of a device identifier.
+          WAVE_MAPPER The function selects a waveform-audio input
+          device capable of recording in the given format.
+      */
+ if (d_device_name.empty() || default_device_name() == d_device_name)
+ u_device_id = WAVE_MAPPER;
+ else
+        // Otherwise look up the device ID from the user-supplied name or index;
+        // find_device() falls back to WAVE_MAPPER if the device cannot be found.
+ u_device_id = find_device(d_device_name);
+ if (verbose) GR_LOG_INFO(logger, boost::format("waveIn Device ID: %1%") % (u_device_id));
+
+ // Check if the sampling rate/bits/channels are good to go with the device.
+ MMRESULT supported = is_format_supported(&wave_format, u_device_id);
+ if (supported != MMSYSERR_NOERROR) {
+ char err_msg[50];
+ waveInGetErrorText(supported, err_msg, 50);
+ GR_LOG_INFO(logger, boost::format("format error: %s") % err_msg);
+ perror("audio_windows_source: Requested audio format is not supported by device driver");
+ return -1;
+ }
+
+      // Open a waveform input device, using a callback function to collect filled buffers.
+ result = waveInOpen(&d_h_wavein, u_device_id,
+ &wave_format,
+ (DWORD_PTR)&read_wavein,
+ (DWORD_PTR)&buffer_queue, CALLBACK_FUNCTION | WAVE_ALLOWSYNC);
+
+ if (result) {
+        perror("audio_windows_source: Failed to open waveform input device.");
+ return -1;
+ }
+ return 0;
+ }
- } /* namespace audio */
+ static void CALLBACK read_wavein(
+ HWAVEIN hwi,
+ UINT uMsg,
+ DWORD_PTR dwInstance,
+ DWORD_PTR dwParam1,
+ DWORD_PTR dwParam2
+ )
+ {
+ // Ignore WIM_OPEN and WIM_CLOSE messages
+ if (uMsg == WIM_DATA) {
+        if (!dwInstance) {
+          perror("audio_windows_source: callback function missing buffer queue");
+          return;
+        }
+ LPWAVEHDR lp_wave_hdr = (LPWAVEHDR)dwParam1; // The new audio data
+ boost::lockfree::spsc_queue<LPWAVEHDR> *q = (boost::lockfree::spsc_queue<LPWAVEHDR> *)dwInstance; // The buffer queue we assigned to the device to track the buffers that need to be sent
+ q->push(lp_wave_hdr); // Add the buffer to that queue
+ }
+ }
+ } /* namespace audio */
} /* namespace gr */
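The new source hands buffers from the waveIn callback thread to work() through a boost::lockfree::spsc_queue. The sketch below illustrates that single-producer/single-consumer handoff with hypothetical names: a plain std::thread plays the driver thread and std::vector<short> stands in for the WAVEHDR buffers. The real block re-queues each drained buffer with waveInPrepareHeader()/waveInAddBuffer() rather than deleting it.

#include <boost/lockfree/spsc_queue.hpp>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  // The callback thread is the single producer, work() is the single consumer.
  boost::lockfree::spsc_queue<std::vector<short>*> buffer_queue{100};

  // Stand-in for read_wavein(): the driver thread pushes each filled buffer.
  std::thread producer([&buffer_queue] {
    for (int i = 0; i < 8; i++)
      buffer_queue.push(new std::vector<short>(480, short(i)));  // one 480-sample chunk
  });

  // Stand-in for work(): drain whatever buffers have arrived so far.
  int drained = 0;
  while (drained < 8) {
    std::vector<short>* buf = 0;
    if (buffer_queue.pop(buf)) {       // pop() returns false when the queue is empty
      std::printf("received %u samples\n", (unsigned)buf->size());
      delete buf;                      // the real block recycles it via waveInAddBuffer()
      ++drained;
    }
  }
  producer.join();
  return 0;
}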
diff --git a/gr-audio/lib/windows/windows_source.h b/gr-audio/lib/windows/windows_source.h
index 9814d12f54..edb89a73ce 100644
--- a/gr-audio/lib/windows/windows_source.h
+++ b/gr-audio/lib/windows/windows_source.h
@@ -23,9 +23,17 @@
#ifndef INCLUDED_AUDIO_WINDOWS_SOURCE_H
#define INCLUDED_AUDIO_WINDOWS_SOURCE_H
+#define WIN32_LEAN_AND_MEAN
+#define NOMINMAX // stops windef.h defining max/min under cygwin
+
+#include <windows.h>
+#include <mmsystem.h>
+
#include <gnuradio/audio/source.h>
#include <string>
+#include <boost/lockfree/spsc_queue.hpp>
+
namespace gr {
namespace audio {
@@ -38,11 +46,22 @@ namespace gr {
*/
class windows_source : public source
{
- int d_sampling_freq;
- std::string d_device_name;
- int d_fd;
- short *d_buffer;
- int d_chunk_size;
+ int d_sampling_freq;
+ std::string d_device_name;
+ int d_fd;
+ LPWAVEHDR *lp_buffers;
+ DWORD d_chunk_size;
+ DWORD d_buffer_size;
+ HWAVEIN d_h_wavein;
+ WAVEFORMATEX wave_format;
+
+ protected:
+ int string_to_int(const std::string & s);
+ int open_wavein_device(void);
+ MMRESULT is_format_supported(LPWAVEFORMATEX pwfx, UINT uDeviceID);
+ bool is_number(const std::string& s);
+ UINT find_device(std::string szDeviceName);
+ boost::lockfree::spsc_queue<LPWAVEHDR> buffer_queue{ 100 };
public:
windows_source(int sampling_freq,
@@ -54,6 +73,14 @@ namespace gr {
gr_vector_void_star & output_items);
};
+ static void CALLBACK read_wavein(
+ HWAVEIN hwi,
+ UINT uMsg,
+ DWORD_PTR dwInstance,
+ DWORD_PTR dwParam1,
+ DWORD_PTR dwParam2
+ );
+
} /* namespace audio */
} /* namespace gr */
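The header registers read_wavein() as a C-style callback and recovers the buffer queue from the opaque dwInstance pointer handed to waveInOpen(). A minimal sketch of that user-data pattern follows, with hypothetical names and a plain std::queue in place of the lock-free queue:

#include <cstdio>
#include <queue>

struct chunk { int id; };

// A C-style callback only gets an opaque user pointer back from the driver,
// so the consumer's queue is smuggled through it, as windows_source does with dwInstance.
static void on_data(void* user_data, chunk* filled) {
  if (!user_data)
    return;                                   // nowhere to deliver the buffer
  std::queue<chunk*>* q = static_cast<std::queue<chunk*>*>(user_data);
  q->push(filled);
}

int main() {
  std::queue<chunk*> pending;                 // plays the role of buffer_queue
  chunk c = {42};
  on_data(&pending, &c);                      // what the driver would do once per filled buffer
  std::printf("queued chunk %d\n", pending.front()->id);
  return 0;
}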
diff --git a/gr-fec/lib/cc_decoder_impl.cc b/gr-fec/lib/cc_decoder_impl.cc
index be505e2f8e..21af22e36f 100644
--- a/gr-fec/lib/cc_decoder_impl.cc
+++ b/gr-fec/lib/cc_decoder_impl.cc
@@ -156,6 +156,9 @@ namespace gr {
kerneltype << k_ << d_k << r_ << d_rate;
d_kernel = yp_kernel[kerneltype.str()];
+ if (d_kernel == NULL) {
+ throw std::runtime_error("cc_decoder: parameters not supported");
+ }
}
cc_decoder_impl::~cc_decoder_impl()
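The cc_decoder hunk guards against an unsupported (k, rate) combination by checking the kernel pointer after the table lookup. Below is a small sketch of the same guard using std::map::find(), so a missing key is detected without inserting a default-constructed entry; the key format and function names here are illustrative, not the actual gr-fec code.

#include <cstdio>
#include <map>
#include <sstream>
#include <stdexcept>
#include <string>

typedef void (*conv_kernel)(unsigned char*, unsigned char*, unsigned int);

conv_kernel lookup_kernel(const std::map<std::string, conv_kernel>& kernels,
                          int k, int rate) {
  std::ostringstream key;
  key << "k_" << k << "r_" << rate;   // key layout is illustrative only
  std::map<std::string, conv_kernel>::const_iterator it = kernels.find(key.str());
  if (it == kernels.end() || it->second == NULL) {
    throw std::runtime_error("cc_decoder: parameters not supported");
  }
  return it->second;
}

int main() {
  std::map<std::string, conv_kernel> kernels;  // empty table: any lookup should throw
  try {
    lookup_kernel(kernels, 7, 2);
  } catch (const std::runtime_error& e) {
    std::printf("%s\n", e.what());
  }
  return 0;
}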
diff --git a/gr-uhd/apps/uhd_app.py b/gr-uhd/apps/uhd_app.py
index 652a9fbab6..8e377f0b4d 100644
--- a/gr-uhd/apps/uhd_app.py
+++ b/gr-uhd/apps/uhd_app.py
@@ -121,6 +121,20 @@ class UHDApp(object):
antennas = [antennas[0],] * len(args.channels)
return antennas
+ def normalize_subdev_sel(self, spec):
+        """
+        Normalize a subdev spec: return a single spec string as-is, or split a
+        comma-separated list into one spec per motherboard.
+        """
+ if spec is None:
+ return None
+ specs = [x.strip() for x in spec.split(",")]
+ if len(specs) == 1:
+ return spec
+ elif len(specs) != self.usrp.get_num_mboards():
+ raise ValueError("Invalid subdev setting for {n} mboards: {a}".format(
+                n=self.usrp.get_num_mboards(), a=spec
+ ))
+ return specs
+
def async_callback(self, msg):
"""
Call this when USRP async metadata needs printing.
@@ -151,9 +165,13 @@ class UHDApp(object):
)
)
# Set the subdevice spec:
+ args.spec = self.normalize_subdev_sel(args.spec)
if args.spec:
for mb_idx in xrange(self.usrp.get_num_mboards()):
- self.usrp.set_subdev_spec(args.spec, mb_idx)
+ if isinstance(args.spec, list):
+ self.usrp.set_subdev_spec(args.spec[mb_idx], mb_idx)
+ else:
+ self.usrp.set_subdev_spec(args.spec, mb_idx)
# Set the clock and/or time source:
if args.clock_source is not None:
for mb_idx in xrange(self.usrp.get_num_mboards()):
@@ -298,8 +316,8 @@ class UHDApp(object):
tx_or_rx = tx_or_rx.strip() + " "
group = parser.add_argument_group('USRP Arguments')
group.add_argument("-a", "--args", default="", help="UHD device address args")
- group.add_argument("--spec", help="Subdevice of UHD device where appropriate")
- group.add_argument("-A", "--antenna", help="Select {xx}Antenna(s) where appropriate".format(xx=tx_or_rx))
+ group.add_argument("--spec", help="Subdevice(s) of UHD device where appropriate. Use a comma-separated list to set different boards to different specs.")
+ group.add_argument("-A", "--antenna", help="Select {xx}antenna(s) where appropriate".format(xx=tx_or_rx))
group.add_argument("-s", "--samp-rate", type=eng_arg.eng_float, default=1e6,
help="Sample rate")
group.add_argument("-g", "--gain", type=eng_arg.eng_float, default=None,