/*
* Copyright (c) 2015-2016, Luca Fulchir<luca@fulchir.it>, All rights reserved.
*
* This file is part of "libRaptorQ".
*
* libRaptorQ is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3
* of the License, or (at your option) any later version.
*
* libRaptorQ is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* and a copy of the GNU Lesser General Public License
* along with libRaptorQ. If not, see <http://www.gnu.org/licenses/>.
*/
/////////////////////
//
// These templates are just a wrapper around the actual implementation.
// So if you want to see what the algorithm looks like,
// you are in the wrong place
//
/////////////////////
#include "RaptorQ/v1/Interleaver.hpp"
#include "RaptorQ/v1/De_Interleaver.hpp"
#include "RaptorQ/v1/Decoder.hpp"
#include "RaptorQ/v1/Encoder.hpp"
#include "RaptorQ/v1/Shared_Computation/Decaying_LF.hpp"
#include "RaptorQ/v1/Thread_Pool.hpp"
#include <cassert>
#include <cmath>
#include <condition_variable>
#include <deque>
#include <future>
#include <map>
#include <memory>
#include <mutex>
#include <thread>
#include <utility>
typedef uint64_t RQ_OTI_Common_Data;
typedef uint32_t RQ_OTI_Scheme_Specific_Data;
// maximum number of times a single block can be decoded at the same time.
// the decoder can be launched multiple times with different combinations
// of repair symbols. This can be useful as the decoding is actually
// probabilistic, and dropping a set of repair symbols *MIGHT* make things
// decodable again.
// keep this low. 1, 2, 3 should be ok.
static uint16_t max_block_decoder_concurrency = 1;
static const uint64_t max_data = 946270874880; // ~881 GB
Encoder (const Rnd_It data_from, const Rnd_It data_to,
         const uint16_t min_subsymbol_size,
         const uint16_t symbol_size,
         const size_t max_memory)
    : _mem (max_memory), _data_from (data_from), _data_to (data_to),
      _symbol_size (symbol_size),
      _min_subsymbol (min_subsymbol_size),
      interleave (_data_from,
                  _data_to,
                  _min_subsymbol,
                  _mem,
                  _symbol_size)
{
IS_RANDOM(Rnd_It, "RFC6330__v1::Encoder");
IS_FORWARD(Fwd_It, "RFC6330__v1::Encoder");
auto _alignment = sizeof(typename
std::iterator_traits<Rnd_It>::value_type);
assert(_symbol_size >= _alignment &&
"RaptorQ: symbol_size must be >= alignment");
assert((_symbol_size % _alignment) == 0 &&
"RaptorQ: symbol_size must be multiple of alignment");
assert(min_subsymbol_size >= _alignment &&
        "RaptorQ: minimum subsymbol must be at least alignment");
assert(min_subsymbol_size <= _symbol_size &&
"RaptorQ: minimum subsymbol must be at most symbol_size");
assert((min_subsymbol_size % _alignment) == 0 &&
"RaptorQ: minimum subsymbol must be multiple of alignment");
assert((_symbol_size % min_subsymbol_size == 0) &&
"RaptorQ: symbol size must be multiple of subsymbol size");
if (static_cast<uint64_t> (data_to - data_from) *
sizeof(typename std::iterator_traits<Rnd_It>::value_type)
> max_data) {
    return;
}
_pool_notify = std::make_shared<std::condition_variable>();
_pool_mtx = std::make_shared<std::mutex>();
pool_last_reported = -1;
use_pool = true;
exiting = false;
auto part = interleave.get_partition();
return Block_Iterator<Rnd_It, Fwd_It> (this, part,
operator bool() const { return interleave; }
RQ_OTI_Common_Data OTI_Common() const;
RQ_OTI_Scheme_Specific_Data OTI_Scheme_Specific() const;
// TODO: introduce memory limits on threading ?
std::future<std::pair<Error, uint8_t>> compute (const Compute flags);
size_t encode (Fwd_It &output, const Fwd_It end, const uint32_t esi,
                                                        const uint8_t sbn);
size_t encode (Fwd_It &output, const Fwd_It end, const uint32_t &id);
uint8_t blocks() const;
uint32_t block_size (const uint8_t sbn) const;
uint16_t symbol_size() const;
uint16_t symbols (const uint8_t sbn) const;
uint32_t max_repair (const uint8_t sbn) const;
static void wait_threads (Encoder<Rnd_It, Fwd_It> *obj, const Compute flags,
std::promise<std::pair<Error, uint8_t>> p);
class Block_Work : public Impl::Pool_Work {
public:
std::weak_ptr<RaptorQ__v1::Impl::Raw_Encoder<Rnd_It, Fwd_It>> work;
std::weak_ptr<std::condition_variable> notify;
std::weak_ptr<std::mutex> lock;
Work_Exit_Status do_work (RaptorQ__v1::Work_State *state) override;
};
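// Illustrative note (not library documentation): Block_Work holds only
// weak_ptrs, so if the Encoder (and with it the Raw_Encoder) is destroyed
// while this work item is still queued, the lock() calls in do_work()
// return nullptr and the item is skipped instead of touching freed memory.
// A sketch of the pattern:
//   auto locked = work.lock();      // promote weak_ptr -> shared_ptr
//   if (locked != nullptr) {
//       // *locked is kept alive for the duration of this scope
//   }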
// TODO: tagged pointer
class Enc {
public:
    Enc (Impl::Interleaver<Rnd_It> *interleaver, const uint8_t sbn)
    {
        enc = std::make_shared<
                RaptorQ__v1::Impl::Raw_Encoder<Rnd_It, Fwd_It>> (
                                                        interleaver, sbn);
        reported = false;
    }
    std::shared_ptr<RaptorQ__v1::Impl::Raw_Encoder<Rnd_It, Fwd_It>> enc;
    bool reported;
};
std::pair<Error, uint8_t> get_report (const Compute flags);
std::shared_ptr<std::condition_variable> _pool_notify;
std::shared_ptr<std::mutex> _pool_mtx;
std::deque<std::thread> pool_wait;
std::map<uint8_t, Enc> encoders;
const size_t _mem;
const Rnd_It _data_from, _data_to;
const uint16_t _symbol_size;
bool use_pool, exiting;
int16_t pool_last_reported;
// rfc 6330, pg 6
// easy explanation for OTI_* comes next.
// we do NOT use bitfields as compilers are not actually forced to put
// them in any particular order, meaning they're useless.
//
//union OTI_Common_Data {
// uint64_t raw;
// struct {
// uint64_t size:40;
// uint8_t reserved:8;
// uint16_t symbol_size:16;
// };
//};
//union OTI_Scheme_Specific_Data {
// uint32_t raw;
// struct {
// uint8_t source_blocks;
// uint16_t sub_blocks;
// uint8_t alignment;
// };
//};
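// Example (illustrative only, not part of the public API): the same
// layouts handled manually with shifts and masks, which is what the
// methods below actually do. Assuming a 40-bit transfer size:
//
//   uint64_t common = (transfer_size << 24) | symbol_size;
//   uint64_t size   = common >> 24;                    // top 40 bits
//   uint16_t s_size = static_cast<uint16_t> (common);  // low 16 bits
//
//   uint32_t scheme = (static_cast<uint32_t> (blocks) << 24) |
//                     (static_cast<uint32_t> (sub_blocks) << 8) | alignment;
//   uint8_t  sbn_count = static_cast<uint8_t> (scheme >> 24);
//   uint16_t subs      = static_cast<uint16_t> (scheme >> 8);
//   uint8_t  align     = static_cast<uint8_t> (scheme);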
Decoder (const RQ_OTI_Common_Data common,
const RQ_OTI_Scheme_Specific_Data scheme)
{
IS_INPUT(In_It, "RaptorQ__v1::Decoder");
IS_FORWARD(Fwd_It, "RaptorQ__v1::Decoder");
// see the above commented bitfields for quick reference
_symbol_size = static_cast<uint16_t> (common);
uint16_t tot_sub_blocks = static_cast<uint16_t> (scheme >> 8);
_alignment = static_cast<uint8_t> (scheme);
_sub_blocks = Impl::Partition (_symbol_size /
static_cast<uint8_t> (scheme),
tot_sub_blocks);
_blocks = static_cast<uint8_t> (scheme >> 24);
_size = common >> 24;   // the first 40 bits of the common OTI
if (_size > max_data)
    return;
const uint64_t total_symbols = static_cast<uint64_t> (ceil (
_size / static_cast<double> (_symbol_size)));
part = Impl::Partition (total_symbols, static_cast<uint8_t> (_blocks));
_pool_notify = std::make_shared<std::condition_variable>();
_pool_mtx = std::make_shared<std::mutex>();
Decoder (const uint64_t size, const uint16_t symbol_size,
const uint16_t sub_blocks,
const uint8_t blocks,
const uint8_t alignment)
:_size (size), _symbol_size (symbol_size), _blocks (blocks),
_alignment(alignment)
{
    if (_size > max_data)
        return;
const uint64_t total_symbols = static_cast<uint64_t> (ceil (
_size / static_cast<double> (_symbol_size)));
_sub_blocks = Impl::Partition (_symbol_size / _alignment, sub_blocks);
part = Impl::Partition (total_symbols, static_cast<uint8_t> (_blocks));
_pool_notify = std::make_shared<std::condition_variable>();
_pool_mtx = std::make_shared<std::mutex>();
pool_last_reported = -1;
use_pool = true;
exiting = false;
std::future<std::pair<Error, uint8_t>> compute (const Compute flags);
// result in BYTES
uint64_t decode_bytes (Fwd_It &start, const Fwd_It end, const uint8_t skip);
size_t decode_block_bytes (Fwd_It &start, const Fwd_It end,
const uint8_t skip,
const uint8_t sbn);
// result in ITERATORS
// last *might* be half written depending on data alignments
std::pair<uint64_t, uint8_t> decode_aligned (Fwd_It &start, const Fwd_It end,
const uint8_t skip);
std::pair<size_t, uint8_t> decode_block_aligned (Fwd_It &start,
const Fwd_It end,
const uint8_t skip,
const uint8_t sbn);
Error add_symbol (In_It &start, const In_It end, const uint32_t id);
Error add_symbol (In_It &start, const In_It end, const uint32_t esi,
                                                        const uint8_t sbn);
uint8_t blocks() const;
uint32_t block_size (const uint8_t sbn) const;
uint16_t symbol_size() const;
uint16_t symbols (const uint8_t sbn) const;
// using shared pointers to avoid locking too much or
// worrying about deleting used stuff.
class RAPTORQ_LOCAL Block_Work : public Impl::Pool_Work {
public:
    std::weak_ptr<RaptorQ__v1::Impl::Raw_Decoder<In_It>> work;
std::weak_ptr<std::condition_variable> notify;
std::weak_ptr<std::mutex> lock;
    Work_Exit_Status do_work (RaptorQ__v1::Work_State *state) override;
};
class RAPTORQ_LOCAL Dec {
public:
Dec (const uint16_t symbols, const uint16_t symbol_size)
{
    dec = std::make_shared<RaptorQ__v1::Impl::Raw_Decoder<In_It>> (symbols,
                                                            symbol_size);
    reported = false;
}
std::shared_ptr<RaptorQ__v1::Impl::Raw_Decoder<In_It>> dec;
bool reported;
};
static void wait_threads (Decoder<In_It, Fwd_It> *obj, const Compute flags,
std::promise<std::pair<Error, uint8_t>> p);
std::pair<Error, uint8_t> get_report (const Compute flags);
std::shared_ptr<std::condition_variable> _pool_notify;
std::shared_ptr<std::mutex> _pool_mtx;
std::deque<std::thread> pool_wait;
std::map<uint8_t, Dec> decoders;
std::mutex _mtx;
/////////////////
//
// Encoder
//
/////////////////
template <typename Rnd_It, typename Fwd_It>
Encoder<Rnd_It, Fwd_It>::~Encoder()
{
    exiting = true; // stop notifying thread
for (auto &it : encoders) { // stop existing computations
auto ptr = it.second.enc;
if (ptr != nullptr)
ptr->stop();
}
}
template <typename Rnd_It, typename Fwd_It>
RQ_OTI_Common_Data Encoder<Rnd_It, Fwd_It>::OTI_Common() const
{
    // first 40 bits: data size; next 8 bits: reserved;
    // last 16 bits: symbol size
    RQ_OTI_Common_Data ret;
    ret = (static_cast<uint64_t> (_data_to - _data_from) *
            sizeof(typename std::iterator_traits<Rnd_It>::value_type)) << 24;
    ret += _symbol_size;
    return ret;
}
template <typename Rnd_It, typename Fwd_It>
RQ_OTI_Scheme_Specific_Data Encoder<Rnd_It, Fwd_It>::OTI_Scheme_Specific() const
{
    // 8 bits: source blocks; 16 bits: sub-blocks; 8 bits: alignment
    RQ_OTI_Scheme_Specific_Data ret;
    ret = static_cast<uint32_t> (interleave.blocks()) << 24;
    ret += static_cast<uint32_t> (interleave.sub_blocks()) << 8;
    ret += sizeof(typename std::iterator_traits<Rnd_It>::value_type);
    return ret;
}
template <typename Rnd_It, typename Fwd_It>
size_t Encoder<Rnd_It, Fwd_It>::precompute_max_memory ()
{
// give a good estimate on the amount of memory needed for the precomputation
// of one block;
// this will help you understand how many concurrent precomputations
// you want to do :)
uint16_t symbols = interleave.source_symbols (0);
uint16_t K_idx;
for (K_idx = 0; K_idx < RaptorQ__v1::Impl::K_padded.size(); ++K_idx) {
    if (symbols < RaptorQ__v1::Impl::K_padded[K_idx])
        break;
}
if (K_idx == RaptorQ__v1::Impl::K_padded.size())
    return 0;
auto S_H = RaptorQ__v1::Impl::S_H_W[K_idx];
uint16_t matrix_cols = RaptorQ__v1::Impl::K_padded[K_idx] +
                                                std::get<0> (S_H) +
                                                std::get<1> (S_H);
// Rough memory estimate: Matrix A, matrix X (=> *2) and matrix D.
return matrix_cols * matrix_cols * 2 + _symbol_size * matrix_cols;
}
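// Usage sketch (illustrative; "enc" and "mem_budget" are hypothetical
// names, not part of the library): bound the number of concurrent
// precomputations by the available memory.
//   size_t per_block  = enc.precompute_max_memory();
//   size_t concurrent = std::max<size_t> (1, mem_budget / per_block);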
template <typename Rnd_It, typename Fwd_It>
Encoder<Rnd_It, Fwd_It>::Block_Work::~Block_Work()
{
// cleanup. have we been called before the computation finished?
auto locked_enc = work.lock();
auto locked_notify = notify.lock();
auto locked_mtx = lock.lock();
if (locked_enc != nullptr && locked_notify != nullptr &&
locked_mtx != nullptr) {
locked_enc->stop();
std::unique_lock<std::mutex> p_lock (*locked_mtx);
RQ_UNUSED(p_lock);
locked_notify->notify_all();
}
}
template <typename Rnd_It, typename Fwd_It>
Work_Exit_Status Encoder<Rnd_It, Fwd_It>::Block_Work::do_work (
                                        RaptorQ__v1::Work_State *state)
{
auto locked_enc = work.lock();
auto locked_notify = notify.lock();
auto locked_mtx = lock.lock();
if (locked_enc != nullptr && locked_notify != nullptr &&
locked_mtx != nullptr) {
// encoding always works. It's one of the few constants of the universe.
if (!locked_enc->generate_symbols (state))
return Work_Exit_Status::STOPPED; // or maybe not so constant
work.reset();
std::unique_lock<std::mutex> p_lock (*locked_mtx);
RQ_UNUSED(p_lock);
locked_notify->notify_all();
}
return Work_Exit_Status::DONE;
}
template <typename Rnd_It, typename Fwd_It>
std::future<std::pair<Error, uint8_t>> Encoder<Rnd_It, Fwd_It>::compute (
const Compute flags)
{
using ret_t = std::pair<Error, uint8_t>;
std::promise<ret_t> p;
bool error = !interleave;
// need some flags
if (flags == Compute::NONE)
error = true;
// flag incompatibilities
if (Compute::NONE != (flags & Compute::PARTIAL_FROM_BEGINNING) &&
(Compute::NONE != (flags & (Compute::PARTIAL_ANY |
Compute::COMPLETE |
Compute::NO_POOL)))) {
error = true;
} else if (Compute::NONE != (flags & Compute::PARTIAL_ANY) &&
(Compute::NONE != (flags & (Compute::PARTIAL_FROM_BEGINNING |
Compute::COMPLETE |
Compute::NO_POOL)))) {
error = true;
} else if (Compute::NONE != (flags & Compute::COMPLETE) &&
Compute::NONE != (flags &(Compute::PARTIAL_FROM_BEGINNING |
Compute::PARTIAL_ANY |
Compute::NO_POOL))) {
error = true;
}
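// Illustrative restatement (not the library's code): the chain above just
// enforces that at most one of these four flags is set.
//   int exclusive = (Compute::NONE != (flags & Compute::COMPLETE))
//                 + (Compute::NONE != (flags & Compute::PARTIAL_FROM_BEGINNING))
//                 + (Compute::NONE != (flags & Compute::PARTIAL_ANY))
//                 + (Compute::NONE != (flags & Compute::NO_POOL));
//   if (exclusive > 1)
//       error = true;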
if (Compute::NONE != (flags & Compute::NO_POOL)) {
std::unique_lock<std::mutex> lock (_mtx);
if (encoders.size() != 0) {
// You can only say you won't use the pool *before* you start
// encoding something!
error = true;
} else {
use_pool = false;
p.set_value ({Error::NONE, 0});
return p.get_future();
    }
}
if (error) {
p.set_value ({Error::WRONG_INPUT, 0});
return p.get_future();
}
// flags are fine, add work to pool
std::unique_lock<std::mutex> lock (_mtx);
for (uint8_t block = 0; block < blocks(); ++block) {
auto enc = encoders.find (block);
if (enc == encoders.end()) {
    bool success;
    std::tie (enc, success) = encoders.emplace (
                            std::piecewise_construct,
                            std::forward_as_tuple (block),
                            std::forward_as_tuple (&interleave, block));
    assert (success == true);
std::unique_ptr<Block_Work> work = std::unique_ptr<Block_Work>(
new Block_Work());
work->work = enc->second.enc;
work->notify = _pool_notify;
work->lock = _pool_mtx;
Impl::Thread_Pool::get().add_work (std::move(work));
}
}
lock.unlock();
// spawn thread waiting for other thread exit.
// this way we can set_value to the future when needed.
auto future = p.get_future();
if (Compute::NONE != (flags & Compute::NO_BACKGROUND)) {
wait_threads (this, flags, std::move(p));
} else {
std::unique_lock<std::mutex> pool_wait_lock (_mtx);
pool_wait.emplace_back(wait_threads, this, flags, std::move(p));
    }
    return future;
}
template <typename Rnd_It, typename Fwd_It>
void Encoder<Rnd_It, Fwd_It>::wait_threads (Encoder<Rnd_It, Fwd_It> *obj,
const Compute flags,
std::promise<std::pair<Error, uint8_t>> p)
{
    do {
        std::unique_lock<std::mutex> lock (*obj->_pool_mtx);
if (obj->exiting) {
p.set_value ({Error::NONE, 0});
break;
}
auto status = obj->get_report (flags);
if (status.first != Error::WORKING) {
p.set_value (status);
break;
        }
        obj->_pool_notify->wait (lock);
        lock.unlock();
    } while (true);
// delete ourselves from the waiting thread vector.
for (auto it = obj->pool_wait.begin(); it != obj->pool_wait.end(); ++it) {
if (it->get_id() == std::this_thread::get_id()) {
it->detach();
            obj->pool_wait.erase (it);
            break;
        }
    }
}
template <typename Rnd_It, typename Fwd_It>
std::pair<Error, uint8_t> Encoder<Rnd_It, Fwd_It>::get_report (
                                                        const Compute flags)
{
if (encoders.size() == 0)
return {Error::WORKING, 0};
if (Compute::NONE != (flags & Compute::COMPLETE) ||
Compute::NONE != (flags & Compute::PARTIAL_FROM_BEGINNING)) {
auto it = encoders.begin();
for (; it != encoders.end(); ++it) {
auto ptr = it->second.enc;
if (ptr != nullptr) {
if (!ptr->ready()) {
if (ptr->is_stopped())
return{Error::EXITING, 0};
break;
}
}
}
if (it == encoders.end()) {
pool_last_reported = static_cast<int16_t> (encoders.size() - 1);
return {Error::NONE, static_cast<uint8_t>(pool_last_reported)};
}
if (Compute::NONE != (flags & Compute::PARTIAL_FROM_BEGINNING) &&
(pool_last_reported < (it->first - 1))) {
pool_last_reported = it->first - 1;
return {Error::NONE, static_cast<uint8_t>(pool_last_reported)};
}
return {Error::WORKING, 0};
}
if (Compute::NONE != (flags & Compute::PARTIAL_ANY)) {
for (auto &it : encoders) {
if (!it.second.reported) {
auto ptr = it.second.enc;
if (ptr != nullptr) {
if (ptr->ready())
return {Error::NONE, it.first};
if (ptr->is_stopped())
return{Error::EXITING, 0};
}
}
}
}
return {Error::WORKING, 0}; // should never be reached
}
template <typename Rnd_It, typename Fwd_It>
size_t Encoder<Rnd_It, Fwd_It>::encode (Fwd_It &output, const Fwd_It end,
                                                        const uint32_t &id)
{
const uint32_t mask_8 = static_cast<uint32_t> (std::pow (2, 8)) - 1;
const uint32_t mask = ~(mask_8 << 24);
    // the high 8 bits are the sbn, the low 24 bits the esi
    return encode (output, end, id & mask, static_cast<uint8_t> (id >> 24));
}
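// e.g. (illustrative): for sbn = 1, esi = 5 a caller would pack
//   uint32_t id = (static_cast<uint32_t> (1) << 24) | 5;
// and the overload above recovers esi = 5 (low 24 bits) and
// sbn = 1 (high 8 bits) from it.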
template <typename Rnd_It, typename Fwd_It>
size_t Encoder<Rnd_It, Fwd_It>::encode (Fwd_It &output, const Fwd_It end,
                                                        const uint32_t esi,
                                                        const uint8_t sbn)
{
    std::unique_lock<std::mutex> lock (_mtx);
    auto it = encoders.find (sbn);
if (use_pool) {
if (it == encoders.end())
return 0;
auto shared_enc = it->second.enc;
if (!shared_enc->ready())
return 0;
lock.unlock();
return shared_enc->Enc (esi, output, end);
} else {
if (it == encoders.end()) {
bool success;
std::tie (it, success) = encoders.emplace (std::make_pair (sbn,
                                            Enc (&interleave, sbn)));
assert (success);
auto shared_enc = it->second.enc;
lock.unlock();
RaptorQ__v1::Work_State state =
RaptorQ__v1::Work_State::KEEP_WORKING;
shared_enc->generate_symbols (&state);
return shared_enc->Enc (esi, output, end);
} else {
auto shared_enc = it->second.enc;
lock.unlock();
if (!shared_enc->ready())
return 0;
return shared_enc->Enc (esi, output, end);
}
    }
}
template <typename Rnd_It, typename Fwd_It>
void Encoder<Rnd_It, Fwd_It>::free (const uint8_t sbn)
{
    std::unique_lock<std::mutex> lock (_mtx);
auto it = encoders.find (sbn);
if (it != encoders.end())
encoders.erase (it);
}
template <typename Rnd_It, typename Fwd_It>
uint8_t Encoder<Rnd_It, Fwd_It>::blocks() const
{
    return interleave.blocks();
}
template <typename Rnd_It, typename Fwd_It>
uint32_t Encoder<Rnd_It, Fwd_It>::block_size (const uint8_t sbn) const
{
    return interleave.source_symbols (sbn) * interleave.symbol_size();
}
template <typename Rnd_It, typename Fwd_It>
uint16_t Encoder<Rnd_It, Fwd_It>::symbol_size() const
{
    return interleave.symbol_size();
}
template <typename Rnd_It, typename Fwd_It>
uint16_t Encoder<Rnd_It, Fwd_It>::symbols (const uint8_t sbn) const
{
    return interleave.source_symbols (sbn);
}
template <typename Rnd_It, typename Fwd_It>
uint32_t Encoder<Rnd_It, Fwd_It>::max_repair (const uint8_t sbn) const
/////////////////
//
// Decoder
//
/////////////////
template <typename In_It, typename Fwd_It>
Decoder<In_It, Fwd_It>::~Decoder()
{
exiting = true; // stop notifying thread
for (auto &it : decoders) { // stop existing computations
auto ptr = it.second.dec;
if (ptr != nullptr)
ptr->stop();
}
}
template <typename In_It, typename Fwd_It>
void Decoder<In_It, Fwd_It>::free (const uint8_t sbn)
{
    std::unique_lock<std::mutex> lock (_mtx);
    auto it = decoders.find (sbn);
    if (it != decoders.end())
        decoders.erase (it);
}
template <typename In_It, typename Fwd_It>
Error Decoder<In_It, Fwd_It>::add_symbol (In_It &start, const In_It end,
                                                        const uint32_t id)
{
    // the 32-bit id packs the sbn in the high 8 bits, the esi in the low 24
    return add_symbol (start, end, id & 0x00FFFFFF,
                                        static_cast<uint8_t> (id >> 24));
}
template <typename In_It, typename Fwd_It>
Error Decoder<In_It, Fwd_It>::add_symbol (In_It &start, const In_It end,
                                        const uint32_t esi, const uint8_t sbn)
{
std::unique_lock<std::mutex> lock (_mtx);
auto it = decoders.find (sbn);
if (it == decoders.end()) {
const uint16_t symbols = sbn < part.num (0) ?
part.size(0) : part.size(1);
bool success;
std::tie (it, success) = decoders.emplace (std::make_pair(sbn,
Dec (symbols, _symbol_size)));
        assert (success);
    }
    auto dec = it->second.dec;
    lock.unlock();
    auto err = dec->add_symbol (start, end, esi);
    if (err != Error::NONE)
        return err;
// automatically add work to pool if we use it and have enough data
std::unique_lock<std::mutex> pool_lock (*_pool_mtx);
RQ_UNUSED(pool_lock);
if (use_pool && dec->can_decode()) {
    bool add_work = dec->add_concurrent (max_block_decoder_concurrency);
if (add_work) {
std::unique_ptr<Block_Work> work = std::unique_ptr<Block_Work>(
new Block_Work());
work->work = dec;
work->notify = _pool_notify;
work->lock = _pool_mtx;
Impl::Thread_Pool::get().add_work (std::move(work));
}
}
return Error::NONE;
}
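// Usage sketch (illustrative; "dec", "packet", "esi", "sbn" and
// "payload_id" are hypothetical names): feeding one received symbol
// into the decoder.
//   auto from = packet.begin();
//   Error err = dec.add_symbol (from, packet.end(), esi, sbn);
//   // or, with the 32-bit RFC 6330 FEC Payload ID:
//   Error err2 = dec.add_symbol (from, packet.end(), payload_id);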
template <typename In_It, typename Fwd_It>
Decoder<In_It, Fwd_It>::Block_Work::~Block_Work()
{
// have we been called before the computation finished?
auto locked_dec = work.lock();
auto locked_notify = notify.lock();
auto locked_mtx = lock.lock();
if (locked_dec != nullptr && locked_notify != nullptr &&
locked_mtx != nullptr) {
locked_dec->stop();
std::unique_lock<std::mutex> p_lock (*locked_mtx);
RQ_UNUSED(p_lock);
locked_notify->notify_all();
}
}
template <typename In_It, typename Fwd_It>
Work_Exit_Status Decoder<In_It, Fwd_It>::Block_Work::do_work (
                                        RaptorQ__v1::Work_State *state)
{
    auto locked_dec = work.lock();
auto locked_notify = notify.lock();
auto locked_mtx = lock.lock();
if (locked_dec != nullptr && locked_notify != nullptr &&
locked_mtx != nullptr) {
std::unique_lock<std::mutex> p_lock (*locked_mtx, std::defer_lock);
auto ret = locked_dec->decode (state);
switch (ret) {
case RaptorQ__v1::Impl::Raw_Decoder<In_It>::Decoder_Result::DECODED:
locked_dec->drop_concurrent();
work.reset();
p_lock.lock();
locked_notify->notify_all();
    p_lock.unlock();
    return Work_Exit_Status::DONE;
case RaptorQ__v1::Impl::Raw_Decoder<In_It>::Decoder_Result::NEED_DATA:
p_lock.lock();
if (locked_dec->can_decode()) {
// check again to avoid race between threads
return Work_Exit_Status::REQUEUE;
} else {
locked_dec->drop_concurrent();
p_lock.unlock();
work.reset();
return Work_Exit_Status::DONE;
}
case RaptorQ__v1::Impl::Raw_Decoder<In_It>::Decoder_Result::STOPPED:
    return Work_Exit_Status::STOPPED;
case RaptorQ__v1::Impl::Raw_Decoder<In_It>::Decoder_Result::CAN_RETRY:
return Work_Exit_Status::REQUEUE;
}
}
return Work_Exit_Status::DONE;
template <typename In_It, typename Fwd_It>
std::future<std::pair<Error, uint8_t>> Decoder<In_It, Fwd_It>::compute (
                                                        const Compute flags)
{
using ret_t = std::pair<Error, uint8_t>;
std::promise<ret_t> p;
bool error = false;
// need some flags
if (flags == Compute::NONE)
error = true;
// flag incompatibilities
if (Compute::NONE != (flags & Compute::PARTIAL_FROM_BEGINNING) &&
(Compute::NONE != (flags & (Compute::PARTIAL_ANY |
Compute::COMPLETE |
Compute::NO_POOL)))) {
error = true;
} else if (Compute::NONE != (flags & Compute::PARTIAL_ANY) &&
(Compute::NONE != (flags & (Compute::PARTIAL_FROM_BEGINNING |
Compute::COMPLETE |
Compute::NO_POOL)))) {
error = true;
} else if (Compute::NONE != (flags & Compute::COMPLETE) &&
Compute::NONE != (flags &(Compute::PARTIAL_FROM_BEGINNING |
Compute::PARTIAL_ANY |
Compute::NO_POOL))) {
    error = true;
}
if (Compute::NONE != (flags & Compute::NO_POOL)) {
std::unique_lock<std::mutex> lock (_mtx);
if (decoders.size() != 0) {
// You can only say you won't use the pool *before* you start
// decoding something!
error = true;
} else {
use_pool = false;
p.set_value ({Error::NONE, 0});
return p.get_future();
}
}
if (error) {
p.set_value ({Error::WRONG_INPUT, 0});
return p.get_future();
}
// do not add work to the pool, to save memory.
// let "add_symbol" create the Decoders as needed.
// spawn thread waiting for other thread exit.
// this way we can set_value to the future when needed.
auto future = p.get_future();
if (Compute::NONE != (flags & Compute::NO_BACKGROUND)) {
wait_threads (this, flags, std::move(p));
} else {
std::unique_lock<std::mutex> pool_wait_lock (*_pool_mtx);
pool_wait.emplace_back (wait_threads, this, flags, std::move(p));
    }
    return future;
}
template <typename In_It, typename Fwd_It>
void Decoder<In_It, Fwd_It>::wait_threads (Decoder<In_It, Fwd_It> *obj,
const Compute flags,
std::promise<std::pair<Error, uint8_t>> p)
{
    do {
        std::unique_lock<std::mutex> lock (*obj->_pool_mtx);
        if (obj->exiting) { // make sure we can exit
            p.set_value ({Error::NONE, 0});
break;
}
auto status = obj->get_report (flags);
if (Error::WORKING != status.first) {
p.set_value (status);
break;
        }
        obj->_pool_notify->wait (lock);
        lock.unlock();
} while (true);
// delete ourselves from the waiting thread vector.
for (auto it = obj->pool_wait.begin(); it != obj->pool_wait.end(); ++it) {
if (it->get_id() == std::this_thread::get_id()) {
it->detach();
            obj->pool_wait.erase (it);
            break;
        }
    }
}
template <typename In_It, typename Fwd_It>
std::pair<Error, uint8_t> Decoder<In_It, Fwd_It>::get_report (
                                                        const Compute flags)
{
if (decoders.size() == 0)
return {Error::WORKING, 0};
if (Compute::COMPLETE == (flags & Compute::COMPLETE) ||
Compute::PARTIAL_FROM_BEGINNING ==
(flags & Compute::PARTIAL_FROM_BEGINNING)) {
// get first non-reported block.
auto it = decoders.begin();
for (; it != decoders.end(); ++it) {
if (pool_last_reported <= it->first)
break;
}
uint16_t reportable = 0;
// get last reportable block
for (; it != decoders.end(); ++it) {
auto ptr = it->second.dec;
if (ptr != nullptr) {
if (!ptr->ready()) {
if (ptr->is_stopped())
return {Error::EXITING, 0};
break;
}
}