## @package data_workers
# Module caffe2.python.data_workers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

'''
This module provides a python-land multithreaded data input mechanism
for Caffe2 nets.

Basic usage is as follows:
   coordinator = data_workers.init_data_input_workers(
      net,
      ["data", "label"],
      my_fetch_fun,
      batch_size=32,
      input_source_name="train",
      dont_rebatch=False
   )
   ...
   coordinator.start()

The first argument is the Caffe2 net (or model helper), and the second
argument is the list of input blobs that are to be fed.

Argument 'input_source_name' is used to distinguish different sources of
data, such as train or test data. This is to ensure the data does not get
mixed up, even if two nets share blobs.

To do the actual data loading, one defines a "fetcher function"
that has the call signature
   my_fetch_fun(worker_id, batch_size)

Optionally, one can define an "init function" that is called once before
the threads start, and has the call signature:
   my_init_fun(data_coordinator, global_coordinator)
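
For instance, a minimal init function (a sketch; any one-time setup shared
by the fetcher threads would go here) could be:

   def my_init_fun(data_coordinator, global_coordinator):
       print("Initializing data input")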

If dont_rebatch is set to True, the data input is not batched into equal
sized chunks; the data directly provided by the fetchers is used instead.

'batch_columns' can be used to specify which dimension is the batch dimension,
for each of the inputs. Default is 0 for all inputs.
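
For instance, for a time-major sequence input of shape (T, N, D), where the
batch dimension N is the second axis, one could pass (a sketch):

   coordinator = data_workers.init_data_input_workers(
      net,
      ["seq", "label"],
      my_fetch_fun,
      batch_size=32,
      batch_columns=[1, 0],
   )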

'timeout' is the timeout in seconds after which the net will fail if no
data is available (default 600s = 10 mins).

The fetcher function returns a list of numpy arrays corresponding to the
different input blobs. In the example above, it would return two arrays, one
for the data blob and another for the labels. These arrays can have an
arbitrary number of elements (i.e. they do not need to match the batch size).
The batch size is provided to the function as a hint only.

For example, the fetcher function could download images from a remote
service or load random images from a directory on a file system.
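
As an illustration, a minimal dummy fetcher returning random data (a sketch;
the shapes and dtypes are arbitrary) could be:

   def my_fetch_fun(worker_id, batch_size):
       # One numpy array per input blob; the number of samples does not
       # need to match batch_size, which is only a hint.
       data = np.random.rand(batch_size, 3, 32, 32).astype(np.float32)
       labels = np.random.randint(0, 10, size=batch_size).astype(np.int32)
       return [data, labels]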

For a runnable dummy example, see the data_workers_test unit test.

Note that for data_parallel_models, init_data_input_workers will be called
for each GPU. Note that the 'coordinator' returned by the function is the
same each time.
'''

try:
    import Queue
except ImportError:
    # Py3
    import queue as Queue
from itertools import chain
import logging
import threading
import numpy as np
import time

from caffe2.python import workspace, core, scope, utils
from caffe2.proto import caffe2_pb2
from caffe2.python.parallel_workers import Metrics, State, \
    WorkerCoordinator, GlobalWorkerCoordinator, Worker, run_worker


log = logging.getLogger("data_workers")
log.setLevel(logging.INFO)
LOG_INT_SECS = 60


def get_worker_ids(num_workers):
    return list(range(0, num_workers))


def init_data_input_workers(
    net,
    input_blob_names,
    fetch_fun,
    batch_size,
    num_worker_threads=2,
    input_source_name="train",
    max_buffered_batches=800,
    init_fun=None,
    external_loggers=None,
    dont_rebatch=False,
    batch_columns=None,
    timeout=600
):
    global global_coordinator
    device_option = scope.CurrentDeviceScope()
    if device_option is None:
        device_option = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)

    metrics = Metrics(external_loggers)
    batch_feeder = BatchFeeder(
        net,
        input_blob_names,
        batch_size,
        device_option,
        scope.CurrentNameScope(),
        input_source_name,
        global_coordinator.get_queue(input_source_name, max_buffered_batches),
        metrics,
        dont_rebatch,
        batch_columns,
        timeout=timeout
    )

    # Launch fetch worker threads
    worker_ids = [
        global_coordinator.get_new_worker_id()
        for i in range(num_worker_threads)
    ]

    # Create coordinator object
    coordinator = WorkerCoordinator(
        input_source_name, worker_ids, init_fun, batch_feeder)

    workers = [
        threading.Thread(
            target=run_worker,
            name="data_workers fetcher id {}".format(worker_id),
            args=[coordinator,
                  DataWorker(coordinator, worker_id, fetch_fun, metrics,
                             batch_size, batch_feeder)],
        ) for worker_id in worker_ids
    ]

    workers.append(threading.Thread(
        target=enqueuer,
        name="Enqueuer {} {}".format(input_source_name, scope.CurrentNameScope()),
        args=[coordinator, batch_feeder]))
    coordinator._workers = workers
    global_coordinator.add(coordinator)

    return global_coordinator


class BatchFeeder(State):
    def __init__(self, net, input_blob_names, batch_size,
                 device_option, namescope, input_source_name, queue,
                 metrics, dont_rebatch, batch_columns, timeout=600):
        self._counter = 0
        self._input_blob_names = input_blob_names
        self._batch_size = batch_size
        self._internal_queue = queue
        self._queues = []
        self._device_option = device_option
        self._namescope = namescope
        self._timeout = timeout
        self._input_source_name = input_source_name
        self._c2_queue_capacity = 4
        self._create_caffe2_queues(net)
        self._create_caffe2_ops(net)
        self._inputs = 0
        self._prev_seconds = 0
        self._last_warning = time.time()
        self._dont_rebatch = dont_rebatch
        self._init_scratch()
        self._metrics = metrics

        if batch_columns is None:
            batch_columns = [0 for _ in input_blob_names]
        self._batch_columns = batch_columns

    def start(self):
        self._inputs = 0
        self._prev_seconds = time.time()

    def stop(self):
        try:
            for q in self._queues:
                workspace.RunOperatorOnce(
                    core.CreateOperator("CloseBlobsQueue", [q], [])
                )
        finally:
            self._log_inputs_per_interval(0, force=True)

    def cleanup(self):
        utils.ResetBlobs(self._scratch_blob.values())
        utils.ResetBlobs(self._scratch_status.values())

    def _get(self, data_input_coordinator):
        start_time = time.time()
        last_warning = time.time()
        while data_input_coordinator.is_active():
            try:
                return self._internal_queue.get(block=True, timeout=0.5)
            except Queue.Empty:
                if time.time() - last_warning > 10.0:
                    log.warning("** Data input is slow: (still) no data in {} secs.".format(
                        time.time() - start_time))
                    last_warning = time.time()
                continue
        return None

    def _validate_chunk(self, chunk):
        if chunk is None:
            log.warning("Fetcher function returned None")
            return False

        assert len(chunk) == len(self._input_blob_names), \
            "Expecting data blob for each input"
        for d in chunk:
            assert isinstance(d, np.ndarray), \
                "Fetcher function must return a numpy array"
        if not self._dont_rebatch:
            j = 1
            for d in chunk[1:]:
                assert d.shape[self._batch_columns[j]] == \
                    chunk[0].shape[self._batch_columns[0]], \
                    "Each returned input must have equal number of samples"
                j += 1

        if len(chunk) == 0:
            log.warning("Worker provided zero length input")
            return False

        return True

    def put(self, chunk, data_input_coordinator):
        if not self._validate_chunk(chunk):
            return

        while data_input_coordinator.is_active():
            try:
                qsize = self._internal_queue.qsize()
                if qsize < 2 and (time.time() - self._last_warning) > LOG_INT_SECS:
                    log.warning("Warning, data loading lagging behind: " +
                                "queue size={}, name={}".format(qsize, self._input_source_name))
                    self._last_warning = time.time()
                self._counter += 1
                self._internal_queue.put(chunk, block=True, timeout=0.5)
                self._log_inputs_per_interval(chunk[0].shape[0])
                return
            except Queue.Full:
                log.debug("Queue full: stalling fetchers...")
                continue

    def _enqueue_batch_direct(self, data_input_coordinator):
        data = self._get(data_input_coordinator)
        if data is None:
            return
        if data_input_coordinator.is_active():
            for b, q, c in zip(self._input_blob_names, self._queues, data):
                self._enqueue(b, q, c)

    def _enqueue_batch(self, data_input_coordinator):
        '''
        This pulls data from the python-side queue and collects it
        into batch-sized pieces, unless dont_rebatch is set to True.
        '''
        if self._dont_rebatch:
            self._enqueue_batch_direct(data_input_coordinator)
            return

        cur_batch = [np.array([]) for d in self._input_blob_names]
        first_batch_col = self._batch_columns[0]

        # Collect data until we have a full batch size
        while (
            cur_batch[0].shape[0] == 0 or
            cur_batch[0].shape[first_batch_col] < self._batch_size
        ) and data_input_coordinator.is_active():
            chunk = self._get(data_input_coordinator)
            if chunk is None:
                continue

            for j, chunk_elem in enumerate(chunk):
                if cur_batch[j].shape[0] == 0:
                    cur_batch[j] = chunk_elem.copy()
                else:
                    cur_batch[j] = np.append(
                        cur_batch[j], chunk_elem, axis=self._batch_columns[j]
                    )

        start_time = time.time()
        try:
            # Return data over the batch size back to the queue
            if cur_batch[0].shape[0] > 0 and cur_batch[0].shape[
                first_batch_col
            ] > self._batch_size:
                leftover = []
                trimmed_batch = []
                for j, b in enumerate(cur_batch):
                    [c, l] = np.split(
                        b, [self._batch_size], axis=self._batch_columns[j]
                    )
                    leftover.append(l)
                    trimmed_batch.append(c)
                cur_batch = trimmed_batch
                try:
                    self._internal_queue.put(leftover, block=False)
                except Queue.Full:
                    pass

            assert cur_batch[0].shape[first_batch_col] == self._batch_size

            if data_input_coordinator.is_active():
                for b, q, c in zip(
                    self._input_blob_names, self._queues, cur_batch
                ):
                    self._enqueue(b, q, c)
        finally:
            self._metrics.put_metric('enqueue_time', time.time() - start_time)

    def _init_scratch(self):
        self._scratch_blob = {}
        self._scratch_status = {}
        for blob_name in self._input_blob_names:
            scratch_name = self._namescope + blob_name + \
                "_scratch_" + self._input_source_name
            self._scratch_blob[blob_name] = core.BlobReference(scratch_name)
            self._scratch_status[blob_name] = core.BlobReference(
                scratch_name + "_status"
            )

        # Feed empty arrays to the scratch blobs here, so that there won't be
        # race conditions when calling FeedBlob (which calls workspace
        # CreateBlob()) from enqueue threads
        for b in chain(
            self._scratch_blob.values(), self._scratch_status.values()
        ):
            workspace.FeedBlob(
                b,
                np.array([]).astype(np.float32),
                device_option=self._device_option,
            )

    def _enqueue(self, blob_name, queue, data_arr):
        '''
        Enqueue the correctly sized batch arrays to Caffe2's queue.
        '''
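        # The array is first fed into a per-blob scratch blob (created in
        # _init_scratch), and then moved into the Caffe2-side queue with
        # SafeEnqueueBlobs, which also outputs a status blob indicating
        # whether the queue has been closed.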
        workspace.FeedBlob(
            self._scratch_blob[blob_name],
            data_arr,
            device_option=self._device_option
        )

        op = core.CreateOperator(
            "SafeEnqueueBlobs",
            [queue, self._scratch_blob[blob_name]],
            [self._scratch_blob[blob_name], self._scratch_status[blob_name]],
            device_option=self._device_option
        )
        workspace.RunOperatorOnce(op)
|
|
|
|
|
|
2017-05-23 01:04:26 -07:00
|
|
|
def _create_caffe2_queues(self, net):
|
2016-12-13 15:15:36 -08:00
|
|
|
'''
|
2017-05-23 01:04:26 -07:00
|
|
|
Creates queues on caffe2 side
|
2016-12-13 15:15:36 -08:00
|
|
|
'''
|
|
|
|
|
def create_queue(queue_name, num_blobs, capacity):
|
|
|
|
|
workspace.RunOperatorOnce(
|
|
|
|
|
core.CreateOperator(
|
|
|
|
|
"CreateBlobsQueue",
|
|
|
|
|
[], [queue_name],
|
|
|
|
|
num_blobs=1,
|
|
|
|
|
capacity=capacity))
|
|
|
|
|
return core.ScopedBlobReference(queue_name)
|
|
|
|
|
|
|
|
|
|
for blob_name in self._input_blob_names:
|
2016-12-15 09:30:47 -08:00
|
|
|
qname = blob_name + "_c2queue" + "_" + self._input_source_name
|
2017-04-26 11:17:51 -07:00
|
|
|
q = create_queue(
|
|
|
|
|
qname, num_blobs=1, capacity=self._c2_queue_capacity
|
|
|
|
|
)
|
2016-12-13 15:15:36 -08:00
|
|
|
self._queues.append(q)
|
|
|
|
|
|
2017-05-23 01:04:26 -07:00
|
|
|
def _create_caffe2_ops(self, net):
|
|
|
|
|
'''
|
|
|
|
|
Creates dequeue-ops on caffe2 side
|
|
|
|
|
'''
|
|
|
|
|
for q, blob_name in zip(self._queues, self._input_blob_names):
|
2016-12-13 15:15:36 -08:00
|
|
|
# Add operator to the Caffe2 network to dequeue
|
2017-07-13 18:41:27 -07:00
|
|
|
net.DequeueBlobs(q, blob_name, timeout_secs=float(self._timeout))

    def _log_inputs_per_interval(self, inputs, force=False):
        self._inputs += inputs
        current_seconds = time.time()
        delta_seconds = current_seconds - self._prev_seconds
        if delta_seconds >= LOG_INT_SECS or force:
            inputs_per_sec = int(self._inputs / delta_seconds)
            qsize = self._internal_queue.qsize()
            log.info("{}/{}: {} inputs/sec".format(
                self._input_source_name,
                self._namescope,
                inputs_per_sec,
            ))
            log.info("-- queue: {} batches".format(qsize))
            # log and reset perf metrics
            self._metrics.put_metric(
                'inputs_per_sec', inputs_per_sec, False)
            self._metrics.put_metric('queue_size', qsize, False)
            self._metrics.put_metric(
                'time_elapsed', delta_seconds, False)
            self._metrics.log_metrics()
            self._metrics.reset_metrics()
            self._inputs = 0
            self._prev_seconds = current_seconds


class GlobalCoordinator(GlobalWorkerCoordinator):
    def __init__(self):
        GlobalWorkerCoordinator.__init__(self)
        self._queues = {}

    def get_queue(self, queue_name, max_buffered_batches):
        assert isinstance(max_buffered_batches, int)
        if queue_name not in self._queues:
            self._queues[queue_name] = Queue.Queue(maxsize=max_buffered_batches)
        return self._queues[queue_name]

    def reset_data_input(self, namescope, name, net, batch_size):
        log.info("Reset data input {}, batch size {}".format(name, batch_size))
        for c in self._coordinators:
            if c._worker_name == name and c._state._namescope == namescope:
                c._state._batch_size = batch_size
                c._state._create_caffe2_ops(net)


class DataWorker(Worker):
    def __init__(
        self,
        coordinator,
        worker_id,
        worker_fun,
        metrics,
        batch_size,
        batch_feeder
    ):
        Worker.__init__(self, coordinator, worker_id, worker_fun=worker_fun,
                        metrics=metrics)
        self._batch_size = batch_size
        self._batch_feeder = batch_feeder

    def run(self):
        input_data = self._worker_fun(self._worker_id, self._batch_size)

        self._batch_feeder.put(input_data, self._coordinator)

    def finish(self):
        self._metrics.put_metric(
            'fetcher_time', time.time() - self._start_time)


global_coordinator = GlobalCoordinator()


def enqueuer(coordinator, batch_feeder):
    while coordinator.is_active():
        batch_feeder._enqueue_batch(coordinator)