#include <TensorFlowEngine.hpp>
Public member functions:

  TensorFlowEngine()
  ~TensorFlowEngine() override
  void load() override
  void run() override
  std::string getName() const override
  ImageOrdering getPreferredImageOrdering() const override
  std::vector<ModelFormat> getSupportedModelFormats() const
  ModelFormat getPreferredModelFormat() const
  std::vector<InferenceDeviceInfo> getDeviceList()
  virtual void setFilename(std::string filename)
  virtual void setModelAndWeights(std::vector<uint8_t> model, std::vector<uint8_t> weights)
  virtual std::string getFilename() const
  virtual void addInputNode(uint portID, std::string name, NodeType type = NodeType::IMAGE, TensorShape shape = {})
  virtual void addOutputNode(uint portID, std::string name, NodeType type = NodeType::IMAGE, TensorShape shape = {})
  virtual void setInputNodeShape(std::string name, TensorShape shape)
  virtual void setOutputNodeShape(std::string name, TensorShape shape)
  virtual NetworkNode getInputNode(std::string name) const
  virtual NetworkNode getOutputNode(std::string name) const
  virtual std::unordered_map<std::string, NetworkNode> getOutputNodes() const
  virtual std::unordered_map<std::string, NetworkNode> getInputNodes() const
  virtual void setInputData(std::string inputNodeName, std::shared_ptr<Tensor> tensor)
  virtual std::shared_ptr<Tensor> getOutputData(std::string inputNodeName)
  virtual bool isLoaded() const
  virtual bool isModelFormatSupported(ModelFormat format)
  virtual void setDeviceType(InferenceDeviceType type)
  virtual void setDevice(int index = -1, InferenceDeviceType type = InferenceDeviceType::ANY)
  virtual int getMaxBatchSize()
  virtual void setMaxBatchSize(int size)

Inherited public member functions:

  Object()
  virtual ~Object()
  Reporter& getReporter()
|
◆ ~TensorFlowEngine()

fast::TensorFlowEngine::~TensorFlowEngine()   [override]
◆ TensorFlowEngine()

fast::TensorFlowEngine::TensorFlowEngine()
◆ getDeviceList()

std::vector<InferenceDeviceInfo> fast::TensorFlowEngine::getDeviceList()

Get a list of devices available for this inference engine.

Returns:
    A vector with info on each available device.

Reimplemented from fast::InferenceEngine.
◆ getName()

std::string fast::TensorFlowEngine::getName() const   [override, virtual]
◆ getPreferredImageOrdering()

ImageOrdering fast::TensorFlowEngine::getPreferredImageOrdering() const   [override, virtual]
◆ getPreferredModelFormat()

ModelFormat fast::TensorFlowEngine::getPreferredModelFormat() const   [inline, virtual]
◆ getSupportedModelFormats()

std::vector<ModelFormat> fast::TensorFlowEngine::getSupportedModelFormats() const   [inline, virtual]
◆ load()

void fast::TensorFlowEngine::load()   [override, virtual]
◆ run()

void fast::TensorFlowEngine::run()   [override, virtual]
◆ mLearningPhaseTensors

std::vector<std::string> fast::TensorFlowEngine::mLearningPhaseTensors   [protected]

◆ mSavedModelBundle

std::unique_ptr<tensorflow::SavedModelBundle> fast::TensorFlowEngine::mSavedModelBundle   [protected]

◆ mSession

std::unique_ptr<tensorflow::Session> fast::TensorFlowEngine::mSession   [protected]
The documentation for this class was generated from the following file: