FAST 3.2.0
Framework for Heterogeneous Medical Image Computing and Visualization
fast::InferenceEngine Class Reference  [abstract]

#include <InferenceEngine.hpp>

Inheritance diagram for fast::InferenceEngine (diagram omitted)
Collaboration diagram for fast::InferenceEngine (diagram omitted)

Classes

struct  NetworkNode
 

Public Types

typedef std::shared_ptr< InferenceEngine > pointer
 
- Public Types inherited from fast::Object
typedef std::shared_ptr< Object > pointer
 

Public Member Functions

virtual void setFilename (std::string filename)
 
virtual void setModelAndWeights (std::vector< uint8_t > model, std::vector< uint8_t > weights)
 
virtual std::string getFilename () const
 
virtual void run ()=0
 
virtual void addInputNode (uint portID, std::string name, NodeType type=NodeType::IMAGE, TensorShape shape={})
 
virtual void addOutputNode (uint portID, std::string name, NodeType type=NodeType::IMAGE, TensorShape shape={})
 
virtual void setInputNodeShape (std::string name, TensorShape shape)
 
virtual void setOutputNodeShape (std::string name, TensorShape shape)
 
virtual NetworkNode getInputNode (std::string name) const
 
virtual NetworkNode getOutputNode (std::string name) const
 
virtual std::unordered_map< std::string, NetworkNode > getOutputNodes () const
 
virtual std::unordered_map< std::string, NetworkNode > getInputNodes () const
 
virtual void setInputData (std::string inputNodeName, std::shared_ptr< Tensor > tensor)
 
virtual std::shared_ptr< Tensor > getOutputData (std::string inputNodeName)
 
virtual void load ()=0
 
virtual bool isLoaded () const
 
virtual ImageOrdering getPreferredImageOrdering () const =0
 
virtual std::string getName () const =0
 
virtual std::vector< ModelFormat > getSupportedModelFormats () const =0
 
virtual ModelFormat getPreferredModelFormat () const =0
 
virtual bool isModelFormatSupported (ModelFormat format)
 
virtual void setDeviceType (InferenceDeviceType type)
 
virtual void setDevice (int index=-1, InferenceDeviceType type=InferenceDeviceType::ANY)
 
virtual std::vector< InferenceDeviceInfo > getDeviceList ()
 
virtual int getMaxBatchSize ()
 
virtual void setMaxBatchSize (int size)
 
- Public Member Functions inherited from fast::Object
 Object ()
 
virtual ~Object ()
 
Reporter & getReporter ()
 

Protected Member Functions

virtual void setIsLoaded (bool loaded)
 
- Protected Member Functions inherited from fast::Object
Reporter & reportError ()
 
Reporter & reportWarning ()
 
Reporter & reportInfo ()
 
ReporterEnd reportEnd () const
 

Protected Attributes

std::unordered_map< std::string, NetworkNode > mInputNodes
 
std::unordered_map< std::string, NetworkNode > mOutputNodes
 
int m_deviceIndex = -1
 
InferenceDeviceType m_deviceType = InferenceDeviceType::ANY
 
int m_maxBatchSize = 1
 
std::vector< uint8_t > m_model
 
std::vector< uint8_t > m_weights
 
- Protected Attributes inherited from fast::Object
std::weak_ptr< Object > mPtr
 

Additional Inherited Members

- Static Public Member Functions inherited from fast::Object
static std::string getStaticNameOfClass ()
 

Detailed Description

Abstract class for neural network inference engines (TensorFlow, TensorRT, OpenVINO, etc.)
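
A minimal usage sketch (not part of the original documentation): it assumes a concrete engine instance, e.g. a fast::TensorFlowEngine, plus a hypothetical model path, node names, and input tensor; the call order follows the virtual interface documented below.

    // Sketch only: 'engine' is assumed to be a concrete subclass instance
    // (e.g. fast::TensorFlowEngine) and 'input' a std::shared_ptr<fast::Tensor>;
    // the model path and node names are hypothetical.
    engine->setFilename("model.pb");
    engine->addInputNode(0, "input_node");    // defaults: NodeType::IMAGE, shape {}
    engine->addOutputNode(0, "output_node");
    engine->load();                           // engine-specific loading (pure virtual)
    if(engine->isLoaded()) {
        engine->setInputData("input_node", input);
        engine->run();                        // execute inference (pure virtual)
        std::shared_ptr<fast::Tensor> result = engine->getOutputData("output_node");
    }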

Member Typedef Documentation

◆ pointer

typedef std::shared_ptr< InferenceEngine > fast::InferenceEngine::pointer

Member Function Documentation

◆ addInputNode()

virtual void fast::InferenceEngine::addInputNode ( uint portID, std::string name, NodeType type = NodeType::IMAGE, TensorShape shape = {} )
virtual

◆ addOutputNode()

virtual void fast::InferenceEngine::addOutputNode ( uint portID, std::string name, NodeType type = NodeType::IMAGE, TensorShape shape = {} )
virtual

◆ getDeviceList()

virtual std::vector<InferenceDeviceInfo> fast::InferenceEngine::getDeviceList ( )
virtual

Get a list of devices available for this inference engine.

Returns
Vector with information on each available device

Reimplemented in fast::OpenVINOEngine, and fast::TensorFlowEngine.
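
A hedged sketch of combining this with setDevice(); 'engine' is assumed to be a concrete engine instance, and since the fields of InferenceDeviceInfo are not documented on this page, only the size of the list is used:

    // Sketch: pick the first reported device, if any.
    std::vector<fast::InferenceDeviceInfo> devices = engine->getDeviceList();
    if(!devices.empty())
        engine->setDevice(0);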

◆ getFilename()

virtual std::string fast::InferenceEngine::getFilename ( ) const
virtual

◆ getInputNode()

virtual NetworkNode fast::InferenceEngine::getInputNode ( std::string  name) const
virtual

◆ getInputNodes()

virtual std::unordered_map<std::string, NetworkNode> fast::InferenceEngine::getInputNodes ( ) const
virtual

◆ getMaxBatchSize()

virtual int fast::InferenceEngine::getMaxBatchSize ( )
virtual

◆ getName()

virtual std::string fast::InferenceEngine::getName ( ) const
pure virtual

◆ getOutputData()

virtual std::shared_ptr<Tensor> fast::InferenceEngine::getOutputData ( std::string  inputNodeName)
virtual

◆ getOutputNode()

virtual NetworkNode fast::InferenceEngine::getOutputNode ( std::string  name) const
virtual

◆ getOutputNodes()

virtual std::unordered_map<std::string, NetworkNode> fast::InferenceEngine::getOutputNodes ( ) const
virtual

◆ getPreferredImageOrdering()

virtual ImageOrdering fast::InferenceEngine::getPreferredImageOrdering ( ) const
pure virtual

◆ getPreferredModelFormat()

virtual ModelFormat fast::InferenceEngine::getPreferredModelFormat ( ) const
pure virtual

◆ getSupportedModelFormats()

virtual std::vector<ModelFormat> fast::InferenceEngine::getSupportedModelFormats ( ) const
pure virtual

◆ isLoaded()

virtual bool fast::InferenceEngine::isLoaded ( ) const
virtual

◆ isModelFormatSupported()

virtual bool fast::InferenceEngine::isModelFormatSupported ( ModelFormat  format)
virtual
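
A sketch of checking format support before loading; the tryLoad helper is hypothetical, and ModelFormat values are engine specific and not enumerated on this page:

    // Only attempt to load when the engine supports the given format.
    bool tryLoad(std::shared_ptr<fast::InferenceEngine> engine, fast::ModelFormat format) {
        if(!engine->isModelFormatSupported(format))
            return false;
        engine->load();
        return engine->isLoaded();
    }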

◆ load()

virtual void fast::InferenceEngine::load ( )
pure virtual

◆ run()

virtual void fast::InferenceEngine::run ( )
pure virtual

◆ setDevice()

virtual void fast::InferenceEngine::setDevice ( int index = -1, InferenceDeviceType type = InferenceDeviceType::ANY )
virtual

Specify which device index and/or device type to use

Parameters
index	Index of the device to use. -1 means any device can be used
type
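
For illustration, a sketch ('engine' is assumed to be a concrete engine instance); only InferenceDeviceType::ANY appears on this page, so no other device types are named:

    engine->setDevice();                                    // defaults: index -1 (any device), type ANY
    engine->setDevice(1);                                   // request the device with index 1, any type
    engine->setDevice(-1, fast::InferenceDeviceType::ANY);  // explicit form of the defaults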

◆ setDeviceType()

virtual void fast::InferenceEngine::setDeviceType ( InferenceDeviceType  type)
virtual

Set which device type the inference engine should use (assuming the engine supports multiple device types, as OpenVINO does)

Parameters
type

◆ setFilename()

virtual void fast::InferenceEngine::setFilename ( std::string  filename)
virtual

◆ setInputData()

virtual void fast::InferenceEngine::setInputData ( std::string inputNodeName, std::shared_ptr< Tensor > tensor )
virtual

◆ setInputNodeShape()

virtual void fast::InferenceEngine::setInputNodeShape ( std::string name, TensorShape shape )
virtual
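
A sketch; that TensorShape can be constructed from a list of dimensions is an assumption not confirmed by this page, and the node name and dimensions are hypothetical:

    // Assumption: TensorShape is constructible from a dimension list,
    // e.g. {batch, height, width, channels}; verify against the TensorShape docs.
    engine->setInputNodeShape("input_node", fast::TensorShape({1, 256, 256, 1}));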

◆ setIsLoaded()

virtual void fast::InferenceEngine::setIsLoaded ( bool  loaded)
protectedvirtual

◆ setMaxBatchSize()

virtual void fast::InferenceEngine::setMaxBatchSize ( int  size)
virtual

Reimplemented in fast::TensorRTEngine.

◆ setModelAndWeights()

virtual void fast::InferenceEngine::setModelAndWeights ( std::vector< uint8_t > model, std::vector< uint8_t > weights )
virtual
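
A sketch of supplying a model as raw bytes instead of via setFilename(); the file names and the readBytes() helper are hypothetical:

    #include <cstdint>
    #include <fstream>
    #include <iterator>
    #include <memory>
    #include <string>
    #include <vector>
    // plus <InferenceEngine.hpp>, as listed at the top of this page

    // Hypothetical helper: read an entire binary file into a byte vector.
    static std::vector<uint8_t> readBytes(const std::string& path) {
        std::ifstream file(path, std::ios::binary);
        return std::vector<uint8_t>(std::istreambuf_iterator<char>(file),
                                    std::istreambuf_iterator<char>());
    }

    void loadFromBuffers(std::shared_ptr<fast::InferenceEngine> engine) {
        engine->setModelAndWeights(readBytes("model.xml"), readBytes("model.bin"));
        engine->load();
    }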

◆ setOutputNodeShape()

virtual void fast::InferenceEngine::setOutputNodeShape ( std::string name, TensorShape shape )
virtual

Member Data Documentation

◆ m_deviceIndex

int fast::InferenceEngine::m_deviceIndex = -1
protected

◆ m_deviceType

InferenceDeviceType fast::InferenceEngine::m_deviceType = InferenceDeviceType::ANY
protected

◆ m_maxBatchSize

int fast::InferenceEngine::m_maxBatchSize = 1
protected

◆ m_model

std::vector<uint8_t> fast::InferenceEngine::m_model
protected

◆ m_weights

std::vector<uint8_t> fast::InferenceEngine::m_weights
protected

◆ mInputNodes

std::unordered_map<std::string, NetworkNode> fast::InferenceEngine::mInputNodes
protected

◆ mOutputNodes

std::unordered_map<std::string, NetworkNode> fast::InferenceEngine::mOutputNodes
protected

The documentation for this class was generated from the following file:

InferenceEngine.hpp