File reader implementation #31


Merged: 9 commits, Sep 8, 2023
10 changes: 6 additions & 4 deletions Makefile
@@ -8,7 +8,7 @@ BUILD_DIR := build
DEBUG := FALSE

# Objects and executable
OBJS := $(addprefix $(BUILD_DIR)/, main.o pipeline.o segfilter.o StateManager.o IdlingState.o ProcessingState.o IOBridge.o UART.o ThreadLogger.o LoggingLevel.o )
OBJS := $(addprefix $(BUILD_DIR)/, main.o pipeline.o StateManager.o IdlingState.o ProcessingState.o segfilter.o IOBridge.o UART.o Reader.o ThreadLogger.o LoggingLevel.o )
TARGET := $(BUILD_DIR)/pipeline

LIB := tensorflow
@@ -26,9 +26,9 @@ ifeq ($(LIB),nvinfer)
endif

# Compiler flags
CFLAGS := -Wall -Werror -Wpedantic
CFLAGS := -Wall -Werror -Wpedantic -std=c++17 -fopenmp

LINKERFLAGS := -lstdc++ -lpthread

# Conditionally add leak sanitizer
ifeq ($(LEAK),TRUE)
@@ -95,7 +95,9 @@ $(BUILD_DIR)/LoggingLevel.o: $(SRC_DIR)/utils/LoggingLevel.cpp $(SRC_DIR)/utils/
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $(SRC_DIR)/utils/LoggingLevel.cpp -o $@


$(BUILD_DIR)/Reader.o: $(SRC_DIR)/utils/Reader.cpp $(SRC_DIR)/utils/Reader.hpp
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $(SRC_DIR)/utils/Reader.cpp -o $@



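Note on the CFLAGS change: -std=c++17 raises the language standard, and -fopenmp is what activates the `#pragma omp parallel for` added to TensorRTModel::predict further down; without the flag the pragma is silently ignored and the loop runs serially. A minimal, self-contained sketch of that flag/pragma pairing (file name and variables are illustrative, not from this PR):

```cpp
// Build (illustrative): g++ -Wall -Werror -Wpedantic -std=c++17 -fopenmp demo.cpp
#include <cstdio>

int main() {
    const int n = 8;
    int squares[8];
    // Each iteration writes a distinct slot, so there is no data race.
    #pragma omp parallel for
    for (int i = 0; i < n; ++i) {
        squares[i] = i * i;
    }
    for (int i = 0; i < n; ++i) {
        std::printf("%d ", squares[i]);
    }
    std::printf("\n");
    return 0;
}
```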
7 changes: 4 additions & 3 deletions src/bridge/UART.cpp
@@ -6,9 +6,9 @@ UartIO::UartIO(ThreadLogger *logger)
// SETUP SERIAL WORLD

struct termios port_options; // Create the structure

uartId = open(uart_target, O_RDWR | O_NOCTTY);
tcgetattr(uartId, &port_options); // Get the current attributes of the Serial port

//------------------------------------------------
// OPEN THE UART
//------------------------------------------------
@@ -24,7 +24,8 @@ UartIO::UartIO(ThreadLogger *logger)
// Caution: VMIN and VTIME flags are ignored if O_NONBLOCK flag is set.
// O_NOCTTY - When set and path identifies a terminal device, open() shall not cause the terminal device to become the controlling terminal for the process.uartId = open("/dev/ttyTHS1", O_RDWR | O_NOCTTY | O_NDELAY); //Open in non blocking read/write mode

uartId = open(uart_target, O_RDWR | O_NOCTTY);



tcflush(uartId, TCIFLUSH);
tcflush(uartId, TCIOFLUSH);
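The effect of this hunk is ordering: tcgetattr() needs a valid file descriptor, so open() has to run before the port attributes are read. A sketch of that ordering, with error handling added for illustration (the PR itself does not add any):

```cpp
#include <fcntl.h>
#include <termios.h>
#include <cstdio>

// Open a serial device and read back its current attributes.
int open_uart(const char *uart_target) {
    int uartId = open(uart_target, O_RDWR | O_NOCTTY); // must come first
    if (uartId < 0) {
        std::perror("open");
        return -1;
    }
    struct termios port_options;
    if (tcgetattr(uartId, &port_options) != 0) { // needs a valid descriptor
        std::perror("tcgetattr");
    }
    tcflush(uartId, TCIOFLUSH); // discard any stale input and output
    return uartId;
}
```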
2 changes: 1 addition & 1 deletion src/filter/IFilter.hpp
@@ -5,6 +5,6 @@ class IFilter
{
public:
virtual ~IFilter() {}
virtual void doProcessing(unsigned char* image, int width, int height, int channels) = 0;
virtual int * doProcessing(unsigned char* image, int width, int height, int channels) = 0;
virtual void doDecision() = 0;
};
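This signature change ripples through every implementer: doProcessing() now hands back a heap-allocated per-pixel class-index map instead of returning void. A hypothetical caller, showing the ownership contract the new signature implies (consumeFilter is an invented name):

```cpp
// The filter returns an int[height * width] of class indices, or nullptr
// on failure; the caller owns the buffer and must delete[] it.
void consumeFilter(IFilter &filter, unsigned char *image,
                   int width, int height, int channels) {
    int *mask = filter.doProcessing(image, width, height, channels);
    if (mask == nullptr) {
        return; // backend produced no output (e.g. uninitialized model)
    }
    int center = mask[(height / 2) * width + (width / 2)];
    (void)center; // mask[y * width + x] is the class index of pixel (x, y)
    delete[] mask;
}
```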
6 changes: 3 additions & 3 deletions src/filter/segfilter.cpp
@@ -3,8 +3,8 @@



void SegFilter::doProcessing(unsigned char* image, int width, int height, int channels) {
SegFilter::model -> predict(image, width, height, channels);
int * SegFilter::doProcessing(unsigned char* image, int width, int height, int channels) {
return SegFilter::model -> predict(image, width, height, channels);
}

void SegFilter::doDecision() {
@@ -14,7 +14,7 @@ void SegFilter::doDecision() {

SegFilter::SegFilter(const char *modelPath,ThreadLogger * logger) {
logger -> log("Mounting segmentation filter...");
IModel * model = new TFLiteModel(logger);
IModel * model = new TensorRTModel(logger);
model -> loadModel(modelPath);
SegFilter::model = model;

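The constructor now hard-codes the TensorRT backend where it previously hard-coded TFLite; both implement IModel, so the rest of the filter is untouched. If both backends should stay buildable, one hypothetical alternative is a compile-time switch (USE_TENSORRT and makeModel are invented, not part of this PR):

```cpp
// Invented factory: moves the backend choice out of SegFilter's constructor.
static IModel *makeModel(ThreadLogger *logger) {
#ifdef USE_TENSORRT
    return new TensorRTModel(logger);
#else
    return new TFLiteModel(logger);
#endif
}
```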
4 changes: 2 additions & 2 deletions src/filter/segfilter.hpp
@@ -2,7 +2,7 @@
#define SEG_FILTER_H

#include "IFilter.hpp"
#include "../model/TFLiteModel.hpp"
#include "../model/TensorRTModel.hpp"
#include <stdio.h>
#include "../utils/ThreadLogger.hpp"

@@ -14,7 +14,7 @@ class SegFilter: public IFilter{
public:
SegFilter(const char *modelPath,ThreadLogger * logger);
~SegFilter();
virtual void doProcessing(unsigned char* image, int width, int height, int channels);
virtual int * doProcessing(unsigned char* image, int width, int height, int channels);
virtual void doDecision();

};
2 changes: 1 addition & 1 deletion src/model/IModel.hpp
@@ -7,5 +7,5 @@ class IModel
public:
virtual ~IModel() {}
virtual void loadModel(const char *modelPath) = 0;
virtual void predict(unsigned char* image, int height, int width, int channels) = 0;
virtual int * predict(unsigned char* image, int height, int width, int channels) = 0;
};
10 changes: 5 additions & 5 deletions src/model/TFLiteModel.cpp
@@ -43,15 +43,15 @@ void TFLiteModel::loadModel(const char *modelPath)
TF_DeleteBuffer(model_buffer);
}

void TFLiteModel::predict(unsigned char *image, int height, int width, int channels)
int * TFLiteModel::predict(unsigned char *image, int height, int width, int channels)
{

TFLiteModel::logger -> log("Performing inference");
if (graph == nullptr)
{
LoggingLevelWrapper level(LoggingLevel::ERROR);
TFLiteModel::logger -> log(level, "Graph not initialized");
return;
return nullptr;
}
// Set input tensor
TF_DataType dtype = TF_UINT8;
@@ -69,13 +69,13 @@ void TFLiteModel::predict(unsigned char *image, int height, int width, int chann
{
LoggingLevelWrapper level(LoggingLevel::ERROR);
TFLiteModel::logger -> log(level,"TF object session not initialized");
return;
return nullptr;
}
if (status == nullptr)
{
LoggingLevelWrapper level(LoggingLevel::ERROR);
TFLiteModel::logger -> log(level,"TF object status not initialized");
return;
return nullptr;
}

this -> logger -> log("Running session");
@@ -85,7 +85,7 @@ void TFLiteModel::predict(unsigned char *image, int height, int width, int chann
this -> logger -> log("Session run");
if (input_tensor)
TF_DeleteTensor(input_tensor);

return nullptr;
}
void TFLiteModel::deallocator(void *data, size_t length, void *arg)
{
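With the new signature, every path through TFLiteModel::predict() now returns nullptr, including the success path: the session runs, but its output tensor is never read back. A sketch of what the end of predict() could eventually look like, assuming input_op and output_op are TF_Output handles resolved at load time (none of this is in the PR):

```cpp
// Sketch only: feed the input tensor, fetch one output, and surface its data.
TF_Tensor *out_tensor = nullptr;
TF_SessionRun(session, nullptr,
              &input_op, &input_tensor, 1,  // feeds
              &output_op, &out_tensor, 1,   // fetches
              nullptr, 0, nullptr, status);
if (TF_GetCode(status) == TF_OK && out_tensor != nullptr) {
    const float *scores = static_cast<const float *>(TF_TensorData(out_tensor));
    // ... reduce scores to a heap-allocated int[height * width] and return it ...
    TF_DeleteTensor(out_tensor);
}
return nullptr; // until then, callers must treat nullptr as "no output yet"
```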
2 changes: 1 addition & 1 deletion src/model/TFLiteModel.hpp
@@ -22,7 +22,7 @@ class TFLiteModel : public IModel{
TFLiteModel(ThreadLogger * logger);
virtual ~TFLiteModel();
virtual void loadModel(const char *modelPath);
virtual void predict(unsigned char* image, int height, int width, int channels);
virtual int * predict(unsigned char* image, int height, int width, int channels);
};

#endif // MODEL_HPP
37 changes: 29 additions & 8 deletions src/model/TensorRTModel.cpp
@@ -3,8 +3,9 @@
class Logger : public nvinfer1::ILogger
{
ThreadLogger *logger;

public:void setLogger(ThreadLogger *logger)

public:
void setLogger(ThreadLogger *logger)
{
Logger::logger = logger;
}
@@ -90,12 +91,12 @@ void TensorRTModel::loadModel(const char *modelPath)
}
}

void TensorRTModel::predict(unsigned char *image, int height, int width, int channels)
int *TensorRTModel::predict(unsigned char *image, int height, int width, int channels)
{
if (engine == nullptr)
{
this->logger->log("Engine not initialized\n");
return;
return nullptr;
}
this->logger->log("Performing TensorRT inference...\n");
// Allocate GPU memory for the input and output buffers
@@ -117,12 +118,32 @@ void TensorRTModel::predict(unsigned char *image, int height, int width, int cha
float *cpu_output = new float[height * width * 7];
cudaMemcpy(cpu_output, gpu_output, sizeof(float) * height * width * 7, cudaMemcpyDeviceToHost);

// Clean up
//! TODO: This is possibly the image output from the model so it has to be returned
delete[] cpu_output;

cudaFree(gpu_input);
cudaFree(gpu_output);

int *max_indices = new int[height * width];
delete context;

// Parallelized post-processing using OpenMP
#pragma omp parallel for
for (int i = 0; i < height * width; ++i)
{
float max_value = cpu_output[i * 7];
int max_index = 0;
for (int j = 1; j < 7; ++j)
{
if (cpu_output[i * 7 + j] > max_value)
{
max_value = cpu_output[i * 7 + j];
max_index = j;
}
}
max_indices[i] = max_index;
}
// Clean up
//! TODO: This is possibly the image output from the model so it has to be returned


this->logger->log("TensorRT inference done!\n");
return max_indices;
}
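The added post-processing is a per-pixel argmax over the 7 class channels, parallelized across pixels with OpenMP. One ordering detail matters when reading the flattened diff above: the loop reads cpu_output, so the delete[] cpu_output shown before it is presumably the removed old cleanup, and the TODO is right that the new code still needs to free that buffer after the loop. The kernel as a self-contained function:

```cpp
// Per-pixel argmax over num_classes score channels; num_classes is 7 in
// predict(). `scores` must stay alive until the loop completes, and the
// caller owns (and must delete[]) the returned index map.
int *argmax_per_pixel(const float *scores, int height, int width,
                      int num_classes) {
    int *max_indices = new int[height * width];
    #pragma omp parallel for
    for (int i = 0; i < height * width; ++i) {
        float max_value = scores[i * num_classes];
        int max_index = 0;
        for (int j = 1; j < num_classes; ++j) {
            if (scores[i * num_classes + j] > max_value) {
                max_value = scores[i * num_classes + j];
                max_index = j;
            }
        }
        max_indices[i] = max_index;
    }
    return max_indices;
}
```

SegFilter::doProcessing() forwards this buffer unchanged, so the delete[] responsibility ends up with whoever consumes the filter's output.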
2 changes: 1 addition & 1 deletion src/model/TensorRTModel.hpp
@@ -21,7 +21,7 @@ class TensorRTModel : public IModel{
TensorRTModel(ThreadLogger * logger);
virtual ~TensorRTModel();
virtual void loadModel(const char *modelPath);
virtual void predict(unsigned char* image, int height, int width, int channels);
virtual int * predict(unsigned char* image, int height, int width, int channels);
};

#endif // TENSORRTMODEL_HPP
2 changes: 1 addition & 1 deletion src/pipeline.cpp
@@ -6,7 +6,7 @@ Pipeline::Pipeline(){
this->logger = new ThreadLogger();
logger->log("Starting system pipeline...");
this->stateManager = new StateManager(logger);
this->stateManager->transitionTo(new IdlingState());
this->stateManager->transitionTo(new ProcessingState());
this->ioBridge = new IOBridge(logger, stateManager);
stateManager -> pushShutdown([this](){this -> stop();});
}