Skip to content

Commit 5b8c56a

Browse files
Merge pull request #95 from AxisCommunications/raii-inference
Follow RAII in Inference class
2 parents 3a061b0 + 9a309de commit 5b8c56a

File tree

4 files changed

+63
-84
lines changed

4 files changed

+63
-84
lines changed

src/acap_runtime.cpp

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -125,15 +125,8 @@ int RunServer(const string& address,
125125
}
126126
builder.RegisterService(&capture);
127127

128-
// Register inference service
129-
Inference inference;
130-
if (chipId > 0) {
131-
if (!inference.Init(_verbose, chipId, models, &capture)) {
132-
syslog(LOG_ERR, "Could not Init Inference Service");
133-
return EXIT_FAILURE;
134-
}
135-
builder.RegisterService(&inference);
136-
}
128+
Inference inference{_verbose, chipId, models, &capture};
129+
builder.RegisterService(&inference);
137130

138131
// Start server
139132
unique_ptr<Server> server(builder.BuildAndStart());

src/inference.cpp

Lines changed: 34 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -54,40 +54,14 @@ const char* const LAYOUTS[] = {"LAROD_TENSOR_LAYOUT_INVALID",
5454
"LAROD_TENSOR_LAYOUT_NCHW",
5555
"LAROD_TENSOR_LAYOUT_420SP"};
5656

57-
Inference::Inference() : _conn(nullptr), _chipId(LAROD_CHIP_INVALID), _verbose(false) {}
58-
59-
Inference::~Inference() {
60-
if (nullptr != _conn) {
61-
// Delete models
62-
TRACELOG << "Deleting loaded models:" << endl;
63-
larodError* error = nullptr;
64-
for (auto& [model_name, model] : _models) {
65-
TRACELOG << "- " << model_name << endl;
66-
if (!larodDeleteModel(_conn, model, &error)) {
67-
PrintError("Failed to delete model", error);
68-
larodClearError(&error);
69-
}
70-
}
71-
72-
// Disconnect from larod service
73-
TRACELOG << "Disconnecting from larod" << endl;
74-
if (!larodDisconnect(&_conn, &error)) {
75-
PrintError("Failed to disconnect", error);
76-
larodClearError(&error);
77-
}
78-
}
79-
80-
for (auto& [model_name, model] : _models) {
81-
larodDestroyModel(&model);
82-
}
83-
}
84-
85-
// Initialize inference
86-
bool Inference::Init(const bool verbose,
57+
Inference::Inference(const bool verbose,
8758
const uint64_t chipId,
8859
const vector<string>& models,
89-
Capture* captureService) {
90-
_verbose = verbose;
60+
Capture* captureService)
61+
: _verbose(verbose) {
62+
if (chipId <= 0)
63+
return;
64+
9165
larodError* error = nullptr;
9266

9367
_captureService = captureService;
@@ -96,14 +70,14 @@ bool Inference::Init(const bool verbose,
9670

9771
if (pthread_mutex_init(&_mtx, NULL) != 0) {
9872
ERRORLOG << "Init mutex FAILED" << endl;
99-
return false;
73+
throw runtime_error("Could not Init Inference Service");
10074
}
10175

10276
// Connect to larod service
10377
if (!larodConnect(&_conn, &error)) {
10478
PrintError("Connecting to larod FAILED", error);
10579
larodClearError(&error);
106-
return false;
80+
throw runtime_error("Could not Init Inference Service");
10781
}
10882

10983
// List available chip id:s
@@ -128,11 +102,35 @@ bool Inference::Init(const bool verbose,
128102
_models.clear();
129103
for (auto model : models) {
130104
if (!LoadModel(*_conn, model.c_str(), _chipId, LAROD_ACCESS_PRIVATE)) {
131-
return false;
105+
throw runtime_error("Could not Init Inference Service");
132106
}
133107
}
108+
}
134109

135-
return true;
110+
Inference::~Inference() {
111+
if (nullptr != _conn) {
112+
// Delete models
113+
TRACELOG << "Deleting loaded models:" << endl;
114+
larodError* error = nullptr;
115+
for (auto& [model_name, model] : _models) {
116+
TRACELOG << "- " << model_name << endl;
117+
if (!larodDeleteModel(_conn, model, &error)) {
118+
PrintError("Failed to delete model", error);
119+
larodClearError(&error);
120+
}
121+
}
122+
123+
// Disconnect from larod service
124+
TRACELOG << "Disconnecting from larod" << endl;
125+
if (!larodDisconnect(&_conn, &error)) {
126+
PrintError("Failed to disconnect", error);
127+
larodClearError(&error);
128+
}
129+
}
130+
131+
for (auto& [model_name, model] : _models) {
132+
larodDestroyModel(&model);
133+
}
136134
}
137135

138136
// Run inference on a single image

src/inference.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,12 +28,12 @@ class Inference : public tensorflow::serving::PredictionService::Service {
2828
using Status = grpc::Status;
2929
using TensorProto = tensorflow::TensorProto;
3030

31-
Inference();
32-
~Inference();
33-
bool Init(const bool verbose,
31+
Inference(const bool verbose,
3432
const uint64_t chipId,
3533
const std::vector<std::string>& models,
3634
Capture* captureService);
35+
~Inference();
36+
3737
Status Predict(ServerContext* context,
3838
const PredictRequest* request,
3939
PredictResponse* response) override;
@@ -72,7 +72,7 @@ class Inference : public tensorflow::serving::PredictionService::Service {
7272
larodError*& error);
7373

7474
bool _verbose;
75-
larodConnection* _conn;
75+
larodConnection* _conn = nullptr;
7676
larodChip _chipId;
7777
std::map<std::string, larodModel*> _models;
7878
larodModel* _ppModel;

test/inference_unittest.cc

Lines changed: 23 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -339,25 +339,27 @@ TEST(InferenceUnittest, InitCpu) {
339339
}
340340

341341
const vector<string> models = {cpuModel1};
342-
Inference inference;
343-
ASSERT_TRUE(inference.Init(verbose, cpuChipId, models, &capture));
342+
Inference inference{verbose, cpuChipId, models, &capture};
343+
SUCCEED();
344344
}
345345

346346
TEST(InferenceUnittest, Init_Fail) {
347347
const bool verbose = get_verbose_status();
348348
const vector<string> models = {cpuModel1, "invalid"};
349-
350-
Inference inference;
351-
ASSERT_FALSE(inference.Init(verbose, cpuChipId, models, &capture));
349+
try {
350+
Inference inference{verbose, cpuChipId, models, &capture};
351+
FAIL();
352+
} catch (const runtime_error&) {
353+
SUCCEED();
354+
}
352355
}
353356

354357
TEST(InferenceUnittest, PredictCpuModel1Preload) {
355358
const bool verbose = get_verbose_status();
356359
const vector<string> models = {cpuModel1};
357360
shm_unlink(sharedFile);
358361

359-
Inference inference;
360-
ASSERT_TRUE(inference.Init(verbose, cpuChipId, models, &capture));
362+
Inference inference{verbose, cpuChipId, models, &capture};
361363
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, true);
362364
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, true);
363365
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, true);
@@ -378,8 +380,7 @@ TEST(InferenceUnittest, PredictCpuModel1) {
378380
const vector<string> models = {};
379381
shm_unlink(sharedFile);
380382

381-
Inference inference;
382-
ASSERT_TRUE(inference.Init(verbose, cpuChipId, models, &capture));
383+
Inference inference{verbose, cpuChipId, models, &capture};
383384
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, false);
384385
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, false);
385386
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, true);
@@ -400,8 +401,7 @@ TEST(InferenceUnittest, PredictCpuModel2) {
400401
const vector<string> models = {};
401402
shm_unlink(sharedFile);
402403

403-
Inference inference;
404-
ASSERT_TRUE(inference.Init(verbose, cpuChipId, models, &capture));
404+
Inference inference{verbose, cpuChipId, models, &capture};
405405
#ifdef __arm64__
406406
PredictModel2(inference, cpuModel2, imageFile1, 653, 168, false);
407407
PredictModel2(inference, cpuModel2, imageFile1, 653, 168, false);
@@ -428,8 +428,7 @@ TEST(InferenceUnittest, PredictCpuModel3) {
428428
const vector<string> models = {};
429429
shm_unlink(sharedFile);
430430

431-
Inference inference;
432-
ASSERT_TRUE(inference.Init(verbose, cpuChipId, models, &capture));
431+
Inference inference{verbose, cpuChipId, models, &capture};
433432
#ifdef __arm64__
434433
PredictModel3(inference, cpuModel3, imageFile1, 653, 190, false);
435434
PredictModel3(inference, cpuModel3, imageFile1, 653, 190, false);
@@ -456,8 +455,7 @@ TEST(InferenceUnittest, PredictCpuModelMix) {
456455
const vector<string> models = {};
457456
shm_unlink(sharedFile);
458457

459-
Inference inference;
460-
ASSERT_TRUE(inference.Init(verbose, cpuChipId, models, &capture));
458+
Inference inference{verbose, cpuChipId, models, &capture};
461459
#ifdef __arm64__
462460
PredictModel1(inference, cpuModel1, imageFile1, 0.87890601, 0.58203125, false);
463461
PredictModel2(inference, cpuModel2, imageFile1, 653, 168, false);
@@ -485,17 +483,15 @@ TEST(InferenceUnittest, InitDlpu) {
485483
const bool verbose = get_verbose_status();
486484
const vector<string> models = {cpuModel1};
487485

488-
Inference inference;
489-
ASSERT_TRUE(inference.Init(verbose, dlpuChipId, models, &capture));
486+
Inference inference{verbose, dlpuChipId, models, &capture};
490487
}
491488

492489
TEST(InferenceUnittest, PredictDlpuModel1Preload) {
493490
const bool verbose = get_verbose_status();
494491
const vector<string> models = {cpuModel1};
495492
shm_unlink(sharedFile);
496493

497-
Inference inference;
498-
ASSERT_TRUE(inference.Init(verbose, dlpuChipId, models, &capture));
494+
Inference inference{verbose, dlpuChipId, models, &capture};
499495
PredictModel1(inference, cpuModel1, imageFile1, 0.878906, 0.5, true);
500496
PredictModel1(inference, cpuModel1, imageFile1, 0.878906, 0.5, true);
501497
PredictModel1(inference, cpuModel1, imageFile1, 0.878906, 0.5, true);
@@ -513,8 +509,7 @@ TEST(InferenceUnittest, PredictDlpuModel1) {
513509
const vector<string> models = {};
514510
shm_unlink(sharedFile);
515511

516-
Inference inference;
517-
ASSERT_TRUE(inference.Init(verbose, dlpuChipId, models, &capture));
512+
Inference inference{verbose, dlpuChipId, models, &capture};
518513
PredictModel1(inference, cpuModel1, imageFile1, 0.878906, 0.5, false);
519514
PredictModel1(inference, cpuModel1, imageFile1, 0.878906, 0.5, false);
520515
PredictModel1(inference, cpuModel1, imageFile1, 0.878906, 0.5, true);
@@ -527,8 +522,7 @@ TEST(InferenceUnittest, PredictDlpuModel2) {
527522
const vector<string> models = {};
528523
shm_unlink(sharedFile);
529524

530-
Inference inference;
531-
ASSERT_TRUE(inference.Init(verbose, dlpuChipId, models, &capture));
525+
Inference inference{verbose, dlpuChipId, models, &capture};
532526
PredictModel2(inference, cpuModel2, imageFile1, 653, 166, false);
533527
PredictModel2(inference, cpuModel2, imageFile1, 653, 166, false);
534528
PredictModel2(inference, cpuModel2, imageFile1, 653, 166, false);
@@ -544,8 +538,7 @@ TEST(InferenceUnittest, DISABLED_PredictDlpuModel3)
544538
const vector<string> models = {};
545539
shm_unlink(sharedFile);
546540

547-
Inference inference;
548-
ASSERT_TRUE(inference.Init(verbose, dlpuChipId, models, &capture));
541+
Inference inference{verbose, dlpuChipId, models, &capture};
549542
PredictModel3(inference, cpuModel3, imageFile1, 653, 197, false);
550543
PredictModel3(inference, cpuModel3, imageFile1, 653, 197, false);
551544
PredictModel3(inference, cpuModel3, imageFile1, 653, 197, false);
@@ -557,17 +550,15 @@ TEST(InferenceUnittest, InitTpu) {
557550
const bool verbose = get_verbose_status();
558551
const vector<string> models = {tpuModel1};
559552

560-
Inference inference;
561-
ASSERT_TRUE(inference.Init(verbose, tpuChipId, models, &capture));
553+
Inference inference{verbose, tpuChipId, models, &capture};
562554
}
563555

564556
TEST(InferenceUnittest, PredictTpuModel1Preload) {
565557
const bool verbose = get_verbose_status();
566558
const vector<string> models = {tpuModel1};
567559
shm_unlink(sharedFile);
568560

569-
Inference inference;
570-
ASSERT_TRUE(inference.Init(verbose, tpuChipId, models, &capture));
561+
Inference inference{verbose, tpuChipId, models, &capture};
571562
PredictModel1(inference, tpuModel1, imageFile1, 0.878906, 0.5, true);
572563
PredictModel1(inference, tpuModel1, imageFile1, 0.878906, 0.5, true);
573564
PredictModel1(inference, tpuModel1, imageFile1, 0.878906, 0.5, true);
@@ -585,8 +576,7 @@ TEST(InferenceUnittest, PredictTpuModel1) {
585576
const vector<string> models = {};
586577
shm_unlink(sharedFile);
587578

588-
Inference inference;
589-
ASSERT_TRUE(inference.Init(verbose, tpuChipId, models, &capture));
579+
Inference inference{verbose, tpuChipId, models, &capture};
590580
PredictModel1(inference, tpuModel1, imageFile1, 0.878906, 0.5, false);
591581
PredictModel1(inference, tpuModel1, imageFile1, 0.878906, 0.5, false);
592582
PredictModel1(inference, tpuModel1, imageFile1, 0.878906, 0.5, true);
@@ -599,8 +589,7 @@ TEST(InferenceUnittest, PredictTpuModel2) {
599589
const vector<string> models = {};
600590
shm_unlink(sharedFile);
601591

602-
Inference inference;
603-
ASSERT_TRUE(inference.Init(verbose, tpuChipId, models, &capture));
592+
Inference inference{verbose, tpuChipId, models, &capture};
604593
PredictModel2(inference, tpuModel2, imageFile1, 653, 118, false);
605594
PredictModel2(inference, tpuModel2, imageFile1, 653, 118, false);
606595
PredictModel2(inference, tpuModel2, imageFile1, 653, 118, false);
@@ -613,8 +602,7 @@ TEST(InferenceUnittest, PredictTpuModel3) {
613602
const vector<string> models = {};
614603
shm_unlink(sharedFile);
615604

616-
Inference inference;
617-
ASSERT_TRUE(inference.Init(verbose, tpuChipId, models, &capture));
605+
Inference inference{verbose, tpuChipId, models, &capture};
618606
PredictModel3(inference, tpuModel3, imageFile1, 653, 197, false);
619607
PredictModel3(inference, tpuModel3, imageFile1, 653, 197, false);
620608
PredictModel3(inference, tpuModel3, imageFile1, 653, 197, false);

0 commit comments

Comments (0)