diff --git a/c/include/nnstreamer-tizen-internal.h b/c/include/nnstreamer-tizen-internal.h
index a24632dc..dd53dc85 100644
--- a/c/include/nnstreamer-tizen-internal.h
+++ b/c/include/nnstreamer-tizen-internal.h
@@ -52,7 +52,7 @@ typedef struct {
   int invoke_async; /**< The sub-plugin must support asynchronous output to use this option. If set to TRUE, the sub-plugin can generate multiple outputs asynchronously per single input. Otherwise, only synchronous single-output is expected and async callback is ignored. */
   ml_tensors_data_cb invoke_async_cb; /**< Callback function to be called when the sub-plugin generates an output asynchronously. This is only available when invoke_async is set to TRUE. */
   void *invoke_async_pdata; /**< Private data to be passed to async callback. */
-  int latency_mode; /**< 1 - log invoke latency, 0 (default) - do not log */
+  int latency_mode; /**< True to enable latency logging. */
 } ml_single_preset;
 
 /**
diff --git a/c/src/ml-api-inference-single.c b/c/src/ml-api-inference-single.c
index 51eb1455..d31b8840 100644
--- a/c/src/ml-api-inference-single.c
+++ b/c/src/ml-api-inference-single.c
@@ -2099,7 +2099,7 @@ _ml_validate_model_file (const char *const *model,
   }
 
   /** @todo Make sure num_models is correct for each nnfw type */
-  switch (*nnfw) {
+  switch ((int) *nnfw) {
   case ML_NNFW_TYPE_NNFW:
   case ML_NNFW_TYPE_TVM:
   case ML_NNFW_TYPE_ONNX_RUNTIME:
diff --git a/c/src/ml-api-service-extension.c b/c/src/ml-api-service-extension.c
index e8cecc20..7a964eec 100644
--- a/c/src/ml-api-service-extension.c
+++ b/c/src/ml-api-service-extension.c
@@ -245,6 +245,38 @@ _ml_extension_destroy_tensors_info (void *data)
   ml_tensors_info_destroy (info);
 }
 
+/**
+ * @brief Internal function to parse common options from json.
+ */
+static void
+_ml_extension_conf_parse_common (ml_service_s * mls, JsonObject * object)
+{
+  const gchar *value = NULL;
+
+  g_return_if_fail (object != NULL);
+
+  if (json_object_has_member (object, "input_queue_size")) {
+    value = json_object_get_string_member (object, "input_queue_size");
+
+    if (STR_IS_VALID (value))
+      _ml_service_extension_set_information (mls, "input_queue_size", value);
+  }
+
+  if (json_object_has_member (object, "max_input")) {
+    value = json_object_get_string_member (object, "max_input");
+
+    if (STR_IS_VALID (value))
+      _ml_service_extension_set_information (mls, "max_input", value);
+  }
+
+  if (json_object_has_member (object, "timeout")) {
+    value = json_object_get_string_member (object, "timeout");
+
+    if (STR_IS_VALID (value))
+      _ml_service_extension_set_information (mls, "timeout", value);
+  }
+}
+
 /**
  * @brief Internal function to parse single-shot info from json.
  */
@@ -553,20 +585,21 @@ static int
 _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
 {
   ml_extension_s *ext = (ml_extension_s *) mls->priv;
+  JsonObject *sub = NULL;
   int status;
 
   if (json_object_has_member (object, "single")) {
-    JsonObject *single = json_object_get_object_member (object, "single");
+    sub = json_object_get_object_member (object, "single");
 
-    status = _ml_extension_conf_parse_single (mls, single);
+    status = _ml_extension_conf_parse_single (mls, sub);
     if (status != ML_ERROR_NONE)
       return status;
 
     ext->type = ML_EXTENSION_TYPE_SINGLE;
   } else if (json_object_has_member (object, "pipeline")) {
-    JsonObject *pipe = json_object_get_object_member (object, "pipeline");
+    sub = json_object_get_object_member (object, "pipeline");
 
-    status = _ml_extension_conf_parse_pipeline (mls, pipe);
+    status = _ml_extension_conf_parse_pipeline (mls, sub);
     if (status != ML_ERROR_NONE)
       return status;
 
@@ -576,6 +609,7 @@ _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
         "Failed to parse configuration file, cannot get the valid type from configuration.");
   }
 
+  _ml_extension_conf_parse_common (mls, sub);
   return ML_ERROR_NONE;
 }
 
@@ -804,7 +838,10 @@ _ml_service_extension_set_information (ml_service_s * mls, const char *name,
 {
   ml_extension_s *ext = (ml_extension_s *) mls->priv;
 
-  /* Check limitation of message queue and other options. */
+  /**
+   * Check limitation of message queue and other options.
+   * When adding a new value, update _ml_extension_conf_parse_common() as well.
+   */
   if (g_ascii_strcasecmp (name, "input_queue_size") == 0
       || g_ascii_strcasecmp (name, "max_input") == 0) {
     ext->max_input = (guint) g_ascii_strtoull (value, NULL, 10);
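
The new _ml_extension_conf_parse_common() helper reads the three queue-related options from whichever sub-object the configuration provides ("single" or "pipeline", per the _ml_extension_conf_parse_json() hunk above) and routes them through _ml_service_extension_set_information(), so file-based and programmatic settings share one validation path. A configuration sketch, assuming the keys sit inside the "single" object as the parser implies; the framework, model, and values here are placeholders:

{
  "single" : {
    "framework" : "tensorflow-lite",
    "model" : ["sample.tflite"],
    "input_queue_size" : "8",
    "max_input" : "8",
    "timeout" : "1000"
  }
}

Note the option values are JSON strings rather than numbers; the set-information path converts the queue options with g_ascii_strtoull(), as the last hunk of this file shows.
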
diff --git a/c/src/ml-api-service-private.h b/c/src/ml-api-service-private.h
index d61c3417..90cee4b0 100644
--- a/c/src/ml-api-service-private.h
+++ b/c/src/ml-api-service-private.h
@@ -29,10 +29,11 @@ extern "C" {
  * @brief Macro for the event types of machine learning service.
  * @todo TBU, need ACR later (update enum for ml-service event, see ml_service_event_cb)
  */
-#define ML_SERVICE_EVENT_MODEL_REGISTERED 2
-#define ML_SERVICE_EVENT_PIPELINE_REGISTERED 3
-#define ML_SERVICE_EVENT_REPLY 4
-#define ML_SERVICE_EVENT_LAUNCH 5
+#define ML_SERVICE_EVENT_CUSTOM (0x7f000000)
+#define ML_SERVICE_EVENT_MODEL_REGISTERED (ML_SERVICE_EVENT_CUSTOM + 1)
+#define ML_SERVICE_EVENT_PIPELINE_REGISTERED (ML_SERVICE_EVENT_CUSTOM + 2)
+#define ML_SERVICE_EVENT_REPLY (ML_SERVICE_EVENT_CUSTOM + 3)
+#define ML_SERVICE_EVENT_LAUNCH (ML_SERVICE_EVENT_CUSTOM + 4)
 
 /**
  * @brief Enumeration for ml-service type.
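
Rebasing the internal event identifiers onto ML_SERVICE_EVENT_CUSTOM (0x7f000000) moves them well clear of the public ml_service_event_e values, so new public events can be appended without colliding with the private ones. A dispatch sketch, assuming the public ml_service_event_cb signature from ml-api-service.h; the handler bodies are placeholders, and ML_SERVICE_EVENT_REPLY is only visible where the private header above is included:

/* Cast to int before switching: the internal identifiers are macros
 * rather than members of ml_service_event_e, so a plain enum switch
 * would draw enum-switch warnings. The (int) cast added to the
 * _ml_validate_model_file() hunk above works around the same class
 * of warnings. */
static void
_my_event_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
{
  (void) event_data;
  (void) user_data;

  switch ((int) event) {
    case ML_SERVICE_EVENT_NEW_DATA:
      /* public event from ml_service_event_e */
      break;
    case ML_SERVICE_EVENT_REPLY:
      /* internal event, now guaranteed >= ML_SERVICE_EVENT_CUSTOM */
      break;
    default:
      break;
  }
}
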
diff --git a/c/src/ml-api-service.c b/c/src/ml-api-service.c
index c3e281ba..907ec230 100644
--- a/c/src/ml-api-service.c
+++ b/c/src/ml-api-service.c
@@ -237,6 +237,12 @@ _ml_service_conf_parse_tensors_info (JsonNode * info_node,
   if (JSON_NODE_HOLDS_ARRAY (info_node)) {
     array = json_node_get_array (info_node);
     info.num_tensors = json_array_get_length (array);
+
+    if (info.num_tensors > NNS_TENSOR_SIZE_LIMIT) {
+      _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+          "The array length of json for tensor information (%u) exceeds the max limit.",
+          info.num_tensors);
+    }
   }
 
   for (i = 0; i < info.num_tensors; i++) {
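
Without this bound, num_tensors comes straight from the configuration file and then drives the per-tensor parse loop, so an oversized array could walk past fixed NNS_TENSOR_SIZE_LIMIT-sized tensor storage. A minimal sketch of the guard pattern under those assumptions, not the actual parser; parse_one_tensor() is a hypothetical stand-in for the real per-element parsing:

/* Bound an untrusted element count before it sizes any loop.
 * NNS_TENSOR_SIZE_LIMIT and the ML_ERROR_* codes come from the
 * nnstreamer / ml-api headers; json-glib provides the array calls. */
static int
parse_tensor_array (JsonArray * array)
{
  guint i, num;

  num = json_array_get_length (array);
  if (num > NNS_TENSOR_SIZE_LIMIT)
    return ML_ERROR_INVALID_PARAMETER; /* fail fast, no partial parse */

  for (i = 0; i < num; i++)
    parse_one_tensor (json_array_get_element (array, i)); /* hypothetical */

  return ML_ERROR_NONE;
}
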
fakesink"); - gsize len = strlen (data); + gsize len = strlen (data) + 1; status = _ml_service_offloading_request_raw (NULL, "req_raw", data, len); EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status); diff --git a/tests/test_models/config/config_single_llamacpp.conf b/tests/test_models/config/config_single_llamacpp.conf index c841c3fa..4b1d937c 100644 --- a/tests/test_models/config/config_single_llamacpp.conf +++ b/tests/test_models/config/config_single_llamacpp.conf @@ -2,7 +2,7 @@ "single" : { "framework" : "llamacpp", - "model" : ["../tests/test_models/models/llama-2-7b-chat.Q2_K.gguf"], + "model" : ["../tests/test_models/models/TinyStories-656K-Q2_K.gguf"], "custom" : "num_predict:32", "invoke_dynamic" : "true", "invoke_async" : "false" diff --git a/tests/test_models/config/config_single_llamacpp_async.conf b/tests/test_models/config/config_single_llamacpp_async.conf index fa5672e8..881d85ad 100644 --- a/tests/test_models/config/config_single_llamacpp_async.conf +++ b/tests/test_models/config/config_single_llamacpp_async.conf @@ -2,7 +2,7 @@ "single" : { "framework" : "llamacpp", - "model" : ["../tests/test_models/models/llama-2-7b-chat.Q2_K.gguf"], + "model" : ["../tests/test_models/models/TinyStories-656K-Q2_K.gguf"], "custom" : "num_predict:32", "invoke_dynamic" : "true", "invoke_async" : "true"