diff --git a/EXAMPLES.md b/EXAMPLES.md index 794d7185..18018807 100644 --- a/EXAMPLES.md +++ b/EXAMPLES.md @@ -19,10 +19,10 @@ Smart contract: `registerDataManager` { "key": string (required,len=36), "name": string (required,gte=1,lte=100), - "opener_hash": string (required,len=64,hexadecimal), + "opener_checksum": string (required,len=64,hexadecimal), "opener_storage_address": string (required,url), "type": string (required,gte=1,lte=30), - "description_hash": string (required,len=64,hexadecimal), + "description_checksum": string (required,len=64,hexadecimal), "description_storage_address": string (required,url), "objective_key": string (omitempty,len=36), "permissions": (required){ @@ -36,7 +36,7 @@ Smart contract: `registerDataManager` ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["registerDataManager","{\"key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"name\":\"liver slide\",\"opener_hash\":\"da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc\",\"opener_storage_address\":\"https://toto/dataManager/42234/opener\",\"type\":\"images\",\"description_hash\":\"8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee\",\"description_storage_address\":\"https://toto/dataManager/42234/description\",\"objective_key\":\"\",\"permissions\":{\"process\":{\"public\":true,\"authorized_ids\":[]}},\"metadata\":null}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["registerDataManager","{\"key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"name\":\"liver 
slide\",\"opener_checksum\":\"da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc\",\"opener_storage_address\":\"https://toto/dataManager/42234/opener\",\"type\":\"images\",\"description_checksum\":\"8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee\",\"description_storage_address\":\"https://toto/dataManager/42234/description\",\"objective_key\":\"\",\"permissions\":{\"process\":{\"public\":true,\"authorized_ids\":[]}},\"metadata\":null}"]}' -C myc ``` ##### Command output: ```json @@ -61,7 +61,7 @@ peer chaincode invoke -n mycc -c '{"Args":["queryDataManager","{\"key\":\"da1bb7 ```json { "description": { - "hash": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", + "checksum": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", "storage_address": "https://toto/dataManager/42234/description" }, "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", @@ -69,7 +69,7 @@ peer chaincode invoke -n mycc -c '{"Args":["queryDataManager","{\"key\":\"da1bb7 "name": "liver slide", "objective_key": "", "opener": { - "hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "storage_address": "https://toto/dataManager/42234/opener" }, "owner": "SampleOrg", @@ -114,10 +114,10 @@ Smart contract: `registerObjective` { "key": string (required,len=36), "name": string (required,gte=1,lte=100), - "description_hash": string (required,len=64,hexadecimal), + "description_checksum": string (required,len=64,hexadecimal), "description_storage_address": string (required,url), "metrics_name": string (required,gte=1,lte=100), - "metrics_hash": string (required,len=64,hexadecimal), + "metrics_checksum": string (required,len=64,hexadecimal), "metrics_storage_address": string (required,url), "test_dataset": (omitempty){ "data_manager_key": string (omitempty,len=36), @@ -134,7 +134,7 @@ Smart contract: `registerObjective` ``` ##### Command peer 
example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["registerObjective","{\"key\":\"5c1d9cd1-c2c1-082d-de09-21b56d11030c\",\"name\":\"MSI classification\",\"description_hash\":\"5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379\",\"description_storage_address\":\"https://toto/objective/222/description\",\"metrics_name\":\"accuracy\",\"metrics_hash\":\"4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379\",\"metrics_storage_address\":\"https://toto/objective/222/metrics\",\"test_dataset\":{\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"bb1bb7c3-1f62-244c-0f3a-761cc1688042\",\"bb2bb7c3-1f62-244c-0f3a-761cc1688042\"]},\"permissions\":{\"process\":{\"public\":true,\"authorized_ids\":[]}},\"metadata\":null}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["registerObjective","{\"key\":\"5c1d9cd1-c2c1-082d-de09-21b56d11030c\",\"name\":\"MSI classification\",\"description_checksum\":\"5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379\",\"description_storage_address\":\"https://toto/objective/222/description\",\"metrics_name\":\"accuracy\",\"metrics_checksum\":\"4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379\",\"metrics_storage_address\":\"https://toto/objective/222/metrics\",\"test_dataset\":{\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"bb1bb7c3-1f62-244c-0f3a-761cc1688042\",\"bb2bb7c3-1f62-244c-0f3a-761cc1688042\"]},\"permissions\":{\"process\":{\"public\":true,\"authorized_ids\":[]}},\"metadata\":null}"]}' -C myc ``` ##### Command output: ```json @@ -150,9 +150,9 @@ Smart contract: `registerAlgo` { "key": string (required,len=36), "name": string (required,gte=1,lte=100), - "hash": string (required,len=64,hexadecimal), + "checksum": string (required,len=64,hexadecimal), "storage_address": string (required,url), - "description_hash": string (required,len=64,hexadecimal), + "description_checksum": string 
(required,len=64,hexadecimal), "description_storage_address": string (required,url), "permissions": (required){ "process": (required){ @@ -165,7 +165,7 @@ Smart contract: `registerAlgo` ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["registerAlgo","{\"key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"name\":\"hog + svm\",\"hash\":\"fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc\",\"storage_address\":\"https://toto/algo/222/algo\",\"description_hash\":\"e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dca\",\"description_storage_address\":\"https://toto/algo/222/description\",\"permissions\":{\"process\":{\"public\":true,\"authorized_ids\":[]}},\"metadata\":null}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["registerAlgo","{\"key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"name\":\"hog + svm\",\"checksum\":\"fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc\",\"storage_address\":\"https://toto/algo/222/algo\",\"description_checksum\":\"e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dca\",\"description_storage_address\":\"https://toto/algo/222/description\",\"permissions\":{\"process\":{\"public\":true,\"authorized_ids\":[]}},\"metadata\":null}"]}' -C myc ``` ##### Command output: ```json @@ -207,7 +207,7 @@ peer chaincode query -n mycc -c '{"Args":["queryDataManagers"]}' -C myc [ { "description": { - "hash": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", + "checksum": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", "storage_address": "https://toto/dataManager/42234/description" }, "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", @@ -215,7 +215,7 @@ peer chaincode query -n mycc -c '{"Args":["queryDataManagers"]}' -C myc "name": "liver slide", "objective_key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "opener": { - "hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": 
"da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "storage_address": "https://toto/dataManager/42234/opener" }, "owner": "SampleOrg", @@ -277,13 +277,13 @@ peer chaincode query -n mycc -c '{"Args":["queryObjectives"]}' -C myc [ { "description": { - "hash": "5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/description" }, "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metadata": {}, "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "name": "accuracy", "storage_address": "https://toto/objective/222/metrics" }, @@ -318,7 +318,7 @@ Smart contract: `createTraintuple` "in_models": [string] (omitempty,dive,len=36), "data_manager_key": string (required,len=36), "data_sample_keys": [string] (required,unique,gt=0,dive,len=36), - "compute_plan_id": string (required_with=Rank), + "compute_plan_key": string (required_with=Rank), "rank": string (), "tag": string (omitempty,lte=64), "metadata": map (lte=100,dive,keys,lte=50,endkeys,lte=100), @@ -326,7 +326,7 @@ Smart contract: `createTraintuple` ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["createTraintuple","{\"key\":\"b0289ab8-3a71-f01e-2b72-0259a6452244\",\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"in_models\":[],\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\",\"aa2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"compute_plan_id\":\"\",\"rank\":\"\",\"tag\":\"\",\"metadata\":null}"]}' -C myc +peer chaincode invoke -n mycc -c 
'{"Args":["createTraintuple","{\"key\":\"b0289ab8-3a71-f01e-2b72-0259a6452244\",\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"in_models\":[],\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\",\"aa2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"compute_plan_key\":\"\",\"rank\":\"\",\"tag\":\"\",\"metadata\":null}"]}' -C myc ``` ##### Command output: ```json @@ -345,7 +345,7 @@ Smart contract: `createTraintuple` "in_models": [string] (omitempty,dive,len=36), "data_manager_key": string (required,len=36), "data_sample_keys": [string] (required,unique,gt=0,dive,len=36), - "compute_plan_id": string (required_with=Rank), + "compute_plan_key": string (required_with=Rank), "rank": string (), "tag": string (omitempty,lte=64), "metadata": map (lte=100,dive,keys,lte=50,endkeys,lte=100), @@ -353,7 +353,7 @@ Smart contract: `createTraintuple` ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["createTraintuple","{\"key\":\"bbb89ab8-3a71-f01e-2b72-0259a6452244\",\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"in_models\":[\"b0289ab8-3a71-f01e-2b72-0259a6452244\"],\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\",\"aa2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"compute_plan_id\":\"\",\"rank\":\"\",\"tag\":\"\",\"metadata\":null}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["createTraintuple","{\"key\":\"bbb89ab8-3a71-f01e-2b72-0259a6452244\",\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"in_models\":[\"b0289ab8-3a71-f01e-2b72-0259a6452244\"],\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\",\"aa2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"compute_plan_key\":\"\",\"rank\":\"\",\"tag\":\"\",\"metadata\":null}"]}' -C myc ``` ##### Command output: ```json @@ -386,21 +386,21 @@ peer chaincode invoke -n mycc -c 
'{"Args":["queryFilter","{\"indexName\":\"train [ { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": null, @@ -437,21 +437,21 @@ peer chaincode invoke -n mycc -c '{"Args":["logStartTrain","{\"key\":\"b0289ab8- ```json { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": null, @@ -480,34 +480,34 @@ Smart contract: `logSuccessTrain` "log": string (lte=200), "out_model": (required){ "key": string (required,len=36), - "hash": string 
(required,len=64,hexadecimal), + "checksum": string (required,len=64,hexadecimal), "storage_address": string (required), }, } ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["logSuccessTrain","{\"key\":\"b0289ab8-3a71-f01e-2b72-0259a6452244\",\"log\":\"no error, ah ah ah\",\"out_model\":{\"key\":\"eedbb7c3-1f62-244c-0f3a-761cc1688042\",\"hash\":\"eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed\",\"storage_address\":\"https://substrabac/model/toto\"}}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["logSuccessTrain","{\"key\":\"b0289ab8-3a71-f01e-2b72-0259a6452244\",\"log\":\"no error, ah ah ah\",\"out_model\":{\"key\":\"eedbb7c3-1f62-244c-0f3a-761cc1688042\",\"checksum\":\"eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed\",\"storage_address\":\"https://substrabac/model/toto\"}}"]}' -C myc ``` ##### Command output: ```json { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": null, @@ -515,7 +515,7 @@ peer chaincode invoke -n mycc -c '{"Args":["logSuccessTrain","{\"key\":\"b0289ab "log": "no error, ah ah ah", "metadata": {}, "out_model": { - "hash": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", + "checksum": 
"eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", "key": "eedbb7c3-1f62-244c-0f3a-761cc1688042", "storage_address": "https://substrabac/model/toto" }, @@ -547,21 +547,21 @@ peer chaincode invoke -n mycc -c '{"Args":["queryTraintuple","{\"key\":\"b0289ab ```json { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": null, @@ -569,7 +569,7 @@ peer chaincode invoke -n mycc -c '{"Args":["queryTraintuple","{\"key\":\"b0289ab "log": "no error, ah ah ah", "metadata": {}, "out_model": { - "hash": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", + "checksum": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", "key": "eedbb7c3-1f62-244c-0f3a-761cc1688042", "storage_address": "https://substrabac/model/toto" }, @@ -678,21 +678,21 @@ peer chaincode invoke -n mycc -c '{"Args":["queryFilter","{\"indexName\":\"testt [ { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": true, - "compute_plan_id": "", + "compute_plan_key": "", 
"creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0, "worker": "SampleOrg" }, @@ -702,7 +702,7 @@ peer chaincode invoke -n mycc -c '{"Args":["queryFilter","{\"indexName\":\"testt "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -714,21 +714,21 @@ peer chaincode invoke -n mycc -c '{"Args":["queryFilter","{\"indexName\":\"testt }, { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": false, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0, "worker": "SampleOrg" }, @@ -738,7 +738,7 @@ peer chaincode invoke -n mycc -c '{"Args":["queryFilter","{\"indexName\":\"testt "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": 
"4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -767,21 +767,21 @@ peer chaincode invoke -n mycc -c '{"Args":["logStartTest","{\"key\":\"bbbada11-5 ```json { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": true, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0, "worker": "SampleOrg" }, @@ -791,7 +791,7 @@ peer chaincode invoke -n mycc -c '{"Args":["logStartTest","{\"key\":\"bbbada11-5 "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -821,21 +821,21 @@ peer chaincode invoke -n mycc -c '{"Args":["logSuccessTest","{\"key\":\"bbbada11 ```json { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": true, - "compute_plan_id": "", + 
"compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0.9, "worker": "SampleOrg" }, @@ -845,7 +845,7 @@ peer chaincode invoke -n mycc -c '{"Args":["logSuccessTest","{\"key\":\"bbbada11 "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -873,21 +873,21 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuple","{\"key\":\"bbbada11- ```json { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": true, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0.9, "worker": "SampleOrg" }, @@ -897,7 +897,7 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuple","{\"key\":\"bbbada11- "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { 
- "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -918,21 +918,21 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuples"]}' -C myc [ { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": false, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0, "worker": "SampleOrg" }, @@ -942,7 +942,7 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuples"]}' -C myc "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -954,21 +954,21 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuples"]}' -C myc }, { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": true, - "compute_plan_id": "", + "compute_plan_key": "", "creator": 
"SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0.9, "worker": "SampleOrg" }, @@ -978,7 +978,7 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuples"]}' -C myc "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -990,21 +990,21 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuples"]}' -C myc }, { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": true, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0, "worker": "SampleOrg" }, @@ -1014,7 +1014,7 @@ peer chaincode query -n mycc -c '{"Args":["queryTesttuples"]}' -C myc "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": 
"4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -1045,21 +1045,21 @@ peer chaincode query -n mycc -c '{"Args":["queryModelDetails","{\"key\":\"b0289a "non_certified_testtuples": [ { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": false, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0, "worker": "SampleOrg" }, @@ -1069,7 +1069,7 @@ peer chaincode query -n mycc -c '{"Args":["queryModelDetails","{\"key\":\"b0289a "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -1082,21 +1082,21 @@ peer chaincode query -n mycc -c '{"Args":["queryModelDetails","{\"key\":\"b0289a ], "testtuple": { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, "certified": 
true, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "bb1bb7c3-1f62-244c-0f3a-761cc1688042", "bb2bb7c3-1f62-244c-0f3a-761cc1688042" ], - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "perf": 0.9, "worker": "SampleOrg" }, @@ -1106,7 +1106,7 @@ peer chaincode query -n mycc -c '{"Args":["queryModelDetails","{\"key\":\"b0289a "objective": { "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/metrics" } }, @@ -1118,21 +1118,21 @@ peer chaincode query -n mycc -c '{"Args":["queryModelDetails","{\"key\":\"b0289a }, "traintuple": { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": null, @@ -1140,7 +1140,7 @@ peer chaincode query -n mycc -c '{"Args":["queryModelDetails","{\"key\":\"b0289a "log": "no error, ah ah 
ah", "metadata": {}, "out_model": { - "hash": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", + "checksum": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", "key": "eedbb7c3-1f62-244c-0f3a-761cc1688042", "storage_address": "https://substrabac/model/toto" }, @@ -1167,21 +1167,21 @@ peer chaincode query -n mycc -c '{"Args":["queryModels"]}' -C myc { "traintuple": { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": null, @@ -1189,7 +1189,7 @@ peer chaincode query -n mycc -c '{"Args":["queryModels"]}' -C myc "log": "no error, ah ah ah", "metadata": {}, "out_model": { - "hash": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", + "checksum": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", "key": "eedbb7c3-1f62-244c-0f3a-761cc1688042", "storage_address": "https://substrabac/model/toto" }, @@ -1207,26 +1207,26 @@ peer chaincode query -n mycc -c '{"Args":["queryModels"]}' -C myc { "traintuple": { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", 
"storage_address": "https://toto/algo/222/algo" }, - "compute_plan_id": "", + "compute_plan_key": "", "creator": "SampleOrg", "dataset": { - "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", - "keys": [ + "data_sample_keys": [ "aa1bb7c3-1f62-244c-0f3a-761cc1688042", "aa2bb7c3-1f62-244c-0f3a-761cc1688042" ], + "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", "metadata": {}, - "opener_hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "opener_checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "worker": "SampleOrg" }, "in_models": [ { - "hash": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", + "checksum": "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed", "key": "b0289ab8-3a71-f01e-2b72-0259a6452244", "storage_address": "https://substrabac/model/toto", "traintuple_key": "b0289ab8-3a71-f01e-2b72-0259a6452244" @@ -1288,7 +1288,7 @@ peer chaincode query -n mycc -c '{"Args":["queryDataset","{\"key\":\"da1bb7c3-1f ```json { "description": { - "hash": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", + "checksum": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", "storage_address": "https://toto/dataManager/42234/description" }, "key": "da1bb7c3-1f62-244c-0f3a-761cc1688042", @@ -1296,7 +1296,7 @@ peer chaincode query -n mycc -c '{"Args":["queryDataset","{\"key\":\"da1bb7c3-1f "name": "liver slide", "objective_key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "opener": { - "hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "storage_address": "https://toto/dataManager/42234/opener" }, "owner": "SampleOrg", @@ -1367,7 +1367,7 @@ peer chaincode query -n mycc -c '{"Args":["queryDataset","{\"key\":\"38a320b2-a6 ```json { "description": { - "hash": "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", + "checksum": 
"8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee", "storage_address": "https://toto/dataManager/42234/description" }, "key": "38a320b2-a67c-8003-cc74-8d6666534f2b", @@ -1375,7 +1375,7 @@ peer chaincode query -n mycc -c '{"Args":["queryDataset","{\"key\":\"38a320b2-a6 "name": "liver slide", "objective_key": "", "opener": { - "hash": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", "storage_address": "https://toto/dataManager/42234/opener" }, "owner": "SampleOrg", @@ -1400,7 +1400,7 @@ Smart contract: `createComputePlan` { "tag": string (omitempty,lte=64), "metadata": map (lte=100,dive,keys,lte=50,endkeys,lte=100), - "compute_plan_id": string (required,len=36), + "key": string (required,len=36), "traintuples": (omitempty) [{ "key": string (required,len=36), "data_manager_key": string (required,len=36), @@ -1450,7 +1450,7 @@ Smart contract: `createComputePlan` ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["createComputePlan","{\"clean_models\":false,\"tag\":\"a tag is simply a 
string\",\"metadata\":null,\"compute_plan_id\":\"00000000-50f6-26d3-fa86-1bf6387e3896\",\"traintuples\":[{\"key\":\"11000000-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\"],\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"id\":\"firstTraintupleID\",\"in_models_ids\":null,\"tag\":\"\",\"metadata\":null},{\"key\":\"22000000-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"id\":\"secondTraintupleID\",\"in_models_ids\":[\"firstTraintupleID\"],\"tag\":\"\",\"metadata\":null}],\"aggregatetuples\":null,\"composite_traintuples\":null,\"testtuples\":[{\"key\":\"11000033-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"bb1bb7c3-1f62-244c-0f3a-761cc1688042\",\"bb2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"objective_key\":\"5c1d9cd1-c2c1-082d-de09-21b56d11030c\",\"tag\":\"\",\"metadata\":null,\"traintuple_id\":\"secondTraintupleID\"}]}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["createComputePlan","{\"clean_models\":false,\"tag\":\"a tag is simply a 
string\",\"metadata\":null,\"key\":\"00000000-50f6-26d3-fa86-1bf6387e3896\",\"traintuples\":[{\"key\":\"11000000-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\"],\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"id\":\"firstTraintupleID\",\"in_models_ids\":null,\"tag\":\"\",\"metadata\":null},{\"key\":\"22000000-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"id\":\"secondTraintupleID\",\"in_models_ids\":[\"firstTraintupleID\"],\"tag\":\"\",\"metadata\":null}],\"aggregatetuples\":null,\"composite_traintuples\":null,\"testtuples\":[{\"key\":\"11000033-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"bb1bb7c3-1f62-244c-0f3a-761cc1688042\",\"bb2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"objective_key\":\"5c1d9cd1-c2c1-082d-de09-21b56d11030c\",\"tag\":\"\",\"metadata\":null,\"traintuple_id\":\"secondTraintupleID\"}]}"]}' -C myc ``` ##### Command output: ```json @@ -1458,12 +1458,12 @@ peer chaincode invoke -n mycc -c '{"Args":["createComputePlan","{\"clean_models\ "aggregatetuple_keys": null, "clean_models": false, "composite_traintuple_keys": null, - "compute_plan_id": "00000000-50f6-26d3-fa86-1bf6387e3896", "done_count": 0, "id_to_key": { "firstTraintupleID": "11000000-50f6-26d3-fa86-1bf6387e3896", "secondTraintupleID": "22000000-50f6-26d3-fa86-1bf6387e3896" }, + "key": "00000000-50f6-26d3-fa86-1bf6387e3896", "metadata": {}, "status": "todo", "tag": "a tag is simply a string", @@ -1483,7 +1483,7 @@ Smart contract: `updateComputePlan` ##### JSON Inputs: ```go { - "compute_plan_id": string (required,len=36), + "key": string (required,len=36), "traintuples": (omitempty) [{ "key": string (required,len=36), "data_manager_key": string 
(required,len=36), @@ -1533,7 +1533,7 @@ Smart contract: `updateComputePlan` ``` ##### Command peer example: ```bash -peer chaincode invoke -n mycc -c '{"Args":["updateComputePlan","{\"compute_plan_id\":\"00000000-50f6-26d3-fa86-1bf6387e3896\",\"traintuples\":[{\"key\":\"33000000-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\"],\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"id\":\"thirdTraintupleID\",\"in_models_ids\":[\"firstTraintupleID\",\"secondTraintupleID\"],\"tag\":\"\",\"metadata\":null}],\"aggregatetuples\":null,\"composite_traintuples\":null,\"testtuples\":[{\"key\":\"22000033-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"bb1bb7c3-1f62-244c-0f3a-761cc1688042\",\"bb2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"objective_key\":\"5c1d9cd1-c2c1-082d-de09-21b56d11030c\",\"tag\":\"\",\"metadata\":null,\"traintuple_id\":\"thirdTraintupleID\"}]}"]}' -C myc +peer chaincode invoke -n mycc -c '{"Args":["updateComputePlan","{\"key\":\"00000000-50f6-26d3-fa86-1bf6387e3896\",\"traintuples\":[{\"key\":\"33000000-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"aa1bb7c3-1f62-244c-0f3a-761cc1688042\"],\"algo_key\":\"fd1bb7c3-1f62-244c-0f3a-761cc1688042\",\"id\":\"thirdTraintupleID\",\"in_models_ids\":[\"firstTraintupleID\",\"secondTraintupleID\"],\"tag\":\"\",\"metadata\":null}],\"aggregatetuples\":null,\"composite_traintuples\":null,\"testtuples\":[{\"key\":\"22000033-50f6-26d3-fa86-1bf6387e3896\",\"data_manager_key\":\"da1bb7c3-1f62-244c-0f3a-761cc1688042\",\"data_sample_keys\":[\"bb1bb7c3-1f62-244c-0f3a-761cc1688042\",\"bb2bb7c3-1f62-244c-0f3a-761cc1688042\"],\"objective_key\":\"5c1d9cd1-c2c1-082d-de09-21b56d11030c\",\"tag\":\"\",\"metadata\":null,\"traintuple_id\":\"thirdTraintupleID\"}]}"]}' -C myc ``` ##### Command output: 
```json @@ -1541,11 +1541,11 @@ peer chaincode invoke -n mycc -c '{"Args":["updateComputePlan","{\"compute_plan_ "aggregatetuple_keys": null, "clean_models": false, "composite_traintuple_keys": null, - "compute_plan_id": "00000000-50f6-26d3-fa86-1bf6387e3896", "done_count": 0, "id_to_key": { "thirdTraintupleID": "33000000-50f6-26d3-fa86-1bf6387e3896" }, + "key": "00000000-50f6-26d3-fa86-1bf6387e3896", "metadata": {}, "status": "todo", "tag": "a tag is simply a string", @@ -1580,13 +1580,13 @@ peer chaincode invoke -n mycc -c '{"Args":["queryObjectiveLeaderboard","{\"objec { "objective": { "description": { - "hash": "5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "storage_address": "https://toto/objective/222/description" }, "key": "5c1d9cd1-c2c1-082d-de09-21b56d11030c", "metadata": {}, "metrics": { - "hash": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", + "checksum": "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379", "name": "accuracy", "storage_address": "https://toto/objective/222/metrics" }, @@ -1611,7 +1611,8 @@ peer chaincode invoke -n mycc -c '{"Args":["queryObjectiveLeaderboard","{\"objec "testtuples": [ { "algo": { - "hash": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "checksum": "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc", + "key": "fd1bb7c3-1f62-244c-0f3a-761cc1688042", "name": "hog + svm", "storage_address": "https://toto/algo/222/algo" }, @@ -1643,9 +1644,9 @@ peer chaincode invoke -n mycc -c '{"Args":["queryComputePlan","{\"key\":\"000000 "aggregatetuple_keys": null, "clean_models": false, "composite_traintuple_keys": null, - "compute_plan_id": "00000000-50f6-26d3-fa86-1bf6387e3896", "done_count": 0, "id_to_key": {}, + "key": "00000000-50f6-26d3-fa86-1bf6387e3896", "metadata": {}, "status": "todo", "tag": "a tag is simply a string", @@ -1672,9 +1673,9 @@ peer 
chaincode invoke -n mycc -c '{"Args":["queryComputePlans"]}' -C myc "aggregatetuple_keys": null, "clean_models": false, "composite_traintuple_keys": null, - "compute_plan_id": "00000000-50f6-26d3-fa86-1bf6387e3896", "done_count": 0, "id_to_key": {}, + "key": "00000000-50f6-26d3-fa86-1bf6387e3896", "metadata": {}, "status": "todo", "tag": "a tag is simply a string", @@ -1710,9 +1711,9 @@ peer chaincode invoke -n mycc -c '{"Args":["cancelComputePlan","{\"key\":\"00000 "aggregatetuple_keys": null, "clean_models": false, "composite_traintuple_keys": null, - "compute_plan_id": "00000000-50f6-26d3-fa86-1bf6387e3896", "done_count": 0, "id_to_key": {}, + "key": "00000000-50f6-26d3-fa86-1bf6387e3896", "metadata": {}, "status": "canceled", "tag": "a tag is simply a string", diff --git a/chaincode/algo.go b/chaincode/algo.go index 8f1a4303..ac02588b 100644 --- a/chaincode/algo.go +++ b/chaincode/algo.go @@ -35,10 +35,10 @@ func (algo *Algo) Set(db *LedgerDB, inp inputAlgo) (err error) { algo.Key = inp.Key algo.AssetType = AlgoType algo.Name = inp.Name - algo.Hash = inp.Hash + algo.Checksum = inp.Checksum algo.StorageAddress = inp.StorageAddress - algo.Description = &HashDress{ - Hash: inp.DescriptionHash, + algo.Description = &ChecksumAddress{ + Checksum: inp.DescriptionChecksum, StorageAddress: inp.DescriptionStorageAddress, } algo.Owner = owner diff --git a/chaincode/algo_aggregate.go b/chaincode/algo_aggregate.go index 2458d579..76b53293 100644 --- a/chaincode/algo_aggregate.go +++ b/chaincode/algo_aggregate.go @@ -35,10 +35,10 @@ func (algo *AggregateAlgo) Set(db *LedgerDB, inp inputAggregateAlgo) (err error) algo.Key = inp.Key algo.AssetType = AggregateAlgoType algo.Name = inp.Name - algo.Hash = inp.Hash + algo.Checksum = inp.Checksum algo.StorageAddress = inp.StorageAddress - algo.Description = &HashDress{ - Hash: inp.DescriptionHash, + algo.Description = &ChecksumAddress{ + Checksum: inp.DescriptionChecksum, StorageAddress: inp.DescriptionStorageAddress, } algo.Owner = 
owner diff --git a/chaincode/algo_aggregate_test.go b/chaincode/algo_aggregate_test.go index 2a89970a..6ee90d28 100644 --- a/chaincode/algo_aggregate_test.go +++ b/chaincode/algo_aggregate_test.go @@ -28,12 +28,12 @@ func TestAggregateAlgo(t *testing.T) { // Add algo with invalid field inpAlgo := inputAggregateAlgo{ inputAlgo: inputAlgo{ - DescriptionHash: "aaa", + DescriptionChecksum: "aaa", }, } args := inpAlgo.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding algo with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding algo with invalid checksum, status %d and message %s", resp.Status, resp.Message) // Properly add algo resp, tt := registerItem(t, *mockStub, "aggregateAlgo") @@ -55,12 +55,12 @@ func TestAggregateAlgo(t *testing.T) { outputAlgo: outputAlgo{ Key: algoKey, Name: inpAlgo.Name, - Content: &HashDress{ - Hash: inpAlgo.Hash, + Content: &ChecksumAddress{ + Checksum: inpAlgo.Checksum, StorageAddress: inpAlgo.StorageAddress, }, - Description: &HashDress{ - Hash: inpAlgo.DescriptionHash, + Description: &ChecksumAddress{ + Checksum: inpAlgo.DescriptionChecksum, StorageAddress: inpAlgo.DescriptionStorageAddress, }, Owner: worker, diff --git a/chaincode/algo_composite.go b/chaincode/algo_composite.go index af0d27c6..eea55052 100644 --- a/chaincode/algo_composite.go +++ b/chaincode/algo_composite.go @@ -35,10 +35,10 @@ func (algo *CompositeAlgo) Set(db *LedgerDB, inp inputCompositeAlgo) (err error) algo.Key = inp.Key algo.AssetType = CompositeAlgoType algo.Name = inp.Name - algo.Hash = inp.Hash + algo.Checksum = inp.Checksum algo.StorageAddress = inp.StorageAddress - algo.Description = &HashDress{ - Hash: inp.DescriptionHash, + algo.Description = &ChecksumAddress{ + Checksum: inp.DescriptionChecksum, StorageAddress: inp.DescriptionStorageAddress, } algo.Owner = owner diff --git a/chaincode/algo_composite_test.go 
b/chaincode/algo_composite_test.go index f3fe4623..67b88f73 100644 --- a/chaincode/algo_composite_test.go +++ b/chaincode/algo_composite_test.go @@ -28,12 +28,12 @@ func TestCompositeAlgo(t *testing.T) { // Add algo with invalid field inpAlgo := inputCompositeAlgo{ inputAlgo: inputAlgo{ - DescriptionHash: "aaa", + DescriptionChecksum: "aaa", }, } args := inpAlgo.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding algo with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding algo with invalid checksum, status %d and message %s", resp.Status, resp.Message) // Properly add algo resp, tt := registerItem(t, *mockStub, "compositeAlgo") @@ -55,12 +55,12 @@ func TestCompositeAlgo(t *testing.T) { outputAlgo: outputAlgo{ Key: algoKey, Name: inpAlgo.Name, - Content: &HashDress{ - Hash: inpAlgo.Hash, + Content: &ChecksumAddress{ + Checksum: inpAlgo.Checksum, StorageAddress: inpAlgo.StorageAddress, }, - Description: &HashDress{ - Hash: inpAlgo.DescriptionHash, + Description: &ChecksumAddress{ + Checksum: inpAlgo.DescriptionChecksum, StorageAddress: inpAlgo.DescriptionStorageAddress, }, Owner: worker, diff --git a/chaincode/algo_test.go b/chaincode/algo_test.go index 5d209016..00d305d4 100644 --- a/chaincode/algo_test.go +++ b/chaincode/algo_test.go @@ -27,11 +27,11 @@ func TestAlgo(t *testing.T) { // Add algo with invalid field inpAlgo := inputAlgo{ - DescriptionHash: "aaa", + DescriptionChecksum: "aaa", } args := inpAlgo.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding algo with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding algo with invalid checksum, status %d and message %s", resp.Status, resp.Message) // Properly add algo resp, tt := registerItem(t, *mockStub, "algo") @@ -51,12 +51,12 @@ func TestAlgo(t 
*testing.T) { expectedAlgo := outputAlgo{ Key: algoKey, Name: inpAlgo.Name, - Content: &HashDress{ - Hash: inpAlgo.Hash, + Content: &ChecksumAddress{ + Checksum: inpAlgo.Checksum, StorageAddress: inpAlgo.StorageAddress, }, - Description: &HashDress{ - Hash: inpAlgo.DescriptionHash, + Description: &ChecksumAddress{ + Checksum: inpAlgo.DescriptionChecksum, StorageAddress: inpAlgo.DescriptionStorageAddress, }, Owner: worker, diff --git a/chaincode/compute_plan.go b/chaincode/compute_plan.go index 3ef93c92..f5f1e220 100644 --- a/chaincode/compute_plan.go +++ b/chaincode/compute_plan.go @@ -128,7 +128,7 @@ func updateComputePlan(db *LedgerDB, args []string) (resp outputComputePlan, err len(inp.CompositeTraintuples) + len(inp.Testtuples) if count == 0 { - return resp, errors.BadRequest("empty update for compute plan %s", inp.ComputePlanID) + return resp, errors.BadRequest("empty update for compute plan %s", inp.Key) } return updateComputePlanInternal(db, inp) } @@ -139,7 +139,7 @@ func createComputePlanInternal(db *LedgerDB, inp inputComputePlan, tag string, m computePlan.Tag = tag computePlan.Metadata = metadata computePlan.CleanModels = cleanModels - err = computePlan.Create(db, inp.ComputePlanID) + err = computePlan.Create(db, inp.Key) if err != nil { return resp, err } @@ -148,7 +148,7 @@ func createComputePlanInternal(db *LedgerDB, inp inputComputePlan, tag string, m len(inp.CompositeTraintuples) + len(inp.Testtuples) if count == 0 { - resp.Fill(inp.ComputePlanID, computePlan, []string{}) + resp.Fill(inp.Key, computePlan, []string{}) return resp, nil } return updateComputePlanInternal(db, inp) @@ -156,7 +156,7 @@ func createComputePlanInternal(db *LedgerDB, inp inputComputePlan, tag string, m func updateComputePlanInternal(db *LedgerDB, inp inputComputePlan) (resp outputComputePlan, err error) { var tupleKey string - computePlan, err := db.GetComputePlan(inp.ComputePlanID) + computePlan, err := db.GetComputePlan(inp.Key) if err != nil { return resp, err } @@ -176,7 
+176,7 @@ func updateComputePlanInternal(db *LedgerDB, inp inputComputePlan) (resp outputC inpTraintuple := inputTraintuple{ Rank: strconv.Itoa(task.Depth), } - inpTraintuple.ComputePlanID = inp.ComputePlanID + inpTraintuple.ComputePlanKey = inp.Key err = inpTraintuple.Fill(computeTraintuple, IDToTrainTask) if err != nil { return resp, errors.BadRequest("traintuple ID %s: "+err.Error(), computeTraintuple.ID) @@ -193,7 +193,7 @@ func updateComputePlanInternal(db *LedgerDB, inp inputComputePlan) (resp outputC inpCompositeTraintuple := inputCompositeTraintuple{ Rank: strconv.Itoa(task.Depth), } - inpCompositeTraintuple.ComputePlanID = inp.ComputePlanID + inpCompositeTraintuple.ComputePlanKey = inp.Key err = inpCompositeTraintuple.Fill(computeCompositeTraintuple, IDToTrainTask) if err != nil { return resp, errors.BadRequest("traintuple ID %s: "+err.Error(), computeCompositeTraintuple.ID) @@ -209,7 +209,7 @@ func updateComputePlanInternal(db *LedgerDB, inp inputComputePlan) (resp outputC inpAggregatetuple := inputAggregatetuple{ Rank: strconv.Itoa(task.Depth), } - inpAggregatetuple.ComputePlanID = inp.ComputePlanID + inpAggregatetuple.ComputePlanKey = inp.Key err = inpAggregatetuple.Fill(computeAggregatetuple, IDToTrainTask) if err != nil { return resp, errors.BadRequest("traintuple ID %s: "+err.Error(), computeAggregatetuple.ID) @@ -239,16 +239,16 @@ func updateComputePlanInternal(db *LedgerDB, inp inputComputePlan) (resp outputC } - computePlan, err = db.GetComputePlan(inp.ComputePlanID) + computePlan, err = db.GetComputePlan(inp.Key) if err != nil { return resp, err } computePlan.IDToTrainTask = IDToTrainTask - err = computePlan.Save(db, inp.ComputePlanID) + err = computePlan.Save(db, inp.Key) if err != nil { return resp, err } - resp.Fill(inp.ComputePlanID, computePlan, NewIDs) + resp.Fill(inp.Key, computePlan, NewIDs) return resp, err } @@ -263,11 +263,11 @@ func queryComputePlan(db *LedgerDB, args []string) (resp outputComputePlan, err func queryComputePlans(db 
*LedgerDB, args []string) (resp []outputComputePlan, err error) { resp = []outputComputePlan{} - computePlanIDs, err := db.GetIndexKeys("computePlan~id", []string{"computePlan"}) + computePlanKeys, err := db.GetIndexKeys("computePlan~key", []string{"computePlan"}) if err != nil { return } - for _, key := range computePlanIDs { + for _, key := range computePlanKeys { var computePlan outputComputePlan computePlan, err = getOutComputePlan(db, key) if err != nil { @@ -278,7 +278,7 @@ func queryComputePlans(db *LedgerDB, args []string) (resp []outputComputePlan, e return resp, err } -// getComputePlan returns details for a compute plan id. +// getComputePlan returns details for a compute plan key. // Traintuples, CompositeTraintuples and Aggregatetuples are ordered by ascending rank. func getOutComputePlan(db *LedgerDB, key string) (resp outputComputePlan, err error) { @@ -316,12 +316,11 @@ func cancelComputePlan(db *LedgerDB, args []string) (resp outputComputePlan, err return resp, nil } -// Create generate on ID for the compute plan, add it to the ledger -// and register it in the compute plan index -func (cp *ComputePlan) Create(db *LedgerDB, ID string) error { +// Create adds a Compute Plan to the ledger and registers it in the compute plan index +func (cp *ComputePlan) Create(db *LedgerDB, key string) error { cp.StateKey = GetRandomHash() cp.AssetType = ComputePlanType - err := db.Add(ID, cp) + err := db.Add(key, cp) if err != nil { return err } @@ -329,15 +328,15 @@ func (cp *ComputePlan) Create(db *LedgerDB, ID string) error { if err != nil { return err } - if err := db.CreateIndex("computePlan~id", []string{"computePlan", ID}); err != nil { + if err := db.CreateIndex("computePlan~key", []string{"computePlan", key}); err != nil { return err } return nil } // Save add or update the compute plan in the ledger -func (cp *ComputePlan) Save(db *LedgerDB, ID string) error { - err := db.Put(ID, cp) +func (cp *ComputePlan) Save(db *LedgerDB, key string) error { + err := 
db.Put(key, cp) if err != nil { return err } @@ -408,11 +407,11 @@ func (cp *ComputePlan) AddTuple(tupleType AssetType, key, status string) { // UpdateComputePlanState retreive the compute plan if the ID is not empty, // check if the updated status change anything and save it if it's the case -func UpdateComputePlanState(db *LedgerDB, ComputePlanID, tupleStatus, tupleKey string) error { - if ComputePlanID == "" { +func UpdateComputePlanState(db *LedgerDB, ComputePlanKey, tupleStatus, tupleKey string) error { + if ComputePlanKey == "" { return nil } - cp, err := db.GetComputePlan(ComputePlanID) + cp, err := db.GetComputePlan(ComputePlanKey) if err != nil { return err } @@ -426,7 +425,7 @@ func UpdateComputePlanState(db *LedgerDB, ComputePlanID, tupleStatus, tupleKey s return err } if stateUpdated || len(doneModels) != 0 { - err = db.AddComputePlanEvent(ComputePlanID, cp.State.Status, doneModels) + err = db.AddComputePlanEvent(ComputePlanKey, cp.State.Status, doneModels) if err != nil { return err } @@ -435,13 +434,13 @@ func UpdateComputePlanState(db *LedgerDB, ComputePlanID, tupleStatus, tupleKey s return nil } -// TryAddIntermediaryModel will reference the hash model if the compute plan ID +// TryAddIntermediaryModel will reference the model key if the compute plan key // is not empty and if it's an intermediary model meaning without any children -func TryAddIntermediaryModel(db *LedgerDB, ComputePlanID, tupleKey, modelKey string) error { - if ComputePlanID == "" { +func TryAddIntermediaryModel(db *LedgerDB, ComputePlanKey, tupleKey, modelKey string) error { + if ComputePlanKey == "" { return nil } - cp, err := db.GetComputePlan(ComputePlanID) + cp, err := db.GetComputePlan(ComputePlanKey) if err != nil { return err } @@ -464,7 +463,7 @@ func TryAddIntermediaryModel(db *LedgerDB, ComputePlanID, tupleKey, modelKey str // UpdateIntermediaryModelsInuse check all models listed as intermediary. 
If any of // them are 'done', meaning that there is no train like tuples or testtuples -// planned to use this model. If that the case its hash will be added to the +// planned to use this model. If that the case its key will be added to the // returned slice and remove from the compute plan's one. func (cp *ComputePlan) UpdateIntermediaryModelsInuse(db *LedgerDB) ([]string, error) { if !cp.CleanModels { diff --git a/chaincode/compute_plan_test.go b/chaincode/compute_plan_test.go index 336b50da..aa1862b8 100644 --- a/chaincode/compute_plan_test.go +++ b/chaincode/compute_plan_test.go @@ -23,7 +23,7 @@ import ( var ( defaultComputePlan = inputComputePlan{ - ComputePlanID: computePlanID, + Key: computePlanKey, Traintuples: []inputComputePlanTraintuple{ inputComputePlanTraintuple{ Key: computePlanTraintupleKey1, @@ -82,7 +82,7 @@ var ( // // modelCompositionComputePlan = inputComputePlan{ - ComputePlanID: computePlanID, + Key: computePlanKey, CompositeTraintuples: []inputComputePlanCompositeTraintuple{ { Key: computePlanCompositeTraintupleKey1, @@ -269,7 +269,7 @@ func TestCreateComputePlanCompositeAggregate(t *testing.T) { IDs := []string{"compositeTraintuple1", "compositeTraintuple2", "aggregatetuple1", "aggregatetuple2"} inCP := inputComputePlan{ - ComputePlanID: computePlanID, + Key: computePlanKey, CompositeTraintuples: []inputComputePlanCompositeTraintuple{ { Key: computePlanCompositeTraintupleKey1, @@ -323,7 +323,7 @@ func TestCreateComputePlanCompositeAggregate(t *testing.T) { require.Contains(t, outCP.AggregatetupleKeys, aggtuples[1].Key) // Query the compute plan - cp, err := queryComputePlan(db, assetToArgs(inputKey{Key: outCP.ComputePlanID})) + cp, err := queryComputePlan(db, assetToArgs(inputKey{Key: outCP.Key})) assert.NoError(t, err, "calling queryComputePlan should succeed") assert.NotNil(t, cp) assert.Equal(t, 2, len(cp.CompositeTraintupleKeys)) @@ -372,18 +372,18 @@ func TestCreateComputePlan(t *testing.T) { assert.Equal(t, 
inCP.Traintuples[0].AlgoKey, first.Algo.Key) algo1, err := queryAlgo(db, assetToArgs(inputKey{Key: inCP.Traintuples[0].AlgoKey})) assert.NoError(t, err) - assert.Equal(t, algo1.Content.Hash, first.Algo.Hash) + assert.Equal(t, algo1.Content.Checksum, first.Algo.Checksum) assert.Equal(t, StatusTodo, first.Status) // check second traintuple assert.NotZero(t, second) assert.EqualValues(t, first.Key, second.InModels[0].TraintupleKey) - assert.EqualValues(t, first.ComputePlanID, second.ComputePlanID) + assert.EqualValues(t, first.ComputePlanKey, second.ComputePlanKey) assert.Len(t, second.InModels, 1) assert.Equal(t, inCP.Traintuples[1].AlgoKey, second.Algo.Key) algo2, err := queryAlgo(db, assetToArgs(inputKey{Key: inCP.Traintuples[1].AlgoKey})) assert.NoError(t, err) - assert.Equal(t, algo2.Content.Hash, second.Algo.Hash) + assert.Equal(t, algo2.Content.Checksum, second.Algo.Checksum) assert.Equal(t, StatusWaiting, second.Status) // Check the testtuples @@ -410,7 +410,7 @@ func TestQueryComputePlan(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, outCP) - cp, err := queryComputePlan(db, assetToArgs(inputKey{Key: outCP.ComputePlanID})) + cp, err := queryComputePlan(db, assetToArgs(inputKey{Key: outCP.Key})) assert.NoError(t, err, "calling queryComputePlan should succeed") assert.NotNil(t, cp) validateDefaultComputePlan(t, cp) @@ -456,7 +456,7 @@ func TestComputePlanEmptyTesttuples(t *testing.T) { db := NewLedgerDB(mockStub) inCP := inputComputePlan{ - ComputePlanID: computePlanID, + Key: computePlanKey, Traintuples: []inputComputePlanTraintuple{ inputComputePlanTraintuple{ Key: computePlanTraintupleKey1, @@ -482,7 +482,7 @@ func TestComputePlanEmptyTesttuples(t *testing.T) { assert.NotNil(t, outCP) assert.Len(t, outCP.TesttupleKeys, 0) - cp, err := queryComputePlan(db, assetToArgs(inputKey{Key: outCP.ComputePlanID})) + cp, err := queryComputePlan(db, assetToArgs(inputKey{Key: outCP.Key})) assert.NoError(t, err, "calling queryComputePlan should succeed") 
assert.NotNil(t, cp) assert.Len(t, outCP.TesttupleKeys, 0) @@ -519,10 +519,10 @@ func TestCancelComputePlan(t *testing.T) { assert.NotNil(t, db.event) assert.Len(t, db.event.CompositeTraintuples, 2) - _, err = cancelComputePlan(db, assetToArgs(inputKey{Key: out.ComputePlanID})) + _, err = cancelComputePlan(db, assetToArgs(inputKey{Key: out.Key})) assert.NoError(t, err) - computePlan, err := getOutComputePlan(db, out.ComputePlanID) + computePlan, err := getOutComputePlan(db, out.Key) assert.Equal(t, StatusCanceled, computePlan.Status) tuples, err := queryCompositeTraintuples(db, []string{}) @@ -563,10 +563,10 @@ func TestStartedTuplesOfCanceledComputePlan(t *testing.T) { logStartCompositeTrain(db, assetToArgs(inputKey{out.CompositeTraintupleKeys[1]})) logFailCompositeTrain(db, assetToArgs(inputKey{out.CompositeTraintupleKeys[1]})) - _, err = cancelComputePlan(db, assetToArgs(inputKey{Key: out.ComputePlanID})) + _, err = cancelComputePlan(db, assetToArgs(inputKey{Key: out.Key})) assert.NoError(t, err) - computePlan, err := getOutComputePlan(db, out.ComputePlanID) + computePlan, err := getOutComputePlan(db, out.Key) assert.Equal(t, StatusCanceled, computePlan.Status) tuples, err := queryCompositeTraintuples(db, []string{}) @@ -594,7 +594,7 @@ func TestLogSuccessAfterCancel(t *testing.T) { logStartCompositeTrain(db, assetToArgs(inputKey{out.CompositeTraintupleKeys[0]})) logStartCompositeTrain(db, assetToArgs(inputKey{out.CompositeTraintupleKeys[1]})) - _, err = cancelComputePlan(db, assetToArgs(inputKey{Key: out.ComputePlanID})) + _, err = cancelComputePlan(db, assetToArgs(inputKey{Key: out.Key})) assert.NoError(t, err) inp := inputLogSuccessCompositeTrain{} @@ -603,7 +603,7 @@ func TestLogSuccessAfterCancel(t *testing.T) { _, err = logSuccessCompositeTrain(db, assetToArgs(inp)) assert.NoError(t, err) - computePlan, err := getOutComputePlan(db, out.ComputePlanID) + computePlan, err := getOutComputePlan(db, out.Key) assert.Equal(t, StatusCanceled, computePlan.Status) 
expected := []string{StatusDoing, StatusDone, StatusAborted, StatusAborted} @@ -622,7 +622,7 @@ func TestCreateTagedEmptyComputePlan(t *testing.T) { inp := inputNewComputePlan{ inputComputePlan: inputComputePlan{ - ComputePlanID: computePlanID, + Key: computePlanKey, }, Tag: tag} out, err := createComputePlan(db, assetToArgs(inp)) @@ -639,16 +639,16 @@ func TestComputePlanMetrics(t *testing.T) { out, err := createComputePlanInternal(db, defaultComputePlan, tag, map[string]string{}, false) assert.NoError(t, err) - checkComputePlanMetrics(t, db, out.ComputePlanID, 0, 3) + checkComputePlanMetrics(t, db, out.Key, 0, 3) traintupleToDone(t, db, out.TraintupleKeys[0]) - checkComputePlanMetrics(t, db, out.ComputePlanID, 1, 3) + checkComputePlanMetrics(t, db, out.Key, 1, 3) traintupleToDone(t, db, out.TraintupleKeys[1]) - checkComputePlanMetrics(t, db, out.ComputePlanID, 2, 3) + checkComputePlanMetrics(t, db, out.Key, 2, 3) testtupleToDone(t, db, out.TesttupleKeys[0]) - checkComputePlanMetrics(t, db, out.ComputePlanID, 3, 3) + checkComputePlanMetrics(t, db, out.Key, 3, 3) } func traintupleToDone(t *testing.T, db *LedgerDB, key string) { @@ -658,7 +658,7 @@ func traintupleToDone(t *testing.T, db *LedgerDB, key string) { success := inputLogSuccessTrain{} success.Key = key - success.OutModel.Hash = GetRandomHash() + success.OutModel.Checksum = GetRandomHash() success.fillDefaults() _, err = logSuccessTrain(db, assetToArgs(success)) assert.NoError(t, err) @@ -675,8 +675,8 @@ func testtupleToDone(t *testing.T, db *LedgerDB, key string) { assert.NoError(t, err) } -func checkComputePlanMetrics(t *testing.T, db *LedgerDB, cpID string, doneCount, tupleCount int) { - out, err := getOutComputePlan(db, cpID) +func checkComputePlanMetrics(t *testing.T, db *LedgerDB, cpKey string, doneCount, tupleCount int) { + out, err := getOutComputePlan(db, cpKey) assert.NoError(t, err) assert.Equal(t, doneCount, out.DoneCount) assert.Equal(t, tupleCount, out.TupleCount) @@ -689,12 +689,12 @@ func 
TestUpdateComputePlan(t *testing.T) { registerItem(t, *mockStub, "aggregateAlgo") db := NewLedgerDB(mockStub) - out, err := createComputePlanInternal(db, inputComputePlan{ComputePlanID: computePlanID}, tag, map[string]string{}, false) + out, err := createComputePlanInternal(db, inputComputePlan{Key: computePlanKey}, tag, map[string]string{}, false) assert.NoError(t, err) assert.Equal(t, tag, out.Tag) inp := defaultComputePlan - inp.ComputePlanID = out.ComputePlanID + inp.Key = out.Key out, err = updateComputePlanInternal(db, inp) assert.NoError(t, err) validateDefaultComputePlan(t, out) @@ -704,7 +704,7 @@ func TestUpdateComputePlan(t *testing.T) { NewID := "Update" up := inputComputePlan{ - ComputePlanID: out.ComputePlanID, + Key: out.Key, Traintuples: []inputComputePlanTraintuple{ { Key: computePlanTraintupleKey3, @@ -778,12 +778,12 @@ func TestCreateSameComputePlanTwice(t *testing.T) { registerItem(t, *mockStub, "aggregateAlgo") db := NewLedgerDB(mockStub) - out, err := createComputePlanInternal(db, inputComputePlan{ComputePlanID: computePlanID}, tag, map[string]string{}, false) + out, err := createComputePlanInternal(db, inputComputePlan{Key: computePlanKey}, tag, map[string]string{}, false) assert.NoError(t, err) assert.Equal(t, tag, out.Tag) up := inputComputePlan{ - ComputePlanID: out.ComputePlanID, + Key: out.Key, Traintuples: []inputComputePlanTraintuple{ { Key: computePlanTraintupleKey3, @@ -824,12 +824,12 @@ func TestCreateSameComputePlanTwice(t *testing.T) { assert.NoError(t, err) // Upload the same tuples inside another compute plan - out, err = createComputePlanInternal(db, inputComputePlan{ComputePlanID: computePlanID2}, tag, map[string]string{}, false) + out, err = createComputePlanInternal(db, inputComputePlan{Key: computePlanKey2}, tag, map[string]string{}, false) assert.NoError(t, err) assert.Equal(t, tag, out.Tag) inp := defaultComputePlan - inp.ComputePlanID = out.ComputePlanID + inp.Key = out.Key out, err = updateComputePlanInternal(db, inp) 
assert.NoError(t, err) } diff --git a/chaincode/data.go b/chaincode/data.go index cb751ca0..84ebc479 100644 --- a/chaincode/data.go +++ b/chaincode/data.go @@ -27,14 +27,14 @@ func (dataManager *DataManager) Set(db *LedgerDB, inp inputDataManager) (string, dataManager.ObjectiveKey = inp.ObjectiveKey dataManager.AssetType = DataManagerType dataManager.Name = inp.Name - dataManager.Opener = &HashDress{ - Hash: inp.OpenerHash, + dataManager.Opener = &ChecksumAddress{ + Checksum: inp.OpenerChecksum, StorageAddress: inp.OpenerStorageAddress, } dataManager.Type = inp.Type dataManager.Metadata = inp.Metadata - dataManager.Description = &HashDress{ - Hash: inp.DescriptionHash, + dataManager.Description = &ChecksumAddress{ + Checksum: inp.DescriptionChecksum, StorageAddress: inp.DescriptionStorageAddress, } owner, err := GetTxCreator(db.cc) diff --git a/chaincode/data_test.go b/chaincode/data_test.go index 646cddc8..f33710dc 100644 --- a/chaincode/data_test.go +++ b/chaincode/data_test.go @@ -39,11 +39,11 @@ func TestDataManager(t *testing.T) { // Add dataManager with invalid field inpDataManager := inputDataManager{ - OpenerHash: "aaa", + OpenerChecksum: "aaa", } args := inpDataManager.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding dataManager with invalid opener hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding dataManager with invalid opener checksum, status %d and message %s", resp.Status, resp.Message) // Properly add dataManager resp, tt := registerItem(t, *mockStub, "dataManager") @@ -70,15 +70,15 @@ func TestDataManager(t *testing.T) { Key: dataManagerKey, Owner: worker, Name: inpDataManager.Name, - Description: &HashDress{ + Description: &ChecksumAddress{ StorageAddress: inpDataManager.DescriptionStorageAddress, - Hash: inpDataManager.DescriptionHash, + Checksum: inpDataManager.DescriptionChecksum, }, Permissions: outputPermissions{ 
Process: Permission{Public: true, AuthorizedIDs: []string{}}, }, - Opener: &HashDress{ - Hash: inpDataManager.OpenerHash, + Opener: &ChecksumAddress{ + Checksum: inpDataManager.OpenerChecksum, StorageAddress: inpDataManager.OpenerStorageAddress, }, Type: inpDataManager.Type, @@ -147,7 +147,7 @@ func TestDataset(t *testing.T) { } args := inpDataSample.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding dataSample with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding dataSample with invalid key, status %d and message %s", resp.Status, resp.Message) // Add dataSample with unexiting dataManager inpDataSample = inputDataSample{} diff --git a/chaincode/generate_examples_test.go b/chaincode/generate_examples_test.go index 6e8f3c15..a66f0823 100644 --- a/chaincode/generate_examples_test.go +++ b/chaincode/generate_examples_test.go @@ -249,7 +249,7 @@ func TestPipeline(t *testing.T) { fmt.Fprintln(&out, "#### ------------ Create a ComputePlan ------------") inputCP := inputNewComputePlan{inputComputePlan: inputComputePlan{ - ComputePlanID: computePlanID, + Key: computePlanKey, }} inputCP.Tag = tag inputCP.inputComputePlan = defaultComputePlan @@ -259,7 +259,7 @@ func TestPipeline(t *testing.T) { fmt.Fprintln(&out, "#### ------------ Update a ComputePlan ------------") upCP := inputComputePlan{} - upCP.ComputePlanID = outCp.ComputePlanID + upCP.Key = outCp.Key upCP.Traintuples = []inputComputePlanTraintuple{ { Key: computePlanTraintupleKey3, @@ -289,11 +289,11 @@ func TestPipeline(t *testing.T) { callAssertAndPrint("invoke", "queryObjectiveLeaderboard", inpLeaderboard) fmt.Fprintln(&out, "#### ------------ Query Compute Plan(s) ------------") - callAssertAndPrint("invoke", "queryComputePlan", inputKey{outCp.ComputePlanID}) + callAssertAndPrint("invoke", "queryComputePlan", inputKey{outCp.Key}) callAssertAndPrint("invoke", "queryComputePlans", 
nil) fmt.Fprintln(&out, "#### ------------ Cancel a ComputePlan ------------") - callAssertAndPrint("invoke", "cancelComputePlan", inputKey{outCp.ComputePlanID}) + callAssertAndPrint("invoke", "cancelComputePlan", inputKey{outCp.Key}) // Use the output to check the EXAMPLES.md file and if asked update it doc := out.String() diff --git a/chaincode/input.go b/chaincode/input.go index a55dba9b..2ad60d24 100644 --- a/chaincode/input.go +++ b/chaincode/input.go @@ -33,10 +33,10 @@ var ( type inputObjective struct { Key string `validate:"required,len=36" json:"key"` Name string `validate:"required,gte=1,lte=100" json:"name"` - DescriptionHash string `validate:"required,len=64,hexadecimal" json:"description_hash"` + DescriptionChecksum string `validate:"required,len=64,hexadecimal" json:"description_checksum"` DescriptionStorageAddress string `validate:"required,url" json:"description_storage_address"` MetricsName string `validate:"required,gte=1,lte=100" json:"metrics_name"` - MetricsHash string `validate:"required,len=64,hexadecimal" json:"metrics_hash"` + MetricsChecksum string `validate:"required,len=64,hexadecimal" json:"metrics_checksum"` MetricsStorageAddress string `validate:"required,url" json:"metrics_storage_address"` TestDataset inputDataset `validate:"omitempty" json:"test_dataset"` Permissions inputPermissions `validate:"required" json:"permissions"` @@ -53,9 +53,9 @@ type inputDataset struct { type inputAlgo struct { Key string `validate:"required,len=36" json:"key"` Name string `validate:"required,gte=1,lte=100" json:"name"` - Hash string `validate:"required,len=64,hexadecimal" json:"hash"` + Checksum string `validate:"required,len=64,hexadecimal" json:"checksum"` StorageAddress string `validate:"required,url" json:"storage_address"` - DescriptionHash string `validate:"required,len=64,hexadecimal" json:"description_hash"` + DescriptionChecksum string `validate:"required,len=64,hexadecimal" json:"description_checksum"` DescriptionStorageAddress string 
`validate:"required,url" json:"description_storage_address"` Permissions inputPermissions `validate:"required" json:"permissions"` Metadata map[string]string `validate:"lte=100,dive,keys,lte=50,endkeys,lte=100" json:"metadata"` @@ -65,10 +65,10 @@ type inputAlgo struct { type inputDataManager struct { Key string `validate:"required,len=36" json:"key"` Name string `validate:"required,gte=1,lte=100" json:"name"` - OpenerHash string `validate:"required,len=64,hexadecimal" json:"opener_hash"` + OpenerChecksum string `validate:"required,len=64,hexadecimal" json:"opener_checksum"` OpenerStorageAddress string `validate:"required,url" json:"opener_storage_address"` Type string `validate:"required,gte=1,lte=30" json:"type"` - DescriptionHash string `validate:"required,len=64,hexadecimal" json:"description_hash"` + DescriptionChecksum string `validate:"required,len=64,hexadecimal" json:"description_checksum"` DescriptionStorageAddress string `validate:"required,url" json:"description_storage_address"` ObjectiveKey string `validate:"omitempty,len=36" json:"objective_key"` //`validate:"required"` Permissions inputPermissions `validate:"required" json:"permissions"` @@ -101,7 +101,7 @@ type inputTraintuple struct { InModels []string `validate:"omitempty,dive,len=36" json:"in_models"` DataManagerKey string `validate:"required,len=36" json:"data_manager_key"` DataSampleKeys []string `validate:"required,unique,gt=0,dive,len=36" json:"data_sample_keys"` - ComputePlanID string `validate:"required_with=Rank" json:"compute_plan_id"` + ComputePlanKey string `validate:"required_with=Rank" json:"compute_plan_key"` Rank string `json:"rank"` Tag string `validate:"omitempty,lte=64" json:"tag"` Metadata map[string]string `validate:"lte=100,dive,keys,lte=50,endkeys,lte=100" json:"metadata"` @@ -124,7 +124,7 @@ type inputKey struct { type inputLogSuccessTrain struct { inputLog - OutModel inputKeyHashDress `validate:"required" json:"out_model"` + OutModel inputKeyChecksumAddress 
`validate:"required" json:"out_model"` } type inputLogSuccessTest struct { inputLog @@ -141,14 +141,14 @@ type inputLog struct { Log string `validate:"lte=200" json:"log"` } -type inputKeyHash struct { - Key string `validate:"required,len=36" json:"key"` - Hash string `validate:"required,len=64,hexadecimal" json:"hash"` +type inputKeyChecksum struct { + Key string `validate:"required,len=36" json:"key"` + Checksum string `validate:"required,len=64,hexadecimal" json:"checksum"` } -type inputKeyHashDress struct { +type inputKeyChecksumAddress struct { Key string `validate:"required,len=36" json:"key"` - Hash string `validate:"required,len=64,hexadecimal" json:"hash"` + Checksum string `validate:"required,len=64,hexadecimal" json:"checksum"` StorageAddress string `validate:"required" json:"storage_address"` } @@ -160,7 +160,7 @@ type inputQueryFilter struct { // inputConputePlan represent a coherent set of tuples uploaded together. type inputComputePlan struct { - ComputePlanID string `validate:"required,len=36" json:"compute_plan_id"` + Key string `validate:"required,len=36" json:"key"` Traintuples []inputComputePlanTraintuple `validate:"omitempty" json:"traintuples"` Aggregatetuples []inputComputePlanAggregatetuple `validate:"omitempty" json:"aggregatetuples"` CompositeTraintuples []inputComputePlanCompositeTraintuple `validate:"omitempty" json:"composite_traintuples"` diff --git a/chaincode/input_aggregate.go b/chaincode/input_aggregate.go index 1351d025..232bda93 100644 --- a/chaincode/input_aggregate.go +++ b/chaincode/input_aggregate.go @@ -16,14 +16,14 @@ package main // inputAggregatetuple is the representation of input args to register an aggregate Tuple type inputAggregatetuple struct { - Key string `validate:"required,len=36" json:"key"` - AlgoKey string `validate:"required,len=36" json:"algo_key"` - InModels []string `validate:"omitempty,dive,len=36" json:"in_models"` - ComputePlanID string `validate:"required_with=Rank" json:"compute_plan_id"` - Metadata 
map[string]string `validate:"lte=100,dive,keys,lte=50,endkeys,lte=100" json:"metadata"` - Rank string `json:"rank"` - Tag string `validate:"omitempty,lte=64" json:"tag"` - Worker string `validate:"required" json:"worker"` + Key string `validate:"required,len=36" json:"key"` + AlgoKey string `validate:"required,len=36" json:"algo_key"` + InModels []string `validate:"omitempty,dive,len=36" json:"in_models"` + ComputePlanKey string `validate:"required_with=Rank" json:"compute_plan_key"` + Metadata map[string]string `validate:"lte=100,dive,keys,lte=50,endkeys,lte=100" json:"metadata"` + Rank string `json:"rank"` + Tag string `validate:"omitempty,lte=64" json:"tag"` + Worker string `validate:"required" json:"worker"` } type inputAggregateAlgo struct { diff --git a/chaincode/input_composite.go b/chaincode/input_composite.go index 082cc8c5..c34e918f 100644 --- a/chaincode/input_composite.go +++ b/chaincode/input_composite.go @@ -23,7 +23,7 @@ type inputCompositeTraintuple struct { OutTrunkModelPermissions inputPermissions `validate:"required" json:"out_trunk_model_permissions"` DataManagerKey string `validate:"required,len=36" json:"data_manager_key"` DataSampleKeys []string `validate:"required,unique,gt=0,dive,len=36" json:"data_sample_keys"` - ComputePlanID string `validate:"required_with=Rank" json:"compute_plan_id"` + ComputePlanKey string `validate:"required_with=Rank" json:"compute_plan_key"` Rank string `json:"rank"` Tag string `validate:"omitempty,lte=64" json:"tag"` Metadata map[string]string `validate:"lte=100,dive,keys,lte=50,endkeys,lte=100" json:"metadata"` @@ -35,6 +35,6 @@ type inputCompositeAlgo struct { type inputLogSuccessCompositeTrain struct { inputLog - OutHeadModel inputKeyHash `validate:"required" json:"out_head_model"` - OutTrunkModel inputKeyHashDress `validate:"required" json:"out_trunk_model"` + OutHeadModel inputKeyChecksum `validate:"required" json:"out_head_model"` + OutTrunkModel inputKeyChecksumAddress `validate:"required" 
json:"out_trunk_model"` } diff --git a/chaincode/input_test.go b/chaincode/input_test.go index 6dcdf241..44e1deca 100644 --- a/chaincode/input_test.go +++ b/chaincode/input_test.go @@ -27,8 +27,8 @@ func (dataManager *inputDataManager) createDefault() [][]byte { if dataManager.Name == "" { dataManager.Name = "liver slide" } - if dataManager.OpenerHash == "" { - dataManager.OpenerHash = dataManagerOpenerHash + if dataManager.OpenerChecksum == "" { + dataManager.OpenerChecksum = dataManagerOpenerChecksum } if dataManager.OpenerStorageAddress == "" { dataManager.OpenerStorageAddress = "https://toto/dataManager/42234/opener" @@ -36,8 +36,8 @@ func (dataManager *inputDataManager) createDefault() [][]byte { if dataManager.Type == "" { dataManager.Type = "images" } - if dataManager.DescriptionHash == "" { - dataManager.DescriptionHash = "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee" + if dataManager.DescriptionChecksum == "" { + dataManager.DescriptionChecksum = "8d4bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eee" } if dataManager.DescriptionStorageAddress == "" { dataManager.DescriptionStorageAddress = "https://toto/dataManager/42234/description" @@ -68,8 +68,8 @@ func (objective *inputObjective) createDefault() [][]byte { if objective.Name == "" { objective.Name = "MSI classification" } - if objective.DescriptionHash == "" { - objective.DescriptionHash = objectiveDescriptionHash + if objective.DescriptionChecksum == "" { + objective.DescriptionChecksum = objectiveDescriptionChecksum } if objective.DescriptionStorageAddress == "" { objective.DescriptionStorageAddress = "https://toto/objective/222/description" @@ -77,8 +77,8 @@ func (objective *inputObjective) createDefault() [][]byte { if objective.MetricsName == "" { objective.MetricsName = "accuracy" } - if objective.MetricsHash == "" { - objective.MetricsHash = objectiveMetricsHash + if objective.MetricsChecksum == "" { + objective.MetricsChecksum = objectiveMetricsChecksum } if 
objective.MetricsStorageAddress == "" { objective.MetricsStorageAddress = objectiveMetricsStorageAddress @@ -101,14 +101,14 @@ func (algo *inputAlgo) createDefault() [][]byte { if algo.Name == "" { algo.Name = algoName } - if algo.Hash == "" { - algo.Hash = algoHash + if algo.Checksum == "" { + algo.Checksum = algoChecksum } if algo.StorageAddress == "" { algo.StorageAddress = algoStorageAddress } - if algo.DescriptionHash == "" { - algo.DescriptionHash = "e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dca" + if algo.DescriptionChecksum == "" { + algo.DescriptionChecksum = "e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dca" } if algo.DescriptionStorageAddress == "" { algo.DescriptionStorageAddress = "https://toto/algo/222/description" @@ -125,14 +125,14 @@ func (algo *inputCompositeAlgo) createDefault() [][]byte { if algo.Name == "" { algo.Name = compositeAlgoName } - if algo.Hash == "" { - algo.Hash = compositeAlgoHash + if algo.Checksum == "" { + algo.Checksum = compositeAlgoChecksum } if algo.StorageAddress == "" { algo.StorageAddress = compositeAlgoStorageAddress } - if algo.DescriptionHash == "" { - algo.DescriptionHash = "e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcb" + if algo.DescriptionChecksum == "" { + algo.DescriptionChecksum = "e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcb" } if algo.DescriptionStorageAddress == "" { algo.DescriptionStorageAddress = "https://toto/compositeAlgo/222/description" @@ -154,14 +154,14 @@ func (algo *inputAggregateAlgo) fillDefaults() { if algo.Name == "" { algo.Name = aggregateAlgoName } - if algo.Hash == "" { - algo.Hash = aggregateAlgoHash + if algo.Checksum == "" { + algo.Checksum = aggregateAlgoChecksum } if algo.StorageAddress == "" { algo.StorageAddress = aggregateAlgoStorageAddress } - if algo.DescriptionHash == "" { - algo.DescriptionHash = "e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcb" + if algo.DescriptionChecksum == "" { + 
algo.DescriptionChecksum = "e2dbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcb" } if algo.DescriptionStorageAddress == "" { algo.DescriptionStorageAddress = "https://toto/aggregateAlgo/222/description" @@ -256,8 +256,8 @@ func (success *inputLogSuccessTrain) fillDefaults() { if success.OutModel.Key == "" { success.OutModel.Key = modelKey } - if success.OutModel.Hash == "" { - success.OutModel.Hash = modelHash + if success.OutModel.Checksum == "" { + success.OutModel.Checksum = modelChecksum } if success.OutModel.StorageAddress == "" { success.OutModel.StorageAddress = modelAddress @@ -283,14 +283,14 @@ func (success *inputLogSuccessCompositeTrain) fillDefaults() { if success.OutHeadModel.Key == "" { success.OutHeadModel.Key = headModelKey } - if success.OutHeadModel.Hash == "" { - success.OutHeadModel.Hash = headModelHash + if success.OutHeadModel.Checksum == "" { + success.OutHeadModel.Checksum = headModelChecksum } if success.OutTrunkModel.Key == "" { success.OutTrunkModel.Key = trunkModelKey } - if success.OutTrunkModel.Hash == "" { - success.OutTrunkModel.Hash = trunkModelHash + if success.OutTrunkModel.Checksum == "" { + success.OutTrunkModel.Checksum = trunkModelChecksum } if success.OutTrunkModel.StorageAddress == "" { success.OutTrunkModel.StorageAddress = trunkModelAddress diff --git a/chaincode/ledger.go b/chaincode/ledger.go index 84a86820..1c31d16d 100644 --- a/chaincode/ledger.go +++ b/chaincode/ledger.go @@ -45,15 +45,15 @@ type StatusUpdater interface { // Objective is the representation of one of the element type stored in the ledger type Objective struct { - Key string `json:"key"` - Name string `json:"name"` - AssetType AssetType `json:"asset_type"` - Description *HashDress `json:"description"` - Metrics *HashDressName `json:"metrics"` - Owner string `json:"owner"` - TestDataset *Dataset `json:"test_dataset"` - Permissions Permissions `json:"permissions"` - Metadata map[string]string `json:"metadata"` + Key string `json:"key"` + Name 
string `json:"name"` + AssetType AssetType `json:"asset_type"` + Description *ChecksumAddress `json:"description"` + Metrics *ChecksumAddressName `json:"metrics"` + Owner string `json:"owner"` + TestDataset *Dataset `json:"test_dataset"` + Permissions Permissions `json:"permissions"` + Metadata map[string]string `json:"metadata"` } // DataManager is the representation of one of the elements type stored in the ledger @@ -61,9 +61,9 @@ type DataManager struct { Key string `json:"key"` Name string `json:"name"` AssetType AssetType `json:"asset_type"` - Opener *HashDress `json:"opener"` + Opener *ChecksumAddress `json:"opener"` Type string `json:"type"` - Description *HashDress `json:"description"` + Description *ChecksumAddress `json:"description"` Owner string `json:"owner"` ObjectiveKey string `json:"objective_key"` Permissions Permissions `json:"permissions"` @@ -83,9 +83,9 @@ type Algo struct { Key string `json:"key"` Name string `json:"name"` AssetType AssetType `json:"asset_type"` - Hash string `json:"hash"` + Checksum string `json:"checksum"` StorageAddress string `json:"storage_address"` - Description *HashDress `json:"description"` + Description *ChecksumAddress `json:"description"` Owner string `json:"owner"` Permissions Permissions `json:"permissions"` Metadata map[string]string `json:"metadata"` @@ -105,101 +105,101 @@ type AggregateAlgo struct { // that are common to Traintuple, CompositeTraintuple and // AggregateTuple type GenericTuple struct { - AssetType AssetType `json:"asset_type"` - AlgoKey string `json:"algo_key"` - ComputePlanID string `json:"compute_plan_id"` - Creator string `json:"creator"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` + AssetType AssetType `json:"asset_type"` + AlgoKey string `json:"algo_key"` + ComputePlanKey string `json:"compute_plan_key"` + Creator string `json:"creator"` + Log string `json:"log"` + Metadata 
map[string]string `json:"metadata"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` } // Traintuple is the representation of one the element type stored in the ledger. It describes a training task occuring on the platform type Traintuple struct { - Key string `json:"key"` - AssetType AssetType `json:"asset_type"` - AlgoKey string `json:"algo_key"` - ComputePlanID string `json:"compute_plan_id"` - Creator string `json:"creator"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` - Dataset *Dataset `json:"dataset"` - InModelKeys []string `json:"in_models"` - OutModel *KeyHashDress `json:"out_model"` - Permissions Permissions `json:"permissions"` + Key string `json:"key"` + AssetType AssetType `json:"asset_type"` + AlgoKey string `json:"algo_key"` + ComputePlanKey string `json:"compute_plan_key"` + Creator string `json:"creator"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` + Dataset *Dataset `json:"dataset"` + InModelKeys []string `json:"in_models"` + OutModel *KeyChecksumAddress `json:"out_model"` + Permissions Permissions `json:"permissions"` } // CompositeTraintuple is like a traintuple, but for composite model composition type CompositeTraintuple struct { - Key string `json:"key"` - AssetType AssetType `json:"asset_type"` - AlgoKey string `json:"algo_key"` - ComputePlanID string `json:"compute_plan_id"` - Creator string `json:"creator"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` - Dataset *Dataset `json:"dataset"` - InHeadModel string `json:"in_head_model"` - InTrunkModel string `json:"in_trunk_model"` - OutHeadModel CompositeTraintupleOutHeadModel `json:"out_head_model"` - OutTrunkModel 
CompositeTraintupleOutModel `json:"out_trunk_model"` + Key string `json:"key"` + AssetType AssetType `json:"asset_type"` + AlgoKey string `json:"algo_key"` + ComputePlanKey string `json:"compute_plan_key"` + Creator string `json:"creator"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` + Dataset *Dataset `json:"dataset"` + InHeadModel string `json:"in_head_model"` + InTrunkModel string `json:"in_trunk_model"` + OutHeadModel CompositeTraintupleOutHeadModel `json:"out_head_model"` + OutTrunkModel CompositeTraintupleOutModel `json:"out_trunk_model"` } // Aggregatetuple is like a traintuple, but for aggregate model composition type Aggregatetuple struct { - Key string `json:"key"` - AssetType AssetType `json:"asset_type"` - AlgoKey string `json:"algo_key"` - ComputePlanID string `json:"compute_plan_id"` - Creator string `json:"creator"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` - InModelKeys []string `json:"in_models"` - OutModel *KeyHashDress `json:"out_model"` - Permissions Permissions `json:"permissions"` // TODO (aggregate): what do permissions mean here? - Worker string `json:"worker"` + Key string `json:"key"` + AssetType AssetType `json:"asset_type"` + AlgoKey string `json:"algo_key"` + ComputePlanKey string `json:"compute_plan_key"` + Creator string `json:"creator"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` + InModelKeys []string `json:"in_models"` + OutModel *KeyChecksumAddress `json:"out_model"` + Permissions Permissions `json:"permissions"` // TODO (aggregate): what do permissions mean here? 
+ Worker string `json:"worker"` } // CompositeTraintupleOutModel is the out-model of a CompositeTraintuple type CompositeTraintupleOutModel struct { - OutModel *KeyHashDress `json:"out_model"` - Permissions Permissions `json:"permissions"` + OutModel *KeyChecksumAddress `json:"out_model"` + Permissions Permissions `json:"permissions"` } // CompositeTraintupleOutHeadModel is the out-model of a CompositeTraintuple type CompositeTraintupleOutHeadModel struct { - OutModel *KeyHash `json:"out_model"` - Permissions Permissions `json:"permissions"` + OutModel *KeyChecksum `json:"out_model"` + Permissions Permissions `json:"permissions"` } // Testtuple is the representation of one the element type stored in the ledger. It describes a training task occuring on the platform type Testtuple struct { - Key string `json:"key"` - AlgoKey string `json:"algo"` - AssetType AssetType `json:"asset_type"` - Certified bool `json:"certified"` - ComputePlanID string `json:"compute_plan_id"` - Creator string `json:"creator"` - Dataset *TtDataset `json:"dataset"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - TraintupleKey string `json:"traintuple_key"` - ObjectiveKey string `json:"objective"` - Permissions Permissions `json:"permissions"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` + Key string `json:"key"` + AlgoKey string `json:"algo"` + AssetType AssetType `json:"asset_type"` + Certified bool `json:"certified"` + ComputePlanKey string `json:"compute_plan_key"` + Creator string `json:"creator"` + Dataset *TtDataset `json:"dataset"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + TraintupleKey string `json:"traintuple_key"` + ObjectiveKey string `json:"objective"` + Permissions Permissions `json:"permissions"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` } // ComputePlan is the ledger's representation of a compute plan. 
@@ -237,45 +237,45 @@ type TrainTask struct { // Struct used in the representation of elements stored in the ledger // --------------------------------------------------------------------------------- -// KeyHash ... -type KeyHash struct { - Key string `json:"key"` - Hash string `json:"hash"` +// KeyChecksum ... +type KeyChecksum struct { + Key string `json:"key"` + Checksum string `json:"checksum"` } -// HashDress stores a hash and a Storage Address -type HashDress struct { - Hash string `json:"hash"` +// ChecksumAddress stores a checksum and a Storage Address +type ChecksumAddress struct { + Checksum string `json:"checksum"` StorageAddress string `json:"storage_address"` } -// KeyHashDress ... -type KeyHashDress struct { +// KeyChecksumAddress ... +type KeyChecksumAddress struct { Key string `json:"key"` - Hash string `json:"hash"` + Checksum string `json:"checksum"` StorageAddress string `json:"storage_address"` } -// HashDressName stores a hash, storage address and a name -type HashDressName struct { - Hash string `json:"hash"` +// ChecksumAddressName stores a checksum, a storage address, and a name +type ChecksumAddressName struct { + Checksum string `json:"checksum"` StorageAddress string `json:"storage_address"` Name string `json:"name"` } -// KeyHashDressName ... -type KeyHashDressName struct { +// KeyChecksumAddressName ... 
+type KeyChecksumAddressName struct { Key string `json:"key"` - Hash string `json:"hash"` + Checksum string `json:"checksum"` StorageAddress string `json:"storage_address"` Name string `json:"name"` } -// Model stores the traintupleKey leading to the model, its hash and storage addressl +// Model stores the traintupleKey leading to the model, its checksum and storage address type Model struct { Key string `json:"key"` TraintupleKey string `json:"traintuple_key"` - Hash string `json:"hash"` + Checksum string `json:"checksum"` StorageAddress string `json:"storage_address"` } @@ -295,15 +295,15 @@ type Dataset struct { type TtDataset struct { Key string `json:"key"` Worker string `json:"worker"` - DataSampleKeys []string `json:"keys"` - OpenerHash string `json:"opener_hash"` + DataSampleKeys []string `json:"data_sample_keys"` + OpenerChecksum string `json:"opener_checksum"` Perf float32 `json:"perf"` } // TtObjective stores info about a objective in a Traintuple type TtObjective struct { - Key string `json:"key"` - Metrics *HashDress `json:"metrics"` + Key string `json:"key"` + Metrics *ChecksumAddress `json:"metrics"` } // Node stores informations about node registered into the network, diff --git a/chaincode/ledger_db.go b/chaincode/ledger_db.go index 6390ba11..bf6a4146 100644 --- a/chaincode/ledger_db.go +++ b/chaincode/ledger_db.go @@ -200,7 +200,7 @@ func (db *LedgerDB) GetGenericTuple(key string) (GenericTuple, error) { if err != nil { return asset, err } - asset.Status, err = determineTupleStatus(db, asset.Status, asset.ComputePlanID) + asset.Status, err = determineTupleStatus(db, asset.Status, asset.ComputePlanKey) return asset, nil } @@ -323,7 +323,7 @@ func (db *LedgerDB) GetTraintuple(key string) (Traintuple, error) { if traintuple.AssetType != TraintupleType { return traintuple, errors.NotFound("traintuple %s not found", key) } - traintuple.Status, err = determineTupleStatus(db, traintuple.Status, traintuple.ComputePlanID) + traintuple.Status, err = 
determineTupleStatus(db, traintuple.Status, traintuple.ComputePlanKey) return traintuple, err } @@ -337,7 +337,7 @@ func (db *LedgerDB) GetCompositeTraintuple(key string) (CompositeTraintuple, err if traintuple.AssetType != CompositeTraintupleType { return traintuple, errors.NotFound("composite traintuple %s not found", key) } - traintuple.Status, err = determineTupleStatus(db, traintuple.Status, traintuple.ComputePlanID) + traintuple.Status, err = determineTupleStatus(db, traintuple.Status, traintuple.ComputePlanKey) return traintuple, err } @@ -351,18 +351,18 @@ func (db *LedgerDB) GetAggregatetuple(key string) (Aggregatetuple, error) { if aggregatetuple.AssetType != AggregatetupleType { return aggregatetuple, errors.NotFound("aggregatetuple %s not found", key) } - aggregatetuple.Status, err = determineTupleStatus(db, aggregatetuple.Status, aggregatetuple.ComputePlanID) + aggregatetuple.Status, err = determineTupleStatus(db, aggregatetuple.Status, aggregatetuple.ComputePlanKey) return aggregatetuple, err } // GetComputePlan fetches a ComputePlan from the ledger using its unique ID -func (db *LedgerDB) GetComputePlan(ID string) (ComputePlan, error) { +func (db *LedgerDB) GetComputePlan(key string) (ComputePlan, error) { computePlan := ComputePlan{} - if err := db.Get(ID, &computePlan); err != nil { + if err := db.Get(key, &computePlan); err != nil { return computePlan, err } if computePlan.AssetType != ComputePlanType { - return computePlan, errors.NotFound("compute plan %s not found", ID) + return computePlan, errors.NotFound("compute plan %s not found", key) } if err := db.Get(computePlan.StateKey, &(computePlan.State)); err != nil { return computePlan, err @@ -370,10 +370,10 @@ func (db *LedgerDB) GetComputePlan(ID string) (ComputePlan, error) { return computePlan, nil } -// GetOutModelKeyHashDress retrieves an out-Model from a tuple key. +// GetOutModelKeyChecksumAddress retrieves an out-Model from a tuple key. 
// In case of CompositeTraintuple it return its trunk model // Return an error if the tupleKey was not found. -func (db *LedgerDB) GetOutModelKeyHashDress(tupleKey string, allowedAssetTypes []AssetType) (*KeyHashDress, error) { +func (db *LedgerDB) GetOutModelKeyChecksumAddress(tupleKey string, allowedAssetTypes []AssetType) (*KeyChecksumAddress, error) { for _, assetType := range allowedAssetTypes { switch assetType { case CompositeTraintupleType: @@ -393,19 +393,19 @@ func (db *LedgerDB) GetOutModelKeyHashDress(tupleKey string, allowedAssetTypes [ return tuple.OutModel, nil } default: - return nil, errors.Internal("GetOutModelKeyHashDress: Unsupported asset type %s", assetType) + return nil, errors.Internal("GetOutModelKeyChecksumAddress: Unsupported asset type %s", assetType) } } return nil, errors.NotFound( - "GetOutModelKeyHashDress: Could not find tuple with key \"%s\". Allowed types: %v.", + "GetOutModelKeyChecksumAddress: Could not find tuple with key \"%s\". Allowed types: %v.", tupleKey, allowedAssetTypes) } -// GetOutHeadModelKeyHash retrieves an out-Head-Model from a composite traintuple key. +// GetOutHeadModelKeyChecksum retrieves an out-Head-Model from a composite traintuple key. // Return an error if the compositeTraintupleKey was not found. 
-func (db *LedgerDB) GetOutHeadModelKeyHash(compositeTraintupleKey string) (*KeyHash, error) { +func (db *LedgerDB) GetOutHeadModelKeyChecksum(compositeTraintupleKey string) (*KeyChecksum, error) { tuple, err := db.GetCompositeTraintuple(compositeTraintupleKey) if err != nil { return nil, err @@ -423,7 +423,7 @@ func (db *LedgerDB) GetTesttuple(key string) (Testtuple, error) { if testtuple.AssetType != TesttupleType { return testtuple, errors.NotFound("testtuple %s not found", key) } - testtuple.Status, err = determineTupleStatus(db, testtuple.Status, testtuple.ComputePlanID) + testtuple.Status, err = determineTupleStatus(db, testtuple.Status, testtuple.ComputePlanKey) return testtuple, err } @@ -513,15 +513,15 @@ func (db *LedgerDB) AddTupleEvent(tupleKey string) error { } // AddComputePlanEvent add the compute plan matching the ID to the event struct -func (db *LedgerDB) AddComputePlanEvent(ComputePlanID, status string, ModelsToDelete []string) error { +func (db *LedgerDB) AddComputePlanEvent(ComputePlanKey, status string, ModelsToDelete []string) error { if db.event == nil { db.event = &Event{} } cp := eventComputePlan{ - ComputePlanID: ComputePlanID, - Status: status, + ComputePlanKey: ComputePlanKey, + Status: status, } - algokeys, err := db.GetIndexKeys("algo~computeplanid~key", []string{"algo", ComputePlanID}) + algokeys, err := db.GetIndexKeys("algo~computeplankey~key", []string{"algo", ComputePlanKey}) if err != nil { return err } diff --git a/chaincode/ledger_db_test.go b/chaincode/ledger_db_test.go index b66fc61a..e1d6d67e 100644 --- a/chaincode/ledger_db_test.go +++ b/chaincode/ledger_db_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestGetOutModelKeyHashDress(t *testing.T) { +func TestGetOutModelKeyChecksumAddress(t *testing.T) { scc := new(SubstraChaincode) mockStub := NewMockStubWithRegisterNode("substra", scc) db := NewLedgerDB(mockStub) @@ -33,20 +33,20 @@ func TestGetOutModelKeyHashDress(t *testing.T) { // 1. 
Correct requests - _, err = db.GetOutModelKeyHashDress(regular, []AssetType{TraintupleType}) + _, err = db.GetOutModelKeyChecksumAddress(regular, []AssetType{TraintupleType}) assert.NoError(t, err, "the regular traintuple should be found when requesting regular traintuples") - _, err = db.GetOutHeadModelKeyHash(composite) + _, err = db.GetOutHeadModelKeyChecksum(composite) assert.NoError(t, err, "the composite traintuple should be found when requesting composite traintuples") - _, err = db.GetOutModelKeyHashDress(composite, []AssetType{CompositeTraintupleType}) + _, err = db.GetOutModelKeyChecksumAddress(composite, []AssetType{CompositeTraintupleType}) assert.NoError(t, err, "the composite traintuple should be found when requesting composite traintuples") // 2. Incorrect requests - _, err = db.GetOutModelKeyHashDress(regular, []AssetType{CompositeTraintupleType}) + _, err = db.GetOutModelKeyChecksumAddress(regular, []AssetType{CompositeTraintupleType}) assert.Error(t, err, "the regular traintuple should not be found when requesting composite traintuples only") - _, err = db.GetOutModelKeyHashDress(composite, []AssetType{TraintupleType}) + _, err = db.GetOutModelKeyChecksumAddress(composite, []AssetType{TraintupleType}) assert.Error(t, err, "the composite traintuple should be found when requesting regular traintuples only") } diff --git a/chaincode/main_test.go b/chaincode/main_test.go index e2aa8e99..705b56a6 100644 --- a/chaincode/main_test.go +++ b/chaincode/main_test.go @@ -29,35 +29,35 @@ import ( ) const objectiveKey = "5c1d9cd1-c2c1-082d-de09-21b56d11030c" -const objectiveDescriptionHash = "5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379" -const objectiveMetricsHash = "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379" +const objectiveDescriptionChecksum = "5c1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379" +const objectiveMetricsChecksum = "4a1d9cd1c2c1082dde0921b56d11030c81f62fbb51932758b58ac2569dd0b379" const 
objectiveMetricsStorageAddress = "https://toto/objective/222/metrics" const dataManagerKey = "da1bb7c3-1f62-244c-0f3a-761cc1688042" -const dataManagerOpenerHash = "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc" +const dataManagerOpenerChecksum = "da1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc" const trainDataSampleKey1 = "aa1bb7c3-1f62-244c-0f3a-761cc1688042" const trainDataSampleKey2 = "aa2bb7c3-1f62-244c-0f3a-761cc1688042" const testDataSampleKey1 = "bb1bb7c3-1f62-244c-0f3a-761cc1688042" const testDataSampleKey2 = "bb2bb7c3-1f62-244c-0f3a-761cc1688042" const algoKey = "fd1bb7c3-1f62-244c-0f3a-761cc1688042" const algoKey2 = "cccbb7c3-1f62-244c-0f3a-761cc1688042" -const algoHash = "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc" +const algoChecksum = "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcc" const algoStorageAddress = "https://toto/algo/222/algo" const algoName = "hog + svm" const compositeAlgoKey = "cccbb7c3-1f62-244c-0f3a-761cc1688042" -const compositeAlgoHash = "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcd" +const compositeAlgoChecksum = "fd1bb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482dcd" const compositeAlgoStorageAddress = "https://toto/compositeAlgo/222/algo" const compositeAlgoName = "hog + svm composite" const aggregateAlgoKey = "dddbb7c3-1f62-244c-0f3a-761cc1688042" -const aggregateAlgoHash = "dddbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482ddd" +const aggregateAlgoChecksum = "dddbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482ddd" const aggregateAlgoStorageAddress = "https://toto/aggregateAlgo/222/algo" const aggregateAlgoName = "hog + svm aggregate" const modelKey = "eedbb7c3-1f62-244c-0f3a-761cc1688042" -const modelHash = "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed" +const modelChecksum = "eedbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482eed" const modelAddress = 
"https://substrabac/model/toto" const headModelKey = modelKey -const headModelHash = modelHash +const headModelChecksum = modelChecksum const trunkModelKey = "ccdbb7c3-1f62-244c-0f3a-761cc1688042" -const trunkModelHash = "ccdbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482ecc" +const trunkModelChecksum = "ccdbb7c31f62244c0f3a761cc168804227115793d01c270021fe3f7935482ecc" const trunkModelAddress = "https://substrabac/model/titi" const worker = "SampleOrg" const traintupleKey = "b0289ab8-3a71-f01e-2b72-0259a6452244" @@ -68,8 +68,8 @@ const testtupleKey = "dadada11-50f6-26d3-fa86-1bf6387e3896" const testtupleKey2 = "bbbada11-50f6-26d3-fa86-1bf6387e3896" const testtupleKey3 = "cccada11-50f6-26d3-fa86-1bf6387e3896" const tag = "a tag is simply a string" -const computePlanID = "00000000-50f6-26d3-fa86-1bf6387e3896" -const computePlanID2 = "11111111-50f6-26d3-fa86-1bf6387e3896" +const computePlanKey = "00000000-50f6-26d3-fa86-1bf6387e3896" +const computePlanKey2 = "11111111-50f6-26d3-fa86-1bf6387e3896" const computePlanTraintupleKey1 = "11000000-50f6-26d3-fa86-1bf6387e3896" const computePlanTraintupleKey2 = "22000000-50f6-26d3-fa86-1bf6387e3896" const computePlanTraintupleKey3 = "33000000-50f6-26d3-fa86-1bf6387e3896" diff --git a/chaincode/objective.go b/chaincode/objective.go index 043d4bfc..31758ad7 100644 --- a/chaincode/objective.go +++ b/chaincode/objective.go @@ -44,13 +44,13 @@ func (objective *Objective) Set(db *LedgerDB, inp inputObjective) (dataManagerKe objective.Key = inp.Key objective.AssetType = ObjectiveType objective.Name = inp.Name - objective.Description = &HashDress{ - Hash: inp.DescriptionHash, + objective.Description = &ChecksumAddress{ + Checksum: inp.DescriptionChecksum, StorageAddress: inp.DescriptionStorageAddress, } - objective.Metrics = &HashDressName{ + objective.Metrics = &ChecksumAddressName{ Name: inp.MetricsName, - Hash: inp.MetricsHash, + Checksum: inp.MetricsChecksum, StorageAddress: inp.MetricsStorageAddress, } 
objective.Metadata = inp.Metadata diff --git a/chaincode/objective_test.go b/chaincode/objective_test.go index afa2faee..bc091d16 100644 --- a/chaincode/objective_test.go +++ b/chaincode/objective_test.go @@ -61,6 +61,10 @@ func TestLeaderBoard(t *testing.T) { require.Len(t, leaderboard.Testtuples, 1) assert.Equal(t, keyMap.Key, leaderboard.Testtuples[0].Key) assert.Equal(t, traintupleKey, leaderboard.Testtuples[0].TraintupleKey) + assert.Equal(t, algoKey, leaderboard.Testtuples[0].Algo.Key) + assert.Equal(t, algoChecksum, leaderboard.Testtuples[0].Algo.Checksum) + assert.Equal(t, algoName, leaderboard.Testtuples[0].Algo.Name) + assert.Equal(t, algoStorageAddress, leaderboard.Testtuples[0].Algo.StorageAddress) } func TestRegisterObjectiveWhitoutDataset(t *testing.T) { scc := new(SubstraChaincode) @@ -112,11 +116,11 @@ func TestObjective(t *testing.T) { // Add objective with invalid field inpObjective := inputObjective{ - DescriptionHash: "aaa", + DescriptionChecksum: "aaa", } args := inpObjective.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid checksum, status %d and message %s", resp.Status, resp.Message) // Add objective with unexisting test dataSample inpObjective = inputObjective{} @@ -138,7 +142,7 @@ func TestObjective(t *testing.T) { objectiveKey, "when adding objective: unexpected returned objective key - %s / %s", objectiveKey, - inpObjective.DescriptionHash) + inpObjective.DescriptionChecksum) // Query objective from key and check the consistency of returned arguments args = [][]byte{[]byte("queryObjective"), keyToJSON(objectiveKey)} @@ -156,15 +160,15 @@ func TestObjective(t *testing.T) { Metadata: map[string]string{}, }, Name: inpObjective.Name, - Description: &HashDress{ + Description: &ChecksumAddress{ StorageAddress: 
inpObjective.DescriptionStorageAddress, - Hash: objectiveDescriptionHash, + Checksum: objectiveDescriptionChecksum, }, Permissions: outputPermissions{ Process: Permission{Public: true, AuthorizedIDs: []string{}}, }, - Metrics: &HashDressName{ - Hash: inpObjective.MetricsHash, + Metrics: &ChecksumAddressName{ + Checksum: inpObjective.MetricsChecksum, Name: inpObjective.MetricsName, StorageAddress: inpObjective.MetricsStorageAddress, }, diff --git a/chaincode/output.go b/chaincode/output.go index 47c8660f..3665435c 100644 --- a/chaincode/output.go +++ b/chaincode/output.go @@ -25,14 +25,14 @@ const OutputAssetPaginationHardLimit = 500 // Struct use as output representation of ledger data type outputObjective struct { - Key string `json:"key"` - Name string `json:"name"` - Description *HashDress `json:"description"` - Metrics *HashDressName `json:"metrics"` - Owner string `json:"owner"` - TestDataset *Dataset `json:"test_dataset"` - Permissions outputPermissions `json:"permissions"` - Metadata map[string]string `json:"metadata"` + Key string `json:"key"` + Name string `json:"name"` + Description *ChecksumAddress `json:"description"` + Metrics *ChecksumAddressName `json:"metrics"` + Owner string `json:"owner"` + TestDataset *Dataset `json:"test_dataset"` + Permissions outputPermissions `json:"permissions"` + Metadata map[string]string `json:"metadata"` } func (out *outputObjective) Fill(in Objective) { @@ -52,11 +52,11 @@ func (out *outputObjective) Fill(in Objective) { // outputDataManager is the return representation of the DataManager type stored in the ledger type outputDataManager struct { ObjectiveKey string `json:"objective_key"` - Description *HashDress `json:"description"` + Description *ChecksumAddress `json:"description"` Key string `json:"key"` Metadata map[string]string `json:"metadata"` Name string `json:"name"` - Opener *HashDress `json:"opener"` + Opener *ChecksumAddress `json:"opener"` Owner string `json:"owner"` Permissions outputPermissions 
`json:"permissions"` Type string `json:"type"` @@ -103,8 +103,8 @@ func (out *outputDataset) Fill(in DataManager, trainKeys []string, testKeys []st type outputAlgo struct { Key string `json:"key"` Name string `json:"name"` - Content *HashDress `json:"content"` - Description *HashDress `json:"description"` + Content *ChecksumAddress `json:"content"` + Description *ChecksumAddress `json:"description"` Owner string `json:"owner"` Permissions outputPermissions `json:"permissions"` Metadata map[string]string `json:"metadata"` @@ -113,8 +113,8 @@ type outputAlgo struct { func (out *outputAlgo) Fill(in Algo) { out.Key = in.Key out.Name = in.Name - out.Content = &HashDress{ - Hash: in.Hash, + out.Content = &ChecksumAddress{ + Checksum: in.Checksum, StorageAddress: in.StorageAddress, } out.Description = in.Description @@ -127,27 +127,27 @@ func (out *outputAlgo) Fill(in Algo) { type outputTtDataset struct { Key string `json:"key"` Worker string `json:"worker"` - DataSampleKeys []string `json:"keys"` - OpenerHash string `json:"opener_hash"` + DataSampleKeys []string `json:"data_sample_keys"` + OpenerChecksum string `json:"opener_checksum"` Metadata map[string]string `json:"metadata"` } // outputTraintuple is the representation of one the element type stored in the // ledger. 
It describes a training task occuring on the platform type outputTraintuple struct { - Key string `json:"key"` - Algo *KeyHashDressName `json:"algo"` - Creator string `json:"creator"` - Dataset *outputTtDataset `json:"dataset"` - ComputePlanID string `json:"compute_plan_id"` - InModels []*Model `json:"in_models"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - OutModel *KeyHashDress `json:"out_model"` - Permissions outputPermissions `json:"permissions"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` + Key string `json:"key"` + Algo *KeyChecksumAddressName `json:"algo"` + Creator string `json:"creator"` + Dataset *outputTtDataset `json:"dataset"` + ComputePlanKey string `json:"compute_plan_key"` + InModels []*Model `json:"in_models"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + OutModel *KeyChecksumAddress `json:"out_model"` + Permissions outputPermissions `json:"permissions"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` } //Fill is a method of the receiver outputTraintuple. 
It returns all elements necessary to do a training task from a trainuple stored in the ledger @@ -160,7 +160,7 @@ func (outputTraintuple *outputTraintuple) Fill(db *LedgerDB, traintuple Traintup outputTraintuple.Metadata = initMapOutput(traintuple.Metadata) outputTraintuple.Status = traintuple.Status outputTraintuple.Rank = traintuple.Rank - outputTraintuple.ComputePlanID = traintuple.ComputePlanID + outputTraintuple.ComputePlanKey = traintuple.ComputePlanKey outputTraintuple.OutModel = traintuple.OutModel outputTraintuple.Tag = traintuple.Tag // fill algo @@ -169,10 +169,10 @@ func (outputTraintuple *outputTraintuple) Fill(db *LedgerDB, traintuple Traintup err = errors.Internal("could not retrieve algo with key %s - %s", traintuple.AlgoKey, err.Error()) return } - outputTraintuple.Algo = &KeyHashDressName{ + outputTraintuple.Algo = &KeyChecksumAddressName{ Key: algo.Key, Name: algo.Name, - Hash: algo.Hash, + Checksum: algo.Checksum, StorageAddress: algo.StorageAddress} // fill inModels @@ -189,7 +189,7 @@ func (outputTraintuple *outputTraintuple) Fill(db *LedgerDB, traintuple Traintup } if parentTraintuple.OutModel != nil { inModel.Key = parentTraintuple.Key - inModel.Hash = parentTraintuple.OutModel.Hash + inModel.Checksum = parentTraintuple.OutModel.Checksum inModel.StorageAddress = parentTraintuple.OutModel.StorageAddress } outputTraintuple.InModels = append(outputTraintuple.InModels, inModel) @@ -206,7 +206,7 @@ func (outputTraintuple *outputTraintuple) Fill(db *LedgerDB, traintuple Traintup Key: dataManager.Key, Worker: traintuple.Dataset.Worker, DataSampleKeys: traintuple.Dataset.DataSampleKeys, - OpenerHash: dataManager.Opener.Hash, + OpenerChecksum: dataManager.Opener.Checksum, Metadata: initMapOutput(traintuple.Dataset.Metadata), } @@ -214,26 +214,26 @@ func (outputTraintuple *outputTraintuple) Fill(db *LedgerDB, traintuple Traintup } type outputTesttuple struct { - Algo *KeyHashDressName `json:"algo"` - Certified bool `json:"certified"` - ComputePlanID 
string `json:"compute_plan_id"` - Creator string `json:"creator"` - Dataset *TtDataset `json:"dataset"` - Key string `json:"key"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - Objective *TtObjective `json:"objective"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` - TraintupleKey string `json:"traintuple_key"` - TraintupleType string `json:"traintuple_type"` + Algo *KeyChecksumAddressName `json:"algo"` + Certified bool `json:"certified"` + ComputePlanKey string `json:"compute_plan_key"` + Creator string `json:"creator"` + Dataset *TtDataset `json:"dataset"` + Key string `json:"key"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + Objective *TtObjective `json:"objective"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` + TraintupleKey string `json:"traintuple_key"` + TraintupleType string `json:"traintuple_type"` } func (out *outputTesttuple) Fill(db *LedgerDB, in Testtuple) error { out.Key = in.Key out.Certified = in.Certified - out.ComputePlanID = in.ComputePlanID + out.ComputePlanKey = in.ComputePlanKey out.Creator = in.Creator out.Dataset = in.Dataset out.Log = in.Log @@ -271,10 +271,10 @@ func (out *outputTesttuple) Fill(db *LedgerDB, in Testtuple) error { } algo = aggregateAlgo.Algo } - out.Algo = &KeyHashDressName{ + out.Algo = &KeyChecksumAddressName{ Key: algo.Key, Name: algo.Name, - Hash: algo.Hash, + Checksum: algo.Checksum, StorageAddress: algo.StorageAddress} // fill objective @@ -285,8 +285,8 @@ func (out *outputTesttuple) Fill(db *LedgerDB, in Testtuple) error { if objective.Metrics == nil { return errors.Internal("objective %s is missing metrics values", in.ObjectiveKey) } - metrics := HashDress{ - Hash: objective.Metrics.Hash, + metrics := ChecksumAddress{ + Checksum: objective.Metrics.Checksum, StorageAddress: objective.Metrics.StorageAddress, } out.Objective = &TtObjective{ @@ -321,13 +321,13 @@ type Event struct { type 
eventComputePlan struct { AlgoKeys []string `json:"algo_keys"` - ComputePlanID string `json:"compute_plan_id"` + ComputePlanKey string `json:"compute_plan_key"` ModelsToDelete []string `json:"models_to_delete"` Status string `json:"status"` } type outputComputePlan struct { - ComputePlanID string `json:"compute_plan_id"` + Key string `json:"key"` TraintupleKeys []string `json:"traintuple_keys"` AggregatetupleKeys []string `json:"aggregatetuple_keys"` CompositeTraintupleKeys []string `json:"composite_traintuple_keys"` @@ -342,7 +342,7 @@ type outputComputePlan struct { } func (out *outputComputePlan) Fill(key string, in ComputePlan, newIDs []string) { - out.ComputePlanID = key + out.Key = key nb := getLimitedNbSliceElements(in.TraintupleKeys) out.TraintupleKeys = in.TraintupleKeys[:nb] nb = getLimitedNbSliceElements(in.AggregatetupleKeys) @@ -395,12 +395,12 @@ func (out outputBoardTuples) Less(i, j int) bool { } type outputBoardTuple struct { - Algo *HashDressName `json:"algo"` - Creator string `json:"creator"` - Key string `json:"key"` - TraintupleKey string `json:"traintuple_key"` - Perf float32 `json:"perf"` - Tag string `json:"tag"` + Algo *KeyChecksumAddressName `json:"algo"` + Creator string `json:"creator"` + Key string `json:"key"` + TraintupleKey string `json:"traintuple_key"` + Perf float32 `json:"perf"` + Tag string `json:"tag"` } func (out *outputBoardTuple) Fill(db *LedgerDB, in Testtuple, testtupleKey string) error { @@ -410,9 +410,10 @@ func (out *outputBoardTuple) Fill(db *LedgerDB, in Testtuple, testtupleKey strin if err != nil { return err } - out.Algo = &HashDressName{ + out.Algo = &KeyChecksumAddressName{ + Key: algo.Key, Name: algo.Name, - Hash: algo.Hash, + Checksum: algo.Checksum, StorageAddress: algo.StorageAddress, } out.TraintupleKey = in.TraintupleKey diff --git a/chaincode/output_aggregate.go b/chaincode/output_aggregate.go index da3a300a..88aaa9bb 100644 --- a/chaincode/output_aggregate.go +++ b/chaincode/output_aggregate.go @@ -17,19 
+17,19 @@ package main import "chaincode/errors" type outputAggregatetuple struct { - Key string `json:"key"` - Algo *KeyHashDressName `json:"algo"` - Creator string `json:"creator"` - ComputePlanID string `json:"compute_plan_id"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - InModels []*Model `json:"in_models"` - OutModel *KeyHashDress `json:"out_model"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` - Permissions outputPermissions `json:"permissions"` - Worker string `json:"worker"` + Key string `json:"key"` + Algo *KeyChecksumAddressName `json:"algo"` + Creator string `json:"creator"` + ComputePlanKey string `json:"compute_plan_key"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + InModels []*Model `json:"in_models"` + OutModel *KeyChecksumAddress `json:"out_model"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` + Permissions outputPermissions `json:"permissions"` + Worker string `json:"worker"` } type outputAggregateAlgo struct { @@ -48,7 +48,7 @@ func (outputAggregatetuple *outputAggregatetuple) Fill(db *LedgerDB, traintuple outputAggregatetuple.Metadata = initMapOutput(traintuple.Metadata) outputAggregatetuple.Status = traintuple.Status outputAggregatetuple.Rank = traintuple.Rank - outputAggregatetuple.ComputePlanID = traintuple.ComputePlanID + outputAggregatetuple.ComputePlanKey = traintuple.ComputePlanKey outputAggregatetuple.OutModel = traintuple.OutModel outputAggregatetuple.Tag = traintuple.Tag algo, err := db.GetAggregateAlgo(traintuple.AlgoKey) @@ -56,10 +56,10 @@ func (outputAggregatetuple *outputAggregatetuple) Fill(db *LedgerDB, traintuple err = errors.Internal("could not retrieve aggregate algo with key %s - %s", traintuple.AlgoKey, err.Error()) return } - outputAggregatetuple.Algo = &KeyHashDressName{ + outputAggregatetuple.Algo = &KeyChecksumAddressName{ Key: algo.Key, Name: algo.Name, - Hash: algo.Hash, + Checksum: 
algo.Checksum, StorageAddress: algo.StorageAddress} // fill inModels @@ -67,7 +67,7 @@ func (outputAggregatetuple *outputAggregatetuple) Fill(db *LedgerDB, traintuple if inModelKey == "" { break } - keyHashDress, _err := db.GetOutModelKeyHashDress(inModelKey, []AssetType{TraintupleType, CompositeTraintupleType, AggregatetupleType}) + keyChecksumAddress, _err := db.GetOutModelKeyChecksumAddress(inModelKey, []AssetType{TraintupleType, CompositeTraintupleType, AggregatetupleType}) if _err != nil { err = errors.Internal("could not fill in-model with key \"%s\": %s", inModelKey, _err.Error()) return @@ -75,10 +75,10 @@ func (outputAggregatetuple *outputAggregatetuple) Fill(db *LedgerDB, traintuple inModel := &Model{ TraintupleKey: inModelKey, } - if keyHashDress != nil { - inModel.Key = keyHashDress.Key - inModel.Hash = keyHashDress.Hash - inModel.StorageAddress = keyHashDress.StorageAddress + if keyChecksumAddress != nil { + inModel.Key = keyChecksumAddress.Key + inModel.Checksum = keyChecksumAddress.Checksum + inModel.StorageAddress = keyChecksumAddress.StorageAddress } outputAggregatetuple.InModels = append(outputAggregatetuple.InModels, inModel) } diff --git a/chaincode/output_composite.go b/chaincode/output_composite.go index f3718a53..250c5aba 100644 --- a/chaincode/output_composite.go +++ b/chaincode/output_composite.go @@ -21,30 +21,30 @@ type outputCompositeAlgo struct { } type outputCompositeTraintuple struct { - Key string `json:"key"` - Algo *KeyHashDressName `json:"algo"` - Creator string `json:"creator"` - Dataset *outputTtDataset `json:"dataset"` - ComputePlanID string `json:"compute_plan_id"` - InHeadModel *Model `json:"in_head_model"` - InTrunkModel *Model `json:"in_trunk_model"` - Log string `json:"log"` - Metadata map[string]string `json:"metadata"` - OutHeadModel outHeadModelComposite `json:"out_head_model"` - OutTrunkModel outModelComposite `json:"out_trunk_model"` - Rank int `json:"rank"` - Status string `json:"status"` - Tag string `json:"tag"` + 
Key string `json:"key"` + Algo *KeyChecksumAddressName `json:"algo"` + Creator string `json:"creator"` + Dataset *outputTtDataset `json:"dataset"` + ComputePlanKey string `json:"compute_plan_key"` + InHeadModel *Model `json:"in_head_model"` + InTrunkModel *Model `json:"in_trunk_model"` + Log string `json:"log"` + Metadata map[string]string `json:"metadata"` + OutHeadModel outHeadModelComposite `json:"out_head_model"` + OutTrunkModel outModelComposite `json:"out_trunk_model"` + Rank int `json:"rank"` + Status string `json:"status"` + Tag string `json:"tag"` } type outHeadModelComposite struct { - OutModel *KeyHash `json:"out_model"` + OutModel *KeyChecksum `json:"out_model"` Permissions outputPermissions `json:"permissions"` } type outModelComposite struct { - OutModel *KeyHashDress `json:"out_model"` - Permissions outputPermissions `json:"permissions"` + OutModel *KeyChecksumAddress `json:"out_model"` + Permissions outputPermissions `json:"permissions"` } //Fill is a method of the receiver outputCompositeTraintuple. 
It returns all elements necessary to do a training task from a trainuple stored in the ledger @@ -56,7 +56,7 @@ func (outputCompositeTraintuple *outputCompositeTraintuple) Fill(db *LedgerDB, t outputCompositeTraintuple.Metadata = initMapOutput(traintuple.Metadata) outputCompositeTraintuple.Status = traintuple.Status outputCompositeTraintuple.Rank = traintuple.Rank - outputCompositeTraintuple.ComputePlanID = traintuple.ComputePlanID + outputCompositeTraintuple.ComputePlanKey = traintuple.ComputePlanKey outputCompositeTraintuple.OutHeadModel = outHeadModelComposite{ OutModel: traintuple.OutHeadModel.OutModel, Permissions: getOutPermissions(traintuple.OutHeadModel.Permissions)} @@ -70,16 +70,16 @@ func (outputCompositeTraintuple *outputCompositeTraintuple) Fill(db *LedgerDB, t err = errors.Internal("could not retrieve composite algo with key %s - %s", traintuple.AlgoKey, err.Error()) return } - outputCompositeTraintuple.Algo = &KeyHashDressName{ + outputCompositeTraintuple.Algo = &KeyChecksumAddressName{ Key: algo.Key, Name: algo.Name, - Hash: algo.Hash, + Checksum: algo.Checksum, StorageAddress: algo.StorageAddress} // fill in-model (head) if traintuple.InHeadModel != "" { // Head can only be a composite traintuple's head out model - outHeadModel, _err := db.GetOutHeadModelKeyHash(traintuple.InHeadModel) + outHeadModel, _err := db.GetOutHeadModelKeyChecksum(traintuple.InHeadModel) if _err != nil { err = errors.Internal("could not fill (head) in-model with key \"%s\": %s", traintuple.InHeadModel, _err.Error()) return @@ -89,7 +89,7 @@ func (outputCompositeTraintuple *outputCompositeTraintuple) Fill(db *LedgerDB, t if outHeadModel != nil { outputCompositeTraintuple.InHeadModel.Key = outHeadModel.Key - outputCompositeTraintuple.InHeadModel.Hash = outHeadModel.Hash + outputCompositeTraintuple.InHeadModel.Checksum = outHeadModel.Checksum } } @@ -99,7 +99,7 @@ func (outputCompositeTraintuple *outputCompositeTraintuple) Fill(db *LedgerDB, t // - a traintuple's out model // 
- a composite traintuple's head out model // - an aggregate tuple's out model - outModel, _err := db.GetOutModelKeyHashDress(traintuple.InTrunkModel, []AssetType{TraintupleType, CompositeTraintupleType, AggregatetupleType}) + outModel, _err := db.GetOutModelKeyChecksumAddress(traintuple.InTrunkModel, []AssetType{TraintupleType, CompositeTraintupleType, AggregatetupleType}) if _err != nil { err = errors.Internal("could not fill (trunk) in-model with key \"%s\": %s", traintuple.InTrunkModel, _err.Error()) return @@ -109,7 +109,7 @@ func (outputCompositeTraintuple *outputCompositeTraintuple) Fill(db *LedgerDB, t if outModel != nil { outputCompositeTraintuple.InTrunkModel.Key = outModel.Key - outputCompositeTraintuple.InTrunkModel.Hash = outModel.Hash + outputCompositeTraintuple.InTrunkModel.Checksum = outModel.Checksum outputCompositeTraintuple.InTrunkModel.StorageAddress = outModel.StorageAddress } } @@ -125,7 +125,7 @@ func (outputCompositeTraintuple *outputCompositeTraintuple) Fill(db *LedgerDB, t Key: dataManager.Key, Worker: traintuple.Dataset.Worker, DataSampleKeys: traintuple.Dataset.DataSampleKeys, - OpenerHash: dataManager.Opener.Hash, + OpenerChecksum: dataManager.Opener.Checksum, Metadata: initMapOutput(traintuple.Dataset.Metadata), } diff --git a/chaincode/output_test.go b/chaincode/output_test.go index 72b6298f..aef5ea30 100644 --- a/chaincode/output_test.go +++ b/chaincode/output_test.go @@ -43,5 +43,4 @@ func TestLeaderboardSort(t *testing.T) { for i, boardTuple := range boardTuples { assert.EqualValues(t, float32(unOrderedPerf[i]), boardTuple.Perf) } - } diff --git a/chaincode/testtuple.go b/chaincode/testtuple.go index 88e7e2cd..1d769adf 100644 --- a/chaincode/testtuple.go +++ b/chaincode/testtuple.go @@ -92,7 +92,7 @@ func (testtuple *Testtuple) SetFromInput(db *LedgerDB, inp inputTesttuple) error Key: dataManager.Key, Worker: dataManager.Owner, DataSampleKeys: dataSampleKeys, - OpenerHash: dataManager.Opener.Hash, + OpenerChecksum: 
dataManager.Opener.Checksum, } return nil } @@ -127,7 +127,7 @@ func (testtuple *Testtuple) SetFromTraintuple(db *LedgerDB, traintupleKey string tupleCreator = traintuple.Creator status = traintuple.Status testtuple.AlgoKey = traintuple.AlgoKey - testtuple.ComputePlanID = traintuple.ComputePlanID + testtuple.ComputePlanKey = traintuple.ComputePlanKey testtuple.Rank = traintuple.Rank case CompositeTraintupleType: compositeTraintuple, err := db.GetCompositeTraintuple(traintupleKey) @@ -138,7 +138,7 @@ func (testtuple *Testtuple) SetFromTraintuple(db *LedgerDB, traintupleKey string tupleCreator = compositeTraintuple.Creator status = compositeTraintuple.Status testtuple.AlgoKey = compositeTraintuple.AlgoKey - testtuple.ComputePlanID = compositeTraintuple.ComputePlanID + testtuple.ComputePlanKey = compositeTraintuple.ComputePlanKey testtuple.Rank = compositeTraintuple.Rank case AggregatetupleType: tuple, err := db.GetAggregatetuple(traintupleKey) @@ -149,7 +149,7 @@ func (testtuple *Testtuple) SetFromTraintuple(db *LedgerDB, traintupleKey string tupleCreator = tuple.Creator status = tuple.Status testtuple.AlgoKey = tuple.AlgoKey - testtuple.ComputePlanID = tuple.ComputePlanID + testtuple.ComputePlanKey = tuple.ComputePlanKey testtuple.Rank = tuple.Rank default: return errors.BadRequest("key %s is not a valid traintuple", traintupleKey) @@ -173,15 +173,15 @@ func (testtuple *Testtuple) SetFromTraintuple(db *LedgerDB, traintupleKey string // AddToComputePlan add the testtuple to the compute plan of it's model func (testtuple *Testtuple) AddToComputePlan(db *LedgerDB, testtupleKey string) error { - if testtuple.ComputePlanID == "" { + if testtuple.ComputePlanKey == "" { return nil } - computePlan, err := db.GetComputePlan(testtuple.ComputePlanID) + computePlan, err := db.GetComputePlan(testtuple.ComputePlanKey) if err != nil { return err } computePlan.AddTuple(TesttupleType, testtupleKey, testtuple.Status) - err = computePlan.Save(db, testtuple.ComputePlanID) + err = 
computePlan.Save(db, testtuple.ComputePlanKey) if err != nil { return err } @@ -449,7 +449,7 @@ func (testtuple *Testtuple) commitStatusUpdate(db *LedgerDB, testtupleKey string if err := db.UpdateIndex(indexName, oldAttributes, newAttributes); err != nil { return err } - if err := UpdateComputePlanState(db, testtuple.ComputePlanID, newStatus, testtupleKey); err != nil { + if err := UpdateComputePlanState(db, testtuple.ComputePlanKey, newStatus, testtupleKey); err != nil { return err } logger.Infof("testtuple %s status updated: %s (from=%s)", testtupleKey, newStatus, oldStatus) diff --git a/chaincode/testtuple_test.go b/chaincode/testtuple_test.go index f0493b5b..4c255dfb 100644 --- a/chaincode/testtuple_test.go +++ b/chaincode/testtuple_test.go @@ -118,28 +118,28 @@ func TestQueryTesttuple(t *testing.T) { traintupleKey string expectedTypeString string expectedAlgoName string - expectedAlgoHash string + expectedAlgoChecksum string expectedAlgoStorageAddress string }{ { traintupleKey: traintupleKey, expectedTypeString: "traintuple", expectedAlgoName: algoName, - expectedAlgoHash: algoHash, + expectedAlgoChecksum: algoChecksum, expectedAlgoStorageAddress: algoStorageAddress, }, { traintupleKey: compositeTraintupleKey, expectedTypeString: "composite_traintuple", expectedAlgoName: compositeAlgoName, - expectedAlgoHash: compositeAlgoHash, + expectedAlgoChecksum: compositeAlgoChecksum, expectedAlgoStorageAddress: compositeAlgoStorageAddress, }, { traintupleKey: aggregatetupleKey, expectedTypeString: "aggregatetuple", expectedAlgoName: aggregateAlgoName, - expectedAlgoHash: aggregateAlgoHash, + expectedAlgoChecksum: aggregateAlgoChecksum, expectedAlgoStorageAddress: aggregateAlgoStorageAddress, }, } @@ -175,11 +175,11 @@ func TestQueryTesttuple(t *testing.T) { assert.Equal(t, inpTesttuple.TraintupleKey, testtuple.TraintupleKey) assert.Equal(t, tt.expectedTypeString, testtuple.TraintupleType) assert.Equal(t, tt.expectedAlgoName, testtuple.Algo.Name) - assert.Equal(t, 
tt.expectedAlgoHash, testtuple.Algo.Hash) + assert.Equal(t, tt.expectedAlgoChecksum, testtuple.Algo.Checksum) assert.Equal(t, tt.expectedAlgoStorageAddress, testtuple.Algo.StorageAddress) assert.Equal(t, StatusWaiting, testtuple.Status) assert.Equal(t, objectiveKey, testtuple.Objective.Key) - assert.Equal(t, objectiveMetricsHash, testtuple.Objective.Metrics.Hash) + assert.Equal(t, objectiveMetricsChecksum, testtuple.Objective.Metrics.Checksum) assert.Equal(t, objectiveMetricsStorageAddress, testtuple.Objective.Metrics.StorageAddress) assert.Equal(t, "", testtuple.Log) assert.Equal(t, "", testtuple.Tag) @@ -187,7 +187,7 @@ func TestQueryTesttuple(t *testing.T) { assert.Equal(t, dataManagerKey, testtuple.Dataset.Key) assert.Equal(t, dataSampleKeys, testtuple.Dataset.DataSampleKeys) assert.Equal(t, dataManagerKey, testtuple.Dataset.Key) - assert.Equal(t, dataManagerOpenerHash, testtuple.Dataset.OpenerHash) + assert.Equal(t, dataManagerOpenerChecksum, testtuple.Dataset.OpenerChecksum) assert.False(t, testtuple.Certified) }) } diff --git a/chaincode/traintuple.go b/chaincode/traintuple.go index 97df809c..d70ee965 100644 --- a/chaincode/traintuple.go +++ b/chaincode/traintuple.go @@ -42,7 +42,7 @@ func (traintuple *Traintuple) SetFromInput(db *LedgerDB, inp inputTraintuple) er traintuple.Key = inp.Key traintuple.AssetType = TraintupleType traintuple.Creator = creator - traintuple.ComputePlanID = inp.ComputePlanID + traintuple.ComputePlanKey = inp.ComputePlanKey traintuple.Metadata = inp.Metadata traintuple.Tag = inp.Tag algo, err := db.GetAlgo(inp.AlgoKey) @@ -107,15 +107,15 @@ func (traintuple *Traintuple) SetFromParents(db *LedgerDB, inModels []string) er // AddToComputePlan set the traintuple's parameters that determines if it's part of on ComputePlan and how. 
// It uses the inputTraintuple values as follow: -// - If neither ComputePlanID nor rank is set it returns immediately -// - If rank is 0 and ComputePlanID empty, it's start a new one using this traintuple key -// - If rank and ComputePlanID are set, it checks if there are coherent with previous ones and set it. +// - If neither ComputePlanKey nor rank is set it returns immediately +// - If rank is 0 and ComputePlanKey empty, it's start a new one using this traintuple key +// - If rank and ComputePlanKey are set, it checks if there are coherent with previous ones and set it. // Use checkComputePlanAvailability to ensure the compute plan exists and no other tuple is registered with the same worker/rank func (traintuple *Traintuple) AddToComputePlan(db *LedgerDB, inp inputTraintuple, traintupleKey string, checkComputePlanAvailability bool) error { - // check ComputePlanID and Rank and set it when required + // check ComputePlanKey and Rank and set it when required var err error if inp.Rank == "" { - if inp.ComputePlanID != "" { + if inp.ComputePlanKey != "" { return errors.BadRequest("invalid inputs, a ComputePlan should have a rank") } return nil @@ -124,13 +124,13 @@ func (traintuple *Traintuple) AddToComputePlan(db *LedgerDB, inp inputTraintuple if err != nil { return err } - traintuple.ComputePlanID = inp.ComputePlanID - computePlan, err := db.GetComputePlan(inp.ComputePlanID) + traintuple.ComputePlanKey = inp.ComputePlanKey + computePlan, err := db.GetComputePlan(inp.ComputePlanKey) if err != nil { return err } computePlan.AddTuple(TraintupleType, traintupleKey, traintuple.Status) - err = computePlan.Save(db, traintuple.ComputePlanID) + err = computePlan.Save(db, traintuple.ComputePlanKey) if err != nil { return err } @@ -139,11 +139,11 @@ func (traintuple *Traintuple) AddToComputePlan(db *LedgerDB, inp inputTraintuple return nil } var ttKeys []string - ttKeys, err = db.GetIndexKeys("computePlan~computeplanid~worker~rank~key", []string{"computePlan", 
inp.ComputePlanID, traintuple.Dataset.Worker, inp.Rank}) + ttKeys, err = db.GetIndexKeys("computePlan~computeplankey~worker~rank~key", []string{"computePlan", inp.ComputePlanKey, traintuple.Dataset.Worker, inp.Rank}) if err != nil { return err } else if len(ttKeys) > 0 { - err = errors.BadRequest("ComputePlanID %s with worker %s rank %d already exists", inp.ComputePlanID, traintuple.Dataset.Worker, traintuple.Rank) + err = errors.BadRequest("ComputePlanKey %s with worker %s rank %d already exists", inp.ComputePlanKey, traintuple.Dataset.Worker, traintuple.Rank) return err } return nil @@ -170,11 +170,11 @@ func (traintuple *Traintuple) Save(db *LedgerDB, traintupleKey string) error { return err } } - if traintuple.ComputePlanID != "" { - if err := db.CreateIndex("computePlan~computeplanid~worker~rank~key", []string{"computePlan", traintuple.ComputePlanID, traintuple.Dataset.Worker, strconv.Itoa(traintuple.Rank), traintupleKey}); err != nil { + if traintuple.ComputePlanKey != "" { + if err := db.CreateIndex("computePlan~computeplankey~worker~rank~key", []string{"computePlan", traintuple.ComputePlanKey, traintuple.Dataset.Worker, strconv.Itoa(traintuple.Rank), traintupleKey}); err != nil { return err } - if err := db.CreateIndex("algo~computeplanid~key", []string{"algo", traintuple.ComputePlanID, traintuple.AlgoKey}); err != nil { + if err := db.CreateIndex("algo~computeplankey~key", []string{"algo", traintuple.ComputePlanKey, traintuple.AlgoKey}); err != nil { return err } } @@ -286,9 +286,9 @@ func logSuccessTrain(db *LedgerDB, args []string) (o outputTraintuple, err error return } - traintuple.OutModel = &KeyHashDress{ + traintuple.OutModel = &KeyChecksumAddress{ Key: inp.OutModel.Key, - Hash: inp.OutModel.Hash, + Checksum: inp.OutModel.Checksum, StorageAddress: inp.OutModel.StorageAddress} traintuple.Log += inp.Log @@ -296,7 +296,7 @@ func logSuccessTrain(db *LedgerDB, args []string) (o outputTraintuple, err error if err != nil { return } - err = 
TryAddIntermediaryModel(db, traintuple.ComputePlanID, traintupleKey, traintuple.OutModel.Key) + err = TryAddIntermediaryModel(db, traintuple.ComputePlanKey, traintupleKey, traintuple.OutModel.Key) if err != nil { return } @@ -352,7 +352,7 @@ func logFailTrain(db *LedgerDB, args []string) (o outputTraintuple, err error) { } // Do not propagate failure if we are in a compute plan - if traintuple.ComputePlanID != "" { + if traintuple.ComputePlanKey != "" { return } // update depending tuples @@ -596,7 +596,7 @@ func (traintuple *Traintuple) commitStatusUpdate(db *LedgerDB, traintupleKey str if err := db.UpdateIndex(indexName, oldAttributes, newAttributes); err != nil { return err } - if err := UpdateComputePlanState(db, traintuple.ComputePlanID, newStatus, traintupleKey); err != nil { + if err := UpdateComputePlanState(db, traintuple.ComputePlanKey, newStatus, traintupleKey); err != nil { return err } logger.Infof("traintuple %s status updated: %s (from=%s)", traintupleKey, newStatus, oldStatus) diff --git a/chaincode/traintuple_composite.go b/chaincode/traintuple_composite.go index fe5f1270..788cc0d0 100644 --- a/chaincode/traintuple_composite.go +++ b/chaincode/traintuple_composite.go @@ -40,7 +40,7 @@ func (traintuple *CompositeTraintuple) SetFromInput(db *LedgerDB, inp inputCompo traintuple.Key = inp.Key traintuple.AssetType = CompositeTraintupleType traintuple.Creator = creator - traintuple.ComputePlanID = inp.ComputePlanID + traintuple.ComputePlanKey = inp.ComputePlanKey traintuple.Metadata = inp.Metadata traintuple.Tag = inp.Tag algo, err := db.GetCompositeAlgo(inp.AlgoKey) @@ -148,15 +148,15 @@ func (traintuple *CompositeTraintuple) SetFromParents(db *LedgerDB, inp inputCom // AddToComputePlan set the traintuple's parameters that determines if it's part of on ComputePlan and how. 
// It uses the inputCompositeTraintuple values as follow: -// - If neither ComputePlanID nor rank is set it returns immediately -// - If rank is 0 and ComputePlanID empty, it's start a new one using this traintuple key -// - If rank and ComputePlanID are set, it checks if there are coherent with previous ones and set it. +// - If neither ComputePlanKey nor rank is set it returns immediately +// - If rank is 0 and ComputePlanKey empty, it's start a new one using this traintuple key +// - If rank and ComputePlanKey are set, it checks if there are coherent with previous ones and set it. // Use checkComputePlanAvailability to ensure the compute plan exists and no other tuple is registered with the same worker/rank func (traintuple *CompositeTraintuple) AddToComputePlan(db *LedgerDB, inp inputCompositeTraintuple, traintupleKey string, checkComputePlanAvailability bool) error { - // check ComputePlanID and Rank and set it when required + // check ComputePlanKey and Rank and set it when required var err error if inp.Rank == "" { - if inp.ComputePlanID != "" { + if inp.ComputePlanKey != "" { return errors.BadRequest("invalid inputs, a ComputePlan should have a rank") } return nil @@ -165,13 +165,13 @@ func (traintuple *CompositeTraintuple) AddToComputePlan(db *LedgerDB, inp inputC if err != nil { return err } - traintuple.ComputePlanID = inp.ComputePlanID - computePlan, err := db.GetComputePlan(inp.ComputePlanID) + traintuple.ComputePlanKey = inp.ComputePlanKey + computePlan, err := db.GetComputePlan(inp.ComputePlanKey) if err != nil { return err } computePlan.AddTuple(CompositeTraintupleType, traintupleKey, traintuple.Status) - err = computePlan.Save(db, traintuple.ComputePlanID) + err = computePlan.Save(db, traintuple.ComputePlanKey) if err != nil { return err } @@ -180,11 +180,11 @@ func (traintuple *CompositeTraintuple) AddToComputePlan(db *LedgerDB, inp inputC return nil } var ttKeys []string - ttKeys, err = db.GetIndexKeys("computePlan~computeplanid~worker~rank~key", 
[]string{"computePlan", inp.ComputePlanID, traintuple.Dataset.Worker, inp.Rank}) + ttKeys, err = db.GetIndexKeys("computePlan~computeplankey~worker~rank~key", []string{"computePlan", inp.ComputePlanKey, traintuple.Dataset.Worker, inp.Rank}) if err != nil { return err } else if len(ttKeys) > 0 { - err = errors.BadRequest("ComputePlanID %s with worker %s rank %d already exists", inp.ComputePlanID, traintuple.Dataset.Worker, traintuple.Rank) + err = errors.BadRequest("ComputePlanKey %s with worker %s rank %d already exists", inp.ComputePlanKey, traintuple.Dataset.Worker, traintuple.Rank) return err } return nil @@ -214,11 +214,11 @@ func (traintuple *CompositeTraintuple) Save(db *LedgerDB, traintupleKey string) if err := db.CreateIndex("tuple~inModel~key", []string{"tuple", traintuple.InTrunkModel, traintupleKey}); err != nil { return err } - if traintuple.ComputePlanID != "" { - if err := db.CreateIndex("computePlan~computeplanid~worker~rank~key", []string{"computePlan", traintuple.ComputePlanID, traintuple.Dataset.Worker, strconv.Itoa(traintuple.Rank), traintupleKey}); err != nil { + if traintuple.ComputePlanKey != "" { + if err := db.CreateIndex("computePlan~computeplankey~worker~rank~key", []string{"computePlan", traintuple.ComputePlanKey, traintuple.Dataset.Worker, strconv.Itoa(traintuple.Rank), traintupleKey}); err != nil { return err } - if err := db.CreateIndex("algo~computeplanid~key", []string{"algo", traintuple.ComputePlanID, traintuple.AlgoKey}); err != nil { + if err := db.CreateIndex("algo~computeplankey~key", []string{"algo", traintuple.ComputePlanKey, traintuple.AlgoKey}); err != nil { return err } } @@ -330,13 +330,13 @@ func logSuccessCompositeTrain(db *LedgerDB, args []string) (o outputCompositeTra return } - compositeTraintuple.OutHeadModel.OutModel = &KeyHash{ - Key: inp.OutHeadModel.Key, - Hash: inp.OutHeadModel.Hash} + compositeTraintuple.OutHeadModel.OutModel = &KeyChecksum{ + Key: inp.OutHeadModel.Key, + Checksum: inp.OutHeadModel.Checksum} - 
compositeTraintuple.OutTrunkModel.OutModel = &KeyHashDress{ + compositeTraintuple.OutTrunkModel.OutModel = &KeyChecksumAddress{ Key: inp.OutTrunkModel.Key, - Hash: inp.OutTrunkModel.Hash, + Checksum: inp.OutTrunkModel.Checksum, StorageAddress: inp.OutTrunkModel.StorageAddress} compositeTraintuple.Log += inp.Log @@ -344,7 +344,7 @@ func logSuccessCompositeTrain(db *LedgerDB, args []string) (o outputCompositeTra if err != nil { return } - err = TryAddIntermediaryModel(db, compositeTraintuple.ComputePlanID, compositeTraintupleKey, inp.OutHeadModel.Key) + err = TryAddIntermediaryModel(db, compositeTraintuple.ComputePlanKey, compositeTraintupleKey, inp.OutHeadModel.Key) if err != nil { return } @@ -352,7 +352,7 @@ func logSuccessCompositeTrain(db *LedgerDB, args []string) (o outputCompositeTra if err != nil { return } - err = TryAddIntermediaryModel(db, compositeTraintuple.ComputePlanID, compositeTraintupleKey, inp.OutTrunkModel.Key) + err = TryAddIntermediaryModel(db, compositeTraintuple.ComputePlanKey, compositeTraintupleKey, inp.OutTrunkModel.Key) if err != nil { return } @@ -406,7 +406,7 @@ func logFailCompositeTrain(db *LedgerDB, args []string) (o outputCompositeTraint return } // Do not propagate failure if we are in a compute plan - if compositeTraintuple.ComputePlanID != "" { + if compositeTraintuple.ComputePlanKey != "" { return } // update depending tuples @@ -570,7 +570,7 @@ func (traintuple *CompositeTraintuple) commitStatusUpdate(db *LedgerDB, traintup if err := db.UpdateIndex(indexName, oldAttributes, newAttributes); err != nil { return err } - if err := UpdateComputePlanState(db, traintuple.ComputePlanID, newStatus, traintupleKey); err != nil { + if err := UpdateComputePlanState(db, traintuple.ComputePlanKey, newStatus, traintupleKey); err != nil { return err } logger.Infof("compositetraintuple %s status updated: %s (from=%s)", traintupleKey, newStatus, oldStatus) diff --git a/chaincode/traintuple_composite_test.go b/chaincode/traintuple_composite_test.go 
index d6007f4e..a306f091 100644 --- a/chaincode/traintuple_composite_test.go +++ b/chaincode/traintuple_composite_test.go @@ -153,7 +153,7 @@ func TestTraintupleComputePlanCreationComposite(t *testing.T) { // Add dataManager, dataSample and algo registerItem(t, *mockStub, "compositeAlgo") - inpTraintuple := inputCompositeTraintuple{ComputePlanID: "someComputePlanID"} + inpTraintuple := inputCompositeTraintuple{ComputePlanKey: "someComputePlanKey"} args := inpTraintuple.createDefault() resp := mockStub.MockInvoke("42", args) require.EqualValues(t, 400, resp.Status, "should failed for missing rank") @@ -163,14 +163,14 @@ func TestTraintupleComputePlanCreationComposite(t *testing.T) { args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) require.EqualValues(t, 400, resp.Status, "should failed for invalid rank") - require.Contains(t, resp.Message, "Field validation for 'ComputePlanID' failed on the 'required_with' tag") + require.Contains(t, resp.Message, "Field validation for 'ComputePlanKey' failed on the 'required_with' tag") cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp = mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) - inpTraintuple = inputCompositeTraintuple{Rank: "0", ComputePlanID: cpKey} + inpTraintuple = inputCompositeTraintuple{Rank: "0", ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 200, resp.Status) @@ -202,11 +202,11 @@ func TestTraintupleMultipleCommputePlanCreationsComposite(t *testing.T) { registerItem(t, *mockStub, "compositeAlgo") cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp := mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) - inpTraintuple := inputCompositeTraintuple{Rank: "0", ComputePlanID: cpKey} + inpTraintuple := inputCompositeTraintuple{Rank: 
"0", ComputePlanKey: cpKey} args := inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 200, resp.Status) @@ -223,7 +223,7 @@ func TestTraintupleMultipleCommputePlanCreationsComposite(t *testing.T) { InHeadModelKey: key, InTrunkModelKey: key, Rank: "0", - ComputePlanID: cpKey} + ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 400, resp.Status, resp.Message, "should failed to add a traintuple of the same rank") @@ -234,21 +234,21 @@ func TestTraintupleMultipleCommputePlanCreationsComposite(t *testing.T) { InHeadModelKey: key, InTrunkModelKey: key, Rank: "1", - ComputePlanID: "notarealone"} + ComputePlanKey: "notarealone"} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 404, resp.Status, resp.Message, "should failed to add a traintuple to an unexisting ComputePlanID") + assert.EqualValues(t, 404, resp.Status, resp.Message, "should failed to add a traintuple to an unexisting ComputePlanKey") - // Succesfully add a traintuple to the same ComputePlanID + // Succesfully add a traintuple to the same ComputePlanKey inpTraintuple = inputCompositeTraintuple{ Key: RandomUUID(), InHeadModelKey: key, InTrunkModelKey: key, Rank: "1", - ComputePlanID: ct.ComputePlanID} + ComputePlanKey: ct.ComputePlanKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able do create a traintuple with the same ComputePlanID") + assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able do create a traintuple with the same ComputePlanKey") err = json.Unmarshal(resp.Payload, &res) assert.NoError(t, err, "should unmarshal without problem") } @@ -263,7 +263,7 @@ func TestTraintupleComposite(t *testing.T) { } args := inpTraintuple.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when 
adding objective with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid key, status %d and message %s", resp.Status, resp.Message) // Add traintuple with unexisting algo inpTraintuple = inputCompositeTraintuple{} @@ -288,9 +288,9 @@ func TestTraintupleComposite(t *testing.T) { assert.NoError(t, err, "when unmarshalling queried composite traintuple") expected := outputCompositeTraintuple{ Key: compositeTraintupleKey, - Algo: &KeyHashDressName{ + Algo: &KeyChecksumAddressName{ Key: compositeAlgoKey, - Hash: compositeAlgoHash, + Checksum: compositeAlgoChecksum, Name: compositeAlgoName, StorageAddress: compositeAlgoStorageAddress, }, @@ -298,7 +298,7 @@ func TestTraintupleComposite(t *testing.T) { Dataset: &outputTtDataset{ Key: dataManagerKey, DataSampleKeys: []string{trainDataSampleKey1, trainDataSampleKey2}, - OpenerHash: dataManagerOpenerHash, + OpenerChecksum: dataManagerOpenerChecksum, Worker: worker, Metadata: map[string]string{}, }, @@ -383,12 +383,12 @@ func TestTraintupleComposite(t *testing.T) { endTraintuple := outputCompositeTraintuple{} assert.NoError(t, json.Unmarshal(resp.Payload, &endTraintuple)) expected.Log = success.Log - expected.OutHeadModel.OutModel = &KeyHash{ - Key: headModelKey, - Hash: headModelHash} - expected.OutTrunkModel.OutModel = &KeyHashDress{ + expected.OutHeadModel.OutModel = &KeyChecksum{ + Key: headModelKey, + Checksum: headModelChecksum} + expected.OutTrunkModel.OutModel = &KeyChecksumAddress{ Key: trunkModelKey, - Hash: trunkModelHash, + Checksum: trunkModelChecksum, StorageAddress: trunkModelAddress} expected.Status = traintupleStatus[1] assert.Exactly(t, expected, endTraintuple, "retreived CompositeTraintuple does not correspond to what is expected") @@ -448,13 +448,13 @@ func TestInsertTraintupleTwiceComposite(t *testing.T) { // create a composite traintuple and start a ComplutePlan cpKey := RandomUUID() - inCP := 
inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp = mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) inpTraintuple := inputCompositeTraintuple{ - Rank: "0", - ComputePlanID: cpKey, + Rank: "0", + ComputePlanKey: cpKey, } inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", methodAndAssetToByte("createCompositeTraintuple", inpTraintuple)) @@ -467,7 +467,7 @@ func TestInsertTraintupleTwiceComposite(t *testing.T) { // create a second composite traintuple in the same ComputePlan inpTraintuple.Key = traintupleKey2 inpTraintuple.Rank = "1" - inpTraintuple.ComputePlanID = tuple.ComputePlanID + inpTraintuple.ComputePlanKey = tuple.ComputePlanKey inpTraintuple.InHeadModelKey = _key.Key inpTraintuple.InTrunkModelKey = _key.Key resp = mockStub.MockInvoke("42", methodAndAssetToByte("createCompositeTraintuple", inpTraintuple)) @@ -544,7 +544,6 @@ func TestCreateCompositeTraintupleInModels(t *testing.T) { if tt.withInHeadModel { // create head traintuple inpHeadTraintuple := inputCompositeTraintuple{} - // make the traintuple unique so that it has a unique hash inpHeadTraintuple.DataSampleKeys = []string{trainDataSampleKey1} args = inpHeadTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) @@ -558,7 +557,6 @@ func TestCreateCompositeTraintupleInModels(t *testing.T) { if tt.withInTrunkModel { // create trunk traintuple inpTrunkTraintuple := inputCompositeTraintuple{} - // make the traintuple unique so that it has a unique hash inpTrunkTraintuple.DataSampleKeys = []string{trainDataSampleKey2} args = inpTrunkTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) @@ -787,12 +785,12 @@ func TestCorrectParent(t *testing.T) { // fetch aggregate child, and check its in-model is the parent's trunk out-model child1, _ := queryAggregatetuple(db, assetToArgs(inputKey{Key: child1Key})) assert.Equal(t, trunkModelKey, child1.InModels[0].Key) - assert.Equal(t, trunkModelHash, 
child1.InModels[0].Hash) + assert.Equal(t, trunkModelChecksum, child1.InModels[0].Checksum) // fetch composite child, and check its head in-model is the parent's head out-model child2, _ := queryCompositeTraintuple(db, assetToArgs(inputKey{Key: child2Key})) assert.Equal(t, headModelKey, child2.InHeadModel.Key) - assert.Equal(t, headModelHash, child2.InHeadModel.Hash) + assert.Equal(t, headModelChecksum, child2.InHeadModel.Checksum) } func TestCreateTesttuplePermissions(t *testing.T) { @@ -831,7 +829,7 @@ func TestHeadModelDifferentWorker(t *testing.T) { // new dataset on new worker inpDM := inputDataManager{Key: strings.Replace(dataManagerKey, "1", "2", 1)} inpDM.createDefault() - inpDM.OpenerHash = GetRandomHash() + inpDM.OpenerChecksum = GetRandomHash() outDM, err := registerDataManager(db, assetToArgs(inpDM)) assert.NoError(t, err) diff --git a/chaincode/traintuple_test.go b/chaincode/traintuple_test.go index 203bf7ed..6c22a31b 100644 --- a/chaincode/traintuple_test.go +++ b/chaincode/traintuple_test.go @@ -140,14 +140,14 @@ func TestTraintupleComputePlanCreation(t *testing.T) { // Add dataManager, dataSample and algo registerItem(t, *mockStub, "algo") - inpTraintuple := inputTraintuple{ComputePlanID: "someComputePlanID"} + inpTraintuple := inputTraintuple{ComputePlanKey: "someComputePlanKey"} args := inpTraintuple.createDefault() resp := mockStub.MockInvoke("42", args) require.EqualValues(t, 400, resp.Status, "should failed for missing rank") require.Contains(t, resp.Message, "invalid inputs, a ComputePlan should have a rank", "invalid error message") cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp = mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) @@ -155,9 +155,9 @@ func TestTraintupleComputePlanCreation(t *testing.T) { args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) require.EqualValues(t, 400, resp.Status, "should failed for 
invalid rank") - require.Contains(t, resp.Message, "Field validation for 'ComputePlanID' failed on the 'required_with' tag") + require.Contains(t, resp.Message, "Field validation for 'ComputePlanKey' failed on the 'required_with' tag") - inpTraintuple = inputTraintuple{Rank: "0", ComputePlanID: cpKey} + inpTraintuple = inputTraintuple{Rank: "0", ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 200, resp.Status) @@ -189,11 +189,11 @@ func TestTraintupleMultipleCommputePlanCreations(t *testing.T) { registerItem(t, *mockStub, "algo") cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp := mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) - inpTraintuple := inputTraintuple{Rank: "0", ComputePlanID: cpKey} + inpTraintuple := inputTraintuple{Rank: "0", ComputePlanKey: cpKey} args := inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 200, resp.Status) @@ -206,33 +206,33 @@ func TestTraintupleMultipleCommputePlanCreations(t *testing.T) { assert.NoError(t, err) // Failed to add a traintuple with the same rank inpTraintuple = inputTraintuple{ - Key: RandomUUID(), - InModels: []string{key}, - Rank: "0", - ComputePlanID: cpKey} + Key: RandomUUID(), + InModels: []string{key}, + Rank: "0", + ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 400, resp.Status, resp.Message, "should failed to add a traintuple of the same rank") // Failed to add a traintuple to an unexisting CommputePlan inpTraintuple = inputTraintuple{ - Key: RandomUUID(), - InModels: []string{key}, - Rank: "1", - ComputePlanID: "notarealone"} + Key: RandomUUID(), + InModels: []string{key}, + Rank: "1", + ComputePlanKey: "notarealone"} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 404, 
resp.Status, resp.Message, "should failed to add a traintuple to an unexisting ComputePlanID") + assert.EqualValues(t, 404, resp.Status, resp.Message, "should failed to add a traintuple to an unexisting ComputePlanKey") - // Succesfully add a traintuple to the same ComputePlanID + // Succesfully add a traintuple to the same ComputePlanKey inpTraintuple = inputTraintuple{ - Key: RandomUUID(), - InModels: []string{key}, - Rank: "1", - ComputePlanID: tuple.ComputePlanID} + Key: RandomUUID(), + InModels: []string{key}, + Rank: "1", + ComputePlanKey: tuple.ComputePlanKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able do create a traintuple with the same ComputePlanID") + assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able do create a traintuple with the same ComputePlanKey") err = json.Unmarshal(resp.Payload, &res) assert.NoError(t, err, "should unmarshal without problem") ttkey := res.Key @@ -244,14 +244,14 @@ func TestTraintupleMultipleCommputePlanCreations(t *testing.T) { assert.EqualValues(t, 200, resp.Status) inpTraintuple = inputTraintuple{ - Key: RandomUUID(), - AlgoKey: newAlgoKey, - InModels: []string{ttkey}, - Rank: "2", - ComputePlanID: tuple.ComputePlanID} + Key: RandomUUID(), + AlgoKey: newAlgoKey, + InModels: []string{ttkey}, + Rank: "2", + ComputePlanKey: tuple.ComputePlanKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able to create a traintuple with the same ComputePlanID and different algo keys") + assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able to create a traintuple with the same ComputePlanKey and different algo keys") } func TestTraintuple(t *testing.T) { @@ -264,7 +264,7 @@ func TestTraintuple(t *testing.T) { } args := inpTraintuple.createDefault() resp := mockStub.MockInvoke("42", args) - 
assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid key, status %d and message %s", resp.Status, resp.Message) // Add traintuple with unexisting algo inpTraintuple = inputTraintuple{} @@ -289,9 +289,9 @@ func TestTraintuple(t *testing.T) { assert.NoError(t, err, "when unmarshalling queried traintuple") expected := outputTraintuple{ Key: traintupleKey, - Algo: &KeyHashDressName{ + Algo: &KeyChecksumAddressName{ Key: algoKey, - Hash: algoHash, + Checksum: algoChecksum, Name: algoName, StorageAddress: algoStorageAddress, }, @@ -299,7 +299,7 @@ func TestTraintuple(t *testing.T) { Dataset: &outputTtDataset{ Key: dataManagerKey, DataSampleKeys: []string{trainDataSampleKey1, trainDataSampleKey2}, - OpenerHash: dataManagerOpenerHash, + OpenerChecksum: dataManagerOpenerChecksum, Worker: worker, Metadata: map[string]string{}, }, @@ -377,9 +377,9 @@ func TestTraintuple(t *testing.T) { endTraintuple := outputTraintuple{} assert.NoError(t, json.Unmarshal(resp.Payload, &endTraintuple)) expected.Log = success.Log - expected.OutModel = &KeyHashDress{ + expected.OutModel = &KeyChecksumAddress{ Key: modelKey, - Hash: modelHash, + Checksum: modelChecksum, StorageAddress: modelAddress} expected.Status = traintupleStatus[1] assert.Exactly(t, expected, endTraintuple, "retreived Traintuple does not correspond to what is expected") @@ -427,13 +427,13 @@ func TestInsertTraintupleTwice(t *testing.T) { // create a traintuple and start a ComplutePlan cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp := mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) inpTraintuple := inputTraintuple{ - Rank: "0", - ComputePlanID: cpKey, + Rank: "0", + ComputePlanKey: cpKey, } inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", 
methodAndAssetToByte("createTraintuple", inpTraintuple)) @@ -445,7 +445,7 @@ func TestInsertTraintupleTwice(t *testing.T) { // create a second traintuple in the same ComputePlan inpTraintuple.Key = RandomUUID() inpTraintuple.Rank = "1" - inpTraintuple.ComputePlanID = tuple.ComputePlanID + inpTraintuple.ComputePlanKey = tuple.ComputePlanKey inpTraintuple.InModels = []string{traintupleKey} resp = mockStub.MockInvoke("42", methodAndAssetToByte("createTraintuple", inpTraintuple)) assert.EqualValues(t, http.StatusOK, resp.Status) diff --git a/chaincode/tuple.go b/chaincode/tuple.go index f392aaaa..98e250b2 100644 --- a/chaincode/tuple.go +++ b/chaincode/tuple.go @@ -16,9 +16,6 @@ package main import ( "chaincode/errors" - "crypto/sha256" - "encoding/hex" - "sort" ) // List of the possible tuple's status @@ -232,17 +229,6 @@ func checkUpdateTuple(db *LedgerDB, worker string, oldStatus string, newStatus s return nil } -// HashForKey to generate key for an asset -func HashForKey(objectType string, hashElements ...string) string { - toHash := objectType - sort.Strings(hashElements) - for _, element := range hashElements { - toHash += "," + element - } - sum := sha256.Sum256([]byte(toHash)) - return hex.EncodeToString(sum[:]) -} - func determineStatusFromInModels(statuses []string) string { if stringInSlice(StatusFailed, statuses) { return StatusFailed @@ -260,11 +246,11 @@ func determineStatusFromInModels(statuses []string) string { return StatusTodo } -func determineTupleStatus(db *LedgerDB, tupleStatus, computePlanID string) (string, error) { - if tupleStatus != StatusWaiting || computePlanID == "" { +func determineTupleStatus(db *LedgerDB, tupleStatus, computePlanKey string) (string, error) { + if tupleStatus != StatusWaiting || computePlanKey == "" { return tupleStatus, nil } - computePlan, err := db.GetComputePlan(computePlanID) + computePlan, err := db.GetComputePlan(computePlanKey) if err != nil { return "", err } diff --git a/chaincode/tuple_aggregate.go 
b/chaincode/tuple_aggregate.go index 285abf04..661945cc 100644 --- a/chaincode/tuple_aggregate.go +++ b/chaincode/tuple_aggregate.go @@ -40,7 +40,7 @@ func (tuple *Aggregatetuple) SetFromInput(db *LedgerDB, inp inputAggregatetuple) tuple.Creator = creator tuple.Metadata = inp.Metadata tuple.Tag = inp.Tag - tuple.ComputePlanID = inp.ComputePlanID + tuple.ComputePlanKey = inp.ComputePlanKey algo, err := db.GetAggregateAlgo(inp.AlgoKey) if err != nil { return errors.BadRequest(err, "could not retrieve algo with key %s", inp.AlgoKey) @@ -117,15 +117,15 @@ func (tuple *Aggregatetuple) SetFromParents(db *LedgerDB, inModels []string) err // AddToComputePlan set the aggregate tuple's parameters that determines if it's part of on ComputePlan and how. // It uses the inputAggregatetuple values as follow: -// - If neither ComputePlanID nor rank is set it returns immediately -// - If rank is 0 and ComputePlanID empty, it's start a new one using this traintuple key -// - If rank and ComputePlanID are set, it checks if there are coherent with previous ones and set it. +// - If neither ComputePlanKey nor rank is set it returns immediately +// - If rank is 0 and ComputePlanKey empty, it's start a new one using this traintuple key +// - If rank and ComputePlanKey are set, it checks if there are coherent with previous ones and set it. 
// Use checkComputePlanAvailability to ensure the compute plan exists and no other tuple is registered with the same worker/rank func (tuple *Aggregatetuple) AddToComputePlan(db *LedgerDB, inp inputAggregatetuple, traintupleKey string, checkComputePlanAvailability bool) error { - // check ComputePlanID and Rank and set it when required + // check ComputePlanKey and Rank and set it when required var err error if inp.Rank == "" { - if inp.ComputePlanID != "" { + if inp.ComputePlanKey != "" { return errors.BadRequest("invalid inputs, a ComputePlan should have a rank") } return nil @@ -134,14 +134,14 @@ func (tuple *Aggregatetuple) AddToComputePlan(db *LedgerDB, inp inputAggregatetu if err != nil { return err } - tuple.ComputePlanID = inp.ComputePlanID + tuple.ComputePlanKey = inp.ComputePlanKey - computePlan, err := db.GetComputePlan(inp.ComputePlanID) + computePlan, err := db.GetComputePlan(inp.ComputePlanKey) if err != nil { return err } computePlan.AddTuple(AggregatetupleType, traintupleKey, tuple.Status) - err = computePlan.Save(db, tuple.ComputePlanID) + err = computePlan.Save(db, tuple.ComputePlanKey) if err != nil { return err } @@ -150,11 +150,11 @@ func (tuple *Aggregatetuple) AddToComputePlan(db *LedgerDB, inp inputAggregatetu return nil } var ttKeys []string - ttKeys, err = db.GetIndexKeys("computePlan~computeplanid~worker~rank~key", []string{"computePlan", inp.ComputePlanID, tuple.Worker, inp.Rank}) + ttKeys, err = db.GetIndexKeys("computePlan~computeplankey~worker~rank~key", []string{"computePlan", inp.ComputePlanKey, tuple.Worker, inp.Rank}) if err != nil { return err } else if len(ttKeys) > 0 { - err = errors.BadRequest("ComputePlanID %s with worker %s rank %d already exists", inp.ComputePlanID, tuple.Worker, tuple.Rank) + err = errors.BadRequest("ComputePlanKey %s with worker %s rank %d already exists", inp.ComputePlanKey, tuple.Worker, tuple.Rank) return err } @@ -182,11 +182,11 @@ func (tuple *Aggregatetuple) Save(db *LedgerDB, aggregatetupleKey 
string) error return err } } - if tuple.ComputePlanID != "" { - if err := db.CreateIndex("computePlan~computeplanid~worker~rank~key", []string{"computePlan", tuple.ComputePlanID, tuple.Worker, strconv.Itoa(tuple.Rank), aggregatetupleKey}); err != nil { + if tuple.ComputePlanKey != "" { + if err := db.CreateIndex("computePlan~computeplankey~worker~rank~key", []string{"computePlan", tuple.ComputePlanKey, tuple.Worker, strconv.Itoa(tuple.Rank), aggregatetupleKey}); err != nil { return err } - if err := db.CreateIndex("algo~computeplanid~key", []string{"algo", tuple.ComputePlanID, tuple.AlgoKey}); err != nil { + if err := db.CreateIndex("algo~computeplankey~key", []string{"algo", tuple.ComputePlanKey, tuple.AlgoKey}); err != nil { return err } } @@ -307,7 +307,7 @@ func logFailAggregate(db *LedgerDB, args []string) (o outputAggregatetuple, err o.Fill(db, aggregatetuple) // Do not propagate failure if we are in a compute plan - if aggregatetuple.ComputePlanID != "" { + if aggregatetuple.ComputePlanKey != "" { return } // update depending tuples @@ -337,9 +337,9 @@ func logSuccessAggregate(db *LedgerDB, args []string) (o outputAggregatetuple, e return } - aggregatetuple.OutModel = &KeyHashDress{ + aggregatetuple.OutModel = &KeyChecksumAddress{ Key: inp.OutModel.Key, - Hash: inp.OutModel.Hash, + Checksum: inp.OutModel.Checksum, StorageAddress: inp.OutModel.StorageAddress} aggregatetuple.Log += inp.Log @@ -347,7 +347,7 @@ func logSuccessAggregate(db *LedgerDB, args []string) (o outputAggregatetuple, e if err != nil { return } - err = TryAddIntermediaryModel(db, aggregatetuple.ComputePlanID, aggregatetupleKey, aggregatetuple.OutModel.Key) + err = TryAddIntermediaryModel(db, aggregatetuple.ComputePlanKey, aggregatetupleKey, aggregatetuple.OutModel.Key) if err != nil { return } @@ -514,7 +514,7 @@ func (tuple *Aggregatetuple) commitStatusUpdate(db *LedgerDB, aggregatetupleKey if err := db.UpdateIndex(indexName, oldAttributes, newAttributes); err != nil { return err } - if err 
:= UpdateComputePlanState(db, tuple.ComputePlanID, newStatus, aggregatetupleKey); err != nil { + if err := UpdateComputePlanState(db, tuple.ComputePlanKey, newStatus, aggregatetupleKey); err != nil { return err } logger.Infof("aggregatetuple %s status updated: %s (from=%s)", aggregatetupleKey, newStatus, oldStatus) diff --git a/chaincode/tuple_aggregate_test.go b/chaincode/tuple_aggregate_test.go index dcefe756..b4f2cde8 100644 --- a/chaincode/tuple_aggregate_test.go +++ b/chaincode/tuple_aggregate_test.go @@ -125,14 +125,14 @@ func TestTraintupleComputePlanCreationAggregate(t *testing.T) { // Add dataManager, dataSample and algo registerItem(t, *mockStub, "aggregateAlgo") - inpTraintuple := inputAggregatetuple{ComputePlanID: "someComputePlanID"} + inpTraintuple := inputAggregatetuple{ComputePlanKey: "someComputePlanKey"} args := inpTraintuple.createDefault() resp := mockStub.MockInvoke("42", args) require.EqualValues(t, 400, resp.Status, "should failed for missing rank") require.Contains(t, resp.Message, "invalid inputs, a ComputePlan should have a rank", "invalid error message") cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp = mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) @@ -140,9 +140,9 @@ func TestTraintupleComputePlanCreationAggregate(t *testing.T) { args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) require.EqualValues(t, 400, resp.Status, "should failed for invalid rank") - require.Contains(t, resp.Message, "Field validation for 'ComputePlanID' failed on the 'required_with' tag") + require.Contains(t, resp.Message, "Field validation for 'ComputePlanKey' failed on the 'required_with' tag") - inpTraintuple = inputAggregatetuple{Rank: "0", ComputePlanID: cpKey} + inpTraintuple = inputAggregatetuple{Rank: "0", ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 200, 
resp.Status) @@ -175,11 +175,11 @@ func TestTraintupleMultipleCommputePlanCreationsAggregate(t *testing.T) { db := NewLedgerDB(mockStub) cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp := mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) - inpTraintuple := inputAggregatetuple{Rank: "0", ComputePlanID: cpKey} + inpTraintuple := inputAggregatetuple{Rank: "0", ComputePlanKey: cpKey} args := inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 200, resp.Status) @@ -192,33 +192,33 @@ func TestTraintupleMultipleCommputePlanCreationsAggregate(t *testing.T) { // Failed to add a traintuple with the same rank inpTraintuple = inputAggregatetuple{ - Key: RandomUUID(), - InModels: []string{key}, - Rank: "0", - ComputePlanID: cpKey} + Key: RandomUUID(), + InModels: []string{key}, + Rank: "0", + ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) assert.EqualValues(t, 400, resp.Status, resp.Message, "should failed to add an aggregate tuple of the same rank") // Failed to add a traintuple to an unexisting CommputePlan inpTraintuple = inputAggregatetuple{ - Key: RandomUUID(), - InModels: []string{key}, - Rank: "1", - ComputePlanID: "notarealone"} + Key: RandomUUID(), + InModels: []string{key}, + Rank: "1", + ComputePlanKey: "notarealone"} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 404, resp.Status, resp.Message, "should failed to add an aggregate tuple to an unexisting ComputePlanID") + assert.EqualValues(t, 404, resp.Status, resp.Message, "should failed to add an aggregate tuple to an unexisting ComputePlanKey") - // Succesfully add a traintuple to the same ComputePlanID + // Succesfully add a traintuple to the same ComputePlanKey inpTraintuple = inputAggregatetuple{ - Key: RandomUUID(), - InModels: []string{key}, - Rank: "1", - ComputePlanID: 
cpKey} + Key: RandomUUID(), + InModels: []string{key}, + Rank: "1", + ComputePlanKey: cpKey} args = inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", args) - assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able do create an aggregate tuple with the same ComputePlanID") + assert.EqualValues(t, 200, resp.Status, resp.Message, "should be able to create an aggregate tuple with the same ComputePlanKey") err = json.Unmarshal(resp.Payload, &res) assert.NoError(t, err, "should unmarshal without problem") } @@ -233,7 +233,7 @@ func TestTraintupleAggregate(t *testing.T) { } args := inpTraintuple.createDefault() resp := mockStub.MockInvoke("42", args) - assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid hash, status %d and message %s", resp.Status, resp.Message) + assert.EqualValuesf(t, 400, resp.Status, "when adding objective with invalid key, status %d and message %s", resp.Status, resp.Message) // Add traintuple with unexisting algo inpTraintuple = inputAggregatetuple{} @@ -258,9 +258,9 @@ func TestTraintupleAggregate(t *testing.T) { assert.NoError(t, err, "when unmarshalling queried aggregate tuple") expected := outputAggregatetuple{ Key: aggregatetupleKey, - Algo: &KeyHashDressName{ + Algo: &KeyChecksumAddressName{ Key: aggregateAlgoKey, - Hash: aggregateAlgoHash, + Checksum: aggregateAlgoChecksum, Name: aggregateAlgoName, StorageAddress: aggregateAlgoStorageAddress, }, @@ -344,9 +344,9 @@ func TestTraintupleAggregate(t *testing.T) { endTraintuple := outputAggregatetuple{} assert.NoError(t, json.Unmarshal(resp.Payload, &endTraintuple)) expected.Log = success.Log - expected.OutModel = &KeyHashDress{ + expected.OutModel = &KeyChecksumAddress{ Key: modelKey, - Hash: modelHash, + Checksum: modelChecksum, StorageAddress: modelAddress} expected.Status = traintupleStatus[1] assert.Exactly(t, expected, endTraintuple, "retreived Aggregatetuple does not correspond to what is expected") @@ -406,12 +406,12 @@ func 
TestInsertTraintupleTwiceAggregate(t *testing.T) { // create a aggregate tuple and start a ComplutePlan cpKey := RandomUUID() - inCP := inputComputePlan{ComputePlanID: cpKey} + inCP := inputComputePlan{Key: cpKey} resp = mockStub.MockInvoke("42", inCP.getArgs()) require.EqualValues(t, 200, resp.Status) inpTraintuple := inputAggregatetuple{ - Rank: "0", - ComputePlanID: cpKey, + Rank: "0", + ComputePlanKey: cpKey, } inpTraintuple.createDefault() resp = mockStub.MockInvoke("42", methodAndAssetToByte("createAggregatetuple", inpTraintuple)) @@ -424,7 +424,7 @@ func TestInsertTraintupleTwiceAggregate(t *testing.T) { // create a second aggregate tuple in the same ComputePlan inpTraintuple.Key = RandomUUID() inpTraintuple.Rank = "1" - inpTraintuple.ComputePlanID = tuple.ComputePlanID + inpTraintuple.ComputePlanKey = tuple.ComputePlanKey inpTraintuple.InModels = []string{_key.Key} resp = mockStub.MockInvoke("42", methodAndAssetToByte("createAggregatetuple", inpTraintuple)) assert.EqualValues(t, http.StatusOK, resp.Status) @@ -578,7 +578,7 @@ func TestQueryAggregatetuple(t *testing.T) { assert.Equal(t, traintupleKey, out.InModels[0].TraintupleKey) assert.Equal(t, compositeTraintupleKey, out.InModels[1].TraintupleKey) assert.Equal(t, aggregateAlgoName, out.Algo.Name) - assert.Equal(t, aggregateAlgoHash, out.Algo.Hash) + assert.Equal(t, aggregateAlgoChecksum, out.Algo.Checksum) assert.Equal(t, aggregateAlgoStorageAddress, out.Algo.StorageAddress) assert.Equal(t, StatusWaiting, out.Status) } diff --git a/chaincode/tuple_test.go b/chaincode/tuple_test.go index a9cb1985..bd78c7b3 100644 --- a/chaincode/tuple_test.go +++ b/chaincode/tuple_test.go @@ -103,16 +103,16 @@ func TestSpecifiqArgSeq(t *testing.T) { mockStub := NewMockStubWithRegisterNode("substra", scc) argSeq := [][]string{ // []string{"registerDataManager", "Titanic", "17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223", 
"http://owkin.substrabac:8000/data_manager/17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223/opener/", "csv", "48c89276972363250ea949c32809020e9d7fda786547a570bcaecedcc5092627", "http://owkin.substrabac:8000/data_manager/17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223/description/", "", "all"}, - []string{"registerDataManager", "\"{\\\"Name\\\":\\\"Titanic\\\",\\\"OpenerHash\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223\\\",\\\"OpenerStorageAddress\\\":\\\"http://owkin.substrabac:8000/data_manager/17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223/opener/\\\",\\\"Type\\\":\\\"csv\\\",\\\"DescriptionHash\\\":\\\"48c89276972363250ea949c32809020e9d7fda786547a570bcaecedcc5092627\\\",\\\"DescriptionStorageAddress\\\":\\\"http://owkin.substrabac:8000/data_manager/17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223/description/\\\",\\\"ObjectiveKey\\\":\\\"\\\",\\\"Permissions\\\":\\\"all\\\"}\""}, + []string{"registerDataManager", "\"{\\\"Name\\\":\\\"Titanic\\\",\\\"OpenerChecksum\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223\\\",\\\"OpenerStorageAddress\\\":\\\"http://owkin.substrabac:8000/data_manager/17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223/opener/\\\",\\\"Type\\\":\\\"csv\\\",\\\"DescriptionChecksum\\\":\\\"48c89276972363250ea949c32809020e9d7fda786547a570bcaecedcc5092627\\\",\\\"DescriptionStorageAddress\\\":\\\"http://owkin.substrabac:8000/data_manager/17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223/description/\\\",\\\"ObjectiveKey\\\":\\\"\\\",\\\"Permissions\\\":\\\"all\\\"}\""}, []string{"registerDataSample", 
"\"{\\\"Keys\\\":\\\"47f9af29d34d737acfb0e37d93bfa650979292297ed263e8536ef3d13f70c83e,df94060511117dd25da1d2b1846f9be17340128233c8b24694d5e780d909b22c,50b7a4b4f2541674958fd09a061276862e1e2ea4dbdd0e1af06e70051804e33b,1befb03ceed3ab7ec9fa4bebe9b681bbc7725a402e03f9e64f9f1677cf619183\\\",\\\"DataManagerKeys\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223\\\",\\\"TestOnly\\\":\\\"false\\\"}\""}, []string{"registerDataSample", "\"{\\\"Keys\\\":\\\"1a8532bd84d5ef785a4abe503a12bc7040c666a9f6264f982aa4ad77ff7217a8\\\",\\\"DataManagerKeys\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223\\\",\\\"TestOnly\\\":\\\"true\\\"}\""}, - []string{"registerObjective", "\"{\\\"Name\\\":\\\"Titanic: Machine Learning From Disaster\\\",\\\"DescriptionHash\\\":\\\"1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb\\\",\\\"DescriptionStorageAddress\\\":\\\"http://owkin.substrabac:8000/objective/1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb/description/\\\",\\\"MetricsName\\\":\\\"accuracy\\\",\\\"MetricsHash\\\":\\\"0bc13ad2e481c1a52959a228984bbee2e31271d567ea55a458e9ae92d481fedb\\\",\\\"MetricsStorageAddress\\\":\\\"http://owkin.substrabac:8000/objective/1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb/metrics/\\\",\\\"TestDataset\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223:1a8532bd84d5ef785a4abe503a12bc7040c666a9f6264f982aa4ad77ff7217a8\\\",\\\"Permissions\\\":\\\"all\\\"}\""}, - []string{"registerAlgo", "\"{\\\"Name\\\":\\\"Constant death 
predictor\\\",\\\"Hash\\\":\\\"10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9\\\",\\\"StorageAddress\\\":\\\"http://owkin.substrabac:8000/algo/10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9/file/\\\",\\\"DescriptionHash\\\":\\\"1dae14e339c94ae04cc8846d353c07c8de96a38d6c5b5ee4486c4102ff011450\\\",\\\"DescriptionStorageAddress\\\":\\\"http://owkin.substrabac:8000/algo/10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9/description/\\\",\\\"Permissions\\\":\\\"all\\\"}\""}, + []string{"registerObjective", "\"{\\\"Name\\\":\\\"Titanic: Machine Learning From Disaster\\\",\\\"DescriptionChecksum\\\":\\\"1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb\\\",\\\"DescriptionStorageAddress\\\":\\\"http://owkin.substrabac:8000/objective/1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb/description/\\\",\\\"MetricsName\\\":\\\"accuracy\\\",\\\"MetricsChecksum\\\":\\\"0bc13ad2e481c1a52959a228984bbee2e31271d567ea55a458e9ae92d481fedb\\\",\\\"MetricsStorageAddress\\\":\\\"http://owkin.substrabac:8000/objective/1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb/metrics/\\\",\\\"TestDataset\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223:1a8532bd84d5ef785a4abe503a12bc7040c666a9f6264f982aa4ad77ff7217a8\\\",\\\"Permissions\\\":\\\"all\\\"}\""}, + []string{"registerAlgo", "\"{\\\"Name\\\":\\\"Constant death predictor\\\",\\\"Checksum\\\":\\\"10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9\\\",\\\"StorageAddress\\\":\\\"http://owkin.substrabac:8000/algo/10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9/file/\\\",\\\"DescriptionChecksum\\\":\\\"1dae14e339c94ae04cc8846d353c07c8de96a38d6c5b5ee4486c4102ff011450\\\",\\\"DescriptionStorageAddress\\\":\\\"http://owkin.substrabac:8000/algo/10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9/description/\\\",\\\"Permissions\\\":\\\"all\\\"}\""}, []string{"createTraintuple", 
"\"{\\\"AlgoKey\\\":\\\"10a16f1b96beb3c07550103a9f15b3c2a77b15046cc7c70b762606590fb99de9\\\",\\\"ObjectiveKey\\\":\\\"1158d2f5c0cf9f80155704ca0faa28823b145b42ebdba2ca38bd726a1377e1cb\\\",\\\"InModels\\\":\\\"\\\",\\\"DataManagerKey\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223\\\",\\\"DataSampleKeys\\\":\\\"47f9af29d34d737acfb0e37d93bfa650979292297ed263e8536ef3d13f70c83e,df94060511117dd25da1d2b1846f9be17340128233c8b24694d5e780d909b22c,50b7a4b4f2541674958fd09a061276862e1e2ea4dbdd0e1af06e70051804e33b\\\",\\\"FLTask\\\":\\\"\\\",\\\"Rank\\\":\\\"\\\",\\\"Tag\\\":\\\"titanic v0\\\"}\""}, []string{"createTesttuple", "\"{\\\"TraintupleKey\\\":\\\"8daf7d448d0318dd8b06648cf32dde35f36171b308dec8675c8ff8e718acdac4\\\",\\\"DataManagerKey\\\":\\\"17dbc4ece248304cab7b1dd53ec7edf1ebf8a5e12ff77a26dc6e8da9db4da223\\\",\\\"DataSampleKeys\\\":\\\"1befb03ceed3ab7ec9fa4bebe9b681bbc7725a402e03f9e64f9f1677cf619183\\\",\\\"Tag\\\":\\\"titanic v0\\\"}\""}, []string{"createTesttuple", "\"{\\\"TraintupleKey\\\":\\\"8daf7d448d0318dd8b06648cf32dde35f36171b308dec8675c8ff8e718acdac4\\\",\\\"DataManagerKey\\\":\\\"\\\",\\\"DataSampleKeys\\\":\\\"\\\",\\\"Tag\\\":\\\"\\\"}\""}, []string{"logStartTrain", "\"{\\\"Key\\\":\\\"8daf7d448d0318dd8b06648cf32dde35f36171b308dec8675c8ff8e718acdac4\\\"}\""}, - []string{"logSuccessTrain", "\"{\\\"Key\\\":\\\"8daf7d448d0318dd8b06648cf32dde35f36171b308dec8675c8ff8e718acdac4\\\",\\\"Log\\\":\\\"Train - CPU:119.66 % - Mem:0.04 GB - GPU:0.00 % - GPU Mem:0.00 GB; \\\",\\\"OutModel\\\":{\\\"Hash\\\":\\\"6f6f2c318ff95ea7de9e4c01395b78b9217ddb134279275dae7842e7d4eb4c16\\\",\\\"StorageAddress\\\":\\\"http://owkin.substrabac:8000/model/6f6f2c318ff95ea7de9e4c01395b78b9217ddb134279275dae7842e7d4eb4c16/file/\\\"},\\\"Perf\\\":0.61610484}\""}, + []string{"logSuccessTrain", "\"{\\\"Key\\\":\\\"8daf7d448d0318dd8b06648cf32dde35f36171b308dec8675c8ff8e718acdac4\\\",\\\"Log\\\":\\\"Train - CPU:119.66 % - Mem:0.04 GB - GPU:0.00 % - GPU Mem:0.00 GB; 
\\\",\\\"OutModel\\\":{\\\"Checksum\\\":\\\"6f6f2c318ff95ea7de9e4c01395b78b9217ddb134279275dae7842e7d4eb4c16\\\",\\\"StorageAddress\\\":\\\"http://owkin.substrabac:8000/model/6f6f2c318ff95ea7de9e4c01395b78b9217ddb134279275dae7842e7d4eb4c16/file/\\\"},\\\"Perf\\\":0.61610484}\""}, []string{"logStartTest", "\"{\\\"Key\\\":\\\"81bad50d76898ba6ea5af9d0a4816726bd46b947730a1bc2dd1d6755e8ab682b\\\"}\""}, []string{"logSuccessTest", "\"{\\\"Key\\\":\\\"81bad50d76898ba6ea5af9d0a4816726bd46b947730a1bc2dd1d6755e8ab682b\\\",\\\"Log\\\":\\\"Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; \\\",\\\"Perf\\\":0.6179775}\""}, }