Merge pull request #44 from robmarkcole/black-format
isort then black
robmarkcole committed Dec 10, 2020
2 parents a9c5962 + fa4624f commit 6c4d3f2
Showing 33 changed files with 2,231 additions and 1,143 deletions.
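For context, a pass like this is normally produced by running isort first and then black over the source tree, exactly as the commit message says. A minimal sketch of an equivalent invocation, assuming both tools are installed and that the whole deepstack directory was the target (the path and the use of subprocess are assumptions, not taken from this commit):

    import subprocess

    # Sort and group the imports first (stdlib vs. third-party), then let black
    # normalise spacing, line wrapping and trailing commas on the same files.
    subprocess.run(["isort", "deepstack"], check=True)
    subprocess.run(["black", "deepstack"], check=True)

The order follows the commit message: isort only reorders and groups the import statements, while black then normalises spacing and line wrapping without reordering them, which is the pattern visible in the diffs below.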
7 changes: 3 additions & 4 deletions deepstack/init.py
@@ -1,15 +1,14 @@
import sqlite3
from sqlite3 import Cursor,Error
import os
import sqlite3
from sqlite3 import Cursor, Error

DATA_DIR = "/datastore"

CREATE_TABLE = "CREATE TABLE IF NOT EXISTS TB_EMBEDDINGS(userid TEXT PRIMARY KEY, embedding TEXT NOT NULL)"
CREATE_TABLE2 = "CREATE TABLE IF NOT EXISTS TB_EMBEDDINGS2(userid TEXT PRIMARY KEY, embedding TEXT NOT NULL)"
conn = sqlite3.connect(DATA_DIR+"/faceembedding.db")
conn = sqlite3.connect(DATA_DIR + "/faceembedding.db")
cursor = conn.cursor()
cursor.execute(CREATE_TABLE)
cursor.execute(CREATE_TABLE2)
conn.commit()
conn.close()

45 changes: 24 additions & 21 deletions deepstack/intelligencelayer/shared/commons/utils.py
@@ -2,82 +2,85 @@
import torch.nn as nn
import torch.nn.functional as F

def load_model(model,path):

def load_model(model, path):
checkpoint = torch.load(path, map_location=lambda storage, loc: storage)

try:
model.load_state_dict(checkpoint)

except:
copy = dict()
for x, y in zip(model.state_dict(), checkpoint):
new_name = y[y.index(x):]
new_name = y[y.index(x) :]
copy[new_name] = checkpoint[y]

def l2_norm(input,axis=1):
norm = torch.norm(input,2,axis,True)

def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output


def compute_distance(embeddings, embeddings2):

diff = embeddings.unsqueeze(-1) - embeddings2.transpose(1,0).unsqueeze(0)
distance = torch.sum(torch.pow(diff,2),dim=1)
diff = embeddings.unsqueeze(-1) - embeddings2.transpose(1, 0).unsqueeze(0)
distance = torch.sum(torch.pow(diff, 2), dim=1)

return distance


class _GlobalPoolNd(nn.Module):
def __init__(self,flatten=True):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(_GlobalPoolNd,self).__init__()
super(_GlobalPoolNd, self).__init__()
self.flatten = flatten

def pool(self,input):
def pool(self, input):
"""
:param input:
:return:
"""
raise NotImplementedError()

def forward(self,input):
def forward(self, input):
"""
:param input:
:return:
"""
input = self.pool(input)
size_0 = input.size(1)
return input.view(-1,size_0) if self.flatten else input
return input.view(-1, size_0) if self.flatten else input


class GlobalAvgPool2d(_GlobalPoolNd):
def __init__(self, flatten=True):
"""
:param flatten:
"""
super(GlobalAvgPool2d,self).__init__(flatten)
super(GlobalAvgPool2d, self).__init__(flatten)

def pool(self, input):
return F.adaptive_avg_pool2d(input,1)
return F.adaptive_avg_pool2d(input, 1)



class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)


class UpSampleInterpolate(nn.Module):
def __init__(self,scale_factor):
super(UpSampleInterpolate,self).__init__()
def __init__(self, scale_factor):
super(UpSampleInterpolate, self).__init__()

self.scale_factor = scale_factor

def forward(self,x):

return F.interpolate(x,scale_factor=self.scale_factor,mode="nearest")

def forward(self, x):

return F.interpolate(x, scale_factor=self.scale_factor, mode="nearest")
112 changes: 65 additions & 47 deletions deepstack/intelligencelayer/shared/detection.py
@@ -1,34 +1,36 @@

import torch
import time
import json
import io
import _thread as thread
from multiprocessing import Process
from PIL import Image,UnidentifiedImageError
import torch.nn.functional as F
import ast
import io
import json
import os
import sqlite3
import numpy as np
import warnings
import sys
import os
import time
import warnings
from multiprocessing import Process

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, UnidentifiedImageError

sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../"))

from process import YOLODetector
from shared import SharedOptions
import argparse
import traceback

import torchvision.transforms as transforms
import traceback
from PIL import UnidentifiedImageError
import argparse
from process import YOLODetector
from shared import SharedOptions

parser = argparse.ArgumentParser()
parser.add_argument("--model",type=str,default=None)
parser.add_argument("--name",type=str,default=None)
parser.add_argument("--model", type=str, default=None)
parser.add_argument("--name", type=str, default=None)

opt = parser.parse_args()


def objectdetection(thread_name: str, delay: float):

MODE = SharedOptions.MODE
@@ -40,49 +42,50 @@ def objectdetection(thread_name: str, delay: float):
if opt.name == None:
IMAGE_QUEUE = "detection_queue"
else:
IMAGE_QUEUE = opt.name+"_queue"
IMAGE_QUEUE = opt.name + "_queue"

if opt.model == None:
model_path = os.path.join(SHARED_APP_DIR,SharedOptions.SETTINGS.DETECTION_MODEL)
model_path = os.path.join(
SHARED_APP_DIR, SharedOptions.SETTINGS.DETECTION_MODEL
)
else:
model_path = opt.model

if MODE == "High":

reso = SharedOptions.SETTINGS.DETECTION_HIGH

elif MODE == "Medium":

reso = SharedOptions.SETTINGS.DETECTION_MEDIUM

elif MODE == "Low":

reso = SharedOptions.SETTINGS.DETECTION_LOW

detector = YOLODetector(model_path,reso,cuda=CUDA_MODE)
detector = YOLODetector(model_path, reso, cuda=CUDA_MODE)
while True:
queue = db.lrange(IMAGE_QUEUE,0,0)
queue = db.lrange(IMAGE_QUEUE, 0, 0)

db.ltrim(IMAGE_QUEUE, len(queue), -1)

db.ltrim(IMAGE_QUEUE,len(queue), -1)

if len(queue) > 0:

for req_data in queue:

req_data = json.JSONDecoder().decode(req_data)

req_data = json.JSONDecoder().decode(req_data)

img_id = req_data["imgid"]
req_id = req_data["reqid"]
req_type = req_data["reqtype"]
threshold = float(req_data["minconfidence"])

try:

img = os.path.join(TEMP_PATH,img_id)
det = detector.predict(img,threshold)
img = os.path.join(TEMP_PATH, img_id)

det = detector.predict(img, threshold)

outputs = []

for *xyxy, conf, cls in reversed(det):
@@ -94,32 +97,47 @@

label = detector.names[int(cls.item())]

detection = {"confidence":score,"label":label, "x_min":int(x_min), "y_min":int(y_min),"x_max":int(x_max), "y_max":int(y_max)}
detection = {
"confidence": score,
"label": label,
"x_min": int(x_min),
"y_min": int(y_min),
"x_max": int(x_max),
"y_max": int(y_max),
}

outputs.append(detection)

output = {"success":True,"predictions":outputs}
output = {"success": True, "predictions": outputs}

except UnidentifiedImageError:
err_trace = traceback.format_exc()
print(err_trace,file=sys.stderr,flush=True)
print(err_trace, file=sys.stderr, flush=True)

output = {
"success": False,
"error": "invalid image file",
"code": 400,
}

output = {"success":False, "error":"invalid image file","code":400}

except Exception:

err_trace = traceback.format_exc()
print(err_trace,file=sys.stderr,flush=True)

output = {"success":False, "error":"error occured on the server","code":500}

print(err_trace, file=sys.stderr, flush=True)

output = {
"success": False,
"error": "error occured on the server",
"code": 500,
}

finally:
db.set(req_id,json.dumps(output))
db.set(req_id, json.dumps(output))
if os.path.exists(TEMP_PATH + img_id):
os.remove(img)

time.sleep(delay)

p = Process(target=objectdetection,args=("",SharedOptions.SLEEP_TIME))
p.start()

p = Process(target=objectdetection, args=("", SharedOptions.SLEEP_TIME))
p.start()