import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.image as Image
import numpy as np
# GLOBALS
model = None
movenet = None
interpreter = None
# NOTE: Code snippets sampled from the TensorFlow Lite MoveNet Lightning version 4 model, subject to the Apache 2.0 Licence
# https://tfhub.dev/google/lite-model/movenet/singlepose/lightning/tflite/float16/4
# Any changes will be noted.
def initialise_model():
    """Loads the local TFLite MoveNet Lightning model and allocates its tensors."""
    # model = hub.load("https://tfhub.dev/google/movenet/singlepose/lightning/4")
    # movenet = model.signatures['serving_default']
    print("initialise")
    global interpreter
    # CHANGED: the model has a different name and is local to the program
    interpreter = tf.lite.Interpreter(model_path="lite-model_movenet_singlepose_lightning_tflite_float16_4.tflite")
    interpreter.allocate_tensors()
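# Hedged sketch, not part of the original script: a quick sanity check that the allocated
# interpreter expects the 192x192 input that the prediction functions below resize to.
# The function name is an assumption; it only uses standard tf.lite.Interpreter calls.
def print_model_input_details():
    details = interpreter.get_input_details()
    # The model is fed uint8 data in this script (see the tf.uint8 cast in posePredictionTFLite).
    print("input shape:", details[0]['shape'], "dtype:", details[0]['dtype'])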
# CHANGED: the script contains both the normal and the Lite version of MoveNet
def pose_PredictonTF(imageData):
    """
    Estimates the pose of a single human in a JPEG picture using the TensorFlow Hub MoveNet model.
    Requires the hub.load(...) lines in initialise_model to be enabled so that `movenet` is set.
    :param imageData: JPEG-encoded image bytes.
    :return: the MoveNet output; its 'output_0' entry is a [1, 1, 17, 3] tensor of 17 body
             keypoints given as normalised (y, x) coordinates plus a confidence score.
    """
    # Image = tf.io.read_file(image_path)
    image = tf.compat.v1.image.decode_jpeg(imageData)
    image = tf.expand_dims(image, axis=0)
    # Resize and pad the image to keep the aspect ratio and fit the expected 192x192 input size.
    image = tf.cast(tf.image.resize_with_pad(image, 192, 192), dtype=tf.int32)
    # Run model inference.
    outputs = movenet(image)
    # The output is a [1, 1, 17, 3] tensor.
    # keypoints = outputs['output_0']
    # print(keypoints)
    return outputs
def posePredictionTFLite(imageData):
    """
    Estimates the pose of a single human in a JPEG picture using the local TFLite MoveNet interpreter.
    :param imageData: JPEG-encoded image bytes.
    :return: a [1, 1, 17, 3] array of 17 body keypoints as normalised (y, x, confidence) values.
    """
    image = tf.compat.v1.image.decode_jpeg(imageData)
    image = tf.expand_dims(image, axis=0)
    # Resize and pad to the 192x192 uint8 input expected by the interpreter.
    image = tf.cast(tf.image.resize_with_pad(image, 192, 192), dtype=tf.uint8)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.set_tensor(input_details[0]['index'], image.numpy())
    interpreter.invoke()
    return interpreter.get_tensor(output_details[0]['index'])
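# Hedged sketch, not part of the original script: converts the [1, 1, 17, 3] keypoint output
# returned above into pixel coordinates, mirroring the scaling and the 0.3 confidence threshold
# used in TestingPointsOnImage below. The function name and default threshold are assumptions
# for illustration only.
def keypoints_to_pixels(keypoints, image_height, image_width, threshold=0.3):
    points = np.resize(np.asarray(keypoints), (17, 3))  # flatten to 17 rows of (y, x, confidence)
    pixels = []
    for y, x, confidence in points:
        if confidence > threshold:
            pixels.append((x * image_width, y * image_height, float(confidence)))
    return pixels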
#CHANGED: temporary data used for quick testing
def TestingPointsOnImage():
    """Plots a hard-coded set of MoveNet keypoints over a local test image."""
    test = np.array([[[[0.5052221, 0.42522937, 0.7141144],
                       [0.48090276, 0.45370695, 0.5738922],
                       [0.47875905, 0.40008497, 0.6808091],
                       [0.4980592, 0.4830124, 0.5379905],
                       [0.49261594, 0.36388832, 0.8307032],
                       [0.60409665, 0.551592, 0.6490847],
                       [0.6021359, 0.29204538, 0.8377292],
                       [0.73887765, 0.66975343, 0.6410032],
                       [0.7459737, 0.17954277, 0.56632763],
                       [0.6070323, 0.7081243, 0.738752],
                       [0.6068721, 0.1500265, 0.7462287],
                       [0.84355897, 0.5062511, 0.5052821],
                       [0.8494219, 0.33797163, 0.462009],
                       [0.7733456, 0.5940913, 0.04863108],
                       [0.7795013, 0.2247358, 0.0506536],
                       [0.6130973, 0.71571106, 0.1039405],
                       [0.598618, 0.13673246, 0.18669602]]]])
    testnp = np.resize(test, (17, 3))
    print(testnp)
    x = []
    y = []
    c = []
    img = np.asarray(Image.imread("D:/fruta/Pictures/Camera Roll/testing thing.jpg"))
    print(img.shape)
    for i in range(17):
        # Only keep keypoints above a 0.3 confidence threshold, scaled to pixel coordinates.
        if test[0, 0, i, 2] > 0.3:
            y.append(test[0, 0, i, 0] * img.shape[0])
            x.append(test[0, 0, i, 1] * img.shape[1])
            c.append(test[0, 0, i, 2])
    # print(x)
    # print(y)
    # print(c)
    plt.figure()
    plt.imshow(img)
    plt.scatter(x, y)
    plt.show()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print(tf.config.list_physical_devices())
    initialise_model()
    print(posePredictionTFLite(tf.io.read_file("D:/fruta/Pictures/Camera Roll/testing thing.jpg")))