Spaces:
Build error
Build error
Commit ·
660735b
0
Parent(s):
Duplicate from Wootang01/sketch_classifier
Browse files
Co-authored-by: David Woo <[email protected]>
- .gitattributes +27 -0
- README.md +13 -0
- app.py +43 -0
- class_names.txt +100 -0
- pytorch_model.bin +3 -0
- requirements.txt +2 -0
.gitattributes
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bin.* filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Sketch_classifier
|
| 3 |
+
emoji: 📈
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: red
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 2.8.14
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
duplicated_from: Wootang01/sketch_classifier
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
|
app.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
LABELS = Path('class_names.txt').read_text().splitlines()
|
| 7 |
+
title = "Sketch Classifier"
|
| 8 |
+
description = "This machine has vision. To test the machine, if you sketch something below, the machine will attempt to classify your sketch. It can guess from 100 objects and it will present to you its top three guesses. Beside each guess, the length of the bar indicates the confidence with which the machine has identified your sketch. The longer the bar, the more confident the machine is."
|
| 9 |
+
|
| 10 |
+
model = nn.Sequential(
|
| 11 |
+
nn.Conv2d(1, 32, 3, padding='same'),
|
| 12 |
+
nn.ReLU(),
|
| 13 |
+
nn.MaxPool2d(2),
|
| 14 |
+
nn.Conv2d(32, 64, 3, padding='same'),
|
| 15 |
+
nn.ReLU(),
|
| 16 |
+
nn.MaxPool2d(2),
|
| 17 |
+
nn.Conv2d(64, 128, 3, padding='same'),
|
| 18 |
+
nn.ReLU(),
|
| 19 |
+
nn.MaxPool2d(2),
|
| 20 |
+
nn.Flatten(),
|
| 21 |
+
nn.Linear(1152, 256),
|
| 22 |
+
nn.ReLU(),
|
| 23 |
+
nn.Linear(256, len(LABELS)),
|
| 24 |
+
)
|
| 25 |
+
state_dict = torch.load('pytorch_model.bin', map_location='cpu')
|
| 26 |
+
model.load_state_dict(state_dict, strict=False)
|
| 27 |
+
model.eval()
|
| 28 |
+
|
| 29 |
+
def predict(img):
|
| 30 |
+
x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.
|
| 31 |
+
with torch.no_grad():
|
| 32 |
+
out = model(x)
|
| 33 |
+
probabilities = torch.nn.functional.softmax(out[0], dim=0)
|
| 34 |
+
values, indices = torch.topk(probabilities, 3)
|
| 35 |
+
confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}
|
| 36 |
+
return confidences
|
| 37 |
+
|
| 38 |
+
gr.Interface(fn=predict,
|
| 39 |
+
inputs="sketchpad",
|
| 40 |
+
outputs="label",
|
| 41 |
+
title=title,
|
| 42 |
+
description=description,
|
| 43 |
+
live=True).launch(debug=True)
|
class_names.txt
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
airplane
|
| 2 |
+
alarm_clock
|
| 3 |
+
anvil
|
| 4 |
+
apple
|
| 5 |
+
axe
|
| 6 |
+
baseball
|
| 7 |
+
baseball_bat
|
| 8 |
+
basketball
|
| 9 |
+
beard
|
| 10 |
+
bed
|
| 11 |
+
bench
|
| 12 |
+
bicycle
|
| 13 |
+
bird
|
| 14 |
+
book
|
| 15 |
+
bread
|
| 16 |
+
bridge
|
| 17 |
+
broom
|
| 18 |
+
butterfly
|
| 19 |
+
camera
|
| 20 |
+
candle
|
| 21 |
+
car
|
| 22 |
+
cat
|
| 23 |
+
ceiling_fan
|
| 24 |
+
cell_phone
|
| 25 |
+
chair
|
| 26 |
+
circle
|
| 27 |
+
clock
|
| 28 |
+
cloud
|
| 29 |
+
coffee_cup
|
| 30 |
+
cookie
|
| 31 |
+
cup
|
| 32 |
+
diving_board
|
| 33 |
+
donut
|
| 34 |
+
door
|
| 35 |
+
drums
|
| 36 |
+
dumbbell
|
| 37 |
+
envelope
|
| 38 |
+
eye
|
| 39 |
+
eyeglasses
|
| 40 |
+
face
|
| 41 |
+
fan
|
| 42 |
+
flower
|
| 43 |
+
frying_pan
|
| 44 |
+
grapes
|
| 45 |
+
hammer
|
| 46 |
+
hat
|
| 47 |
+
headphones
|
| 48 |
+
helmet
|
| 49 |
+
hot_dog
|
| 50 |
+
ice_cream
|
| 51 |
+
key
|
| 52 |
+
knife
|
| 53 |
+
ladder
|
| 54 |
+
laptop
|
| 55 |
+
light_bulb
|
| 56 |
+
lightning
|
| 57 |
+
line
|
| 58 |
+
lollipop
|
| 59 |
+
microphone
|
| 60 |
+
moon
|
| 61 |
+
mountain
|
| 62 |
+
moustache
|
| 63 |
+
mushroom
|
| 64 |
+
pants
|
| 65 |
+
paper_clip
|
| 66 |
+
pencil
|
| 67 |
+
pillow
|
| 68 |
+
pizza
|
| 69 |
+
power_outlet
|
| 70 |
+
radio
|
| 71 |
+
rainbow
|
| 72 |
+
rifle
|
| 73 |
+
saw
|
| 74 |
+
scissors
|
| 75 |
+
screwdriver
|
| 76 |
+
shorts
|
| 77 |
+
shovel
|
| 78 |
+
smiley_face
|
| 79 |
+
snake
|
| 80 |
+
sock
|
| 81 |
+
spider
|
| 82 |
+
spoon
|
| 83 |
+
square
|
| 84 |
+
star
|
| 85 |
+
stop_sign
|
| 86 |
+
suitcase
|
| 87 |
+
sun
|
| 88 |
+
sword
|
| 89 |
+
syringe
|
| 90 |
+
t-shirt
|
| 91 |
+
table
|
| 92 |
+
tennis_racquet
|
| 93 |
+
tent
|
| 94 |
+
tooth
|
| 95 |
+
traffic_light
|
| 96 |
+
tree
|
| 97 |
+
triangle
|
| 98 |
+
umbrella
|
| 99 |
+
wheel
|
| 100 |
+
wristwatch
|
pytorch_model.bin
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:effb6ea6f1593c09e8247944028ed9c309b5ff1cef82ba38b822bee2ca4d0f3c
|
| 3 |
+
size 1656903
|
requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
torch
|