aarodi committed on
Commit bce2657 · 1 Parent(s): 981eec3

Delete app copy.py

Files changed (1)
  1. app copy.py +0 -247
app copy.py DELETED
@@ -1,247 +0,0 @@
- import gradio as gr
- from PIL import Image
- import onnxruntime as ort
- import torchvision.transforms as transforms
- import json
- import os
- import numpy as np
- import pandas as pd
- import random
- from huggingface_hub import snapshot_download, HfApi
- from transformers import CLIPTokenizer
-
- # --- Config ---
- HUB_REPO_ID = "CDL-AMLRT/OpenArenaLeaderboard"
- HF_TOKEN = os.environ.get("HF_TOKEN")
- LOCAL_JSON = "leaderboard.json"
- HUB_JSON = "leaderboard.json"
- MODEL_PATH = "mobilenet_v2_fake_detector.onnx"
- CLIP_IMAGE_ENCODER_PATH = "clip_image_encoder.onnx"
- CLIP_TEXT_ENCODER_PATH = "clip_text_encoder.onnx"
- PROMPT_CSV_PATH = "generate2_1.csv"
- PROMPT_MATCH_THRESHOLD = 10 # percent
-
- # --- Download leaderboard + model checkpoint from HF Hub ---
- def load_assets():
-     try:
-         snapshot_download(
-             repo_id=HUB_REPO_ID,
-             local_dir=".",
-             repo_type="dataset",
-             token=HF_TOKEN,
-             allow_patterns=[HUB_JSON, MODEL_PATH, CLIP_IMAGE_ENCODER_PATH, CLIP_TEXT_ENCODER_PATH, PROMPT_CSV_PATH]
-         )
-     except Exception as e:
-         print(f"Failed to load assets from HF Hub: {e}")
-
- load_assets()
-
- # --- Load prompts from CSV ---
- def load_prompts():
-     try:
-         df = pd.read_csv(PROMPT_CSV_PATH)
-         if "prompt" in df.columns:
-             return df["prompt"].dropna().tolist()
-         else:
-             print("CSV missing 'prompt' column.")
-             return []
-     except Exception as e:
-         print(f"Failed to load prompts: {e}")
-         return []
-
- PROMPT_LIST = load_prompts()
-
- def load_initial_state():
-     sorted_scores = sorted(leaderboard_scores.items(), key=lambda x: x[1], reverse=True)
-     leaderboard_table = [[name, points] for name, points in sorted_scores]
-     return gr.update(value=get_random_prompt()), leaderboard_table
-
-
- # --- Load leaderboard ---
- def load_leaderboard():
-     try:
-         with open(HUB_JSON, "r") as f:
-             return json.load(f)
-     except Exception as e:
-         print(f"Failed to read leaderboard: {e}")
-         return {}
-
-
- leaderboard_scores = load_leaderboard()
-
- # --- Save and push to HF Hub ---
- def save_leaderboard():
-     try:
-         with open(HUB_JSON, "w", encoding="utf-8") as f:
-             json.dump(leaderboard_scores, f, ensure_ascii=False)
-
-         if HF_TOKEN is None:
-             print("HF_TOKEN not set. Skipping push to hub.")
-             return
-
-         api = HfApi()
-         api.upload_file(
-             path_or_fileobj=HUB_JSON,
-             path_in_repo=HUB_JSON,
-             repo_id=HUB_REPO_ID,
-             repo_type="dataset",
-             token=HF_TOKEN,
-             commit_message="Update leaderboard"
-         )
-     except Exception as e:
-         print(f"Failed to save leaderboard to HF Hub: {e}")
-
- # --- Load ONNX models ---
- session = ort.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])
- input_name = session.get_inputs()[0].name
-
- clip_image_sess = ort.InferenceSession(CLIP_IMAGE_ENCODER_PATH, providers=["CPUExecutionProvider"])
- clip_text_sess = ort.InferenceSession(CLIP_TEXT_ENCODER_PATH, providers=["CPUExecutionProvider"])
- clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
-
- transform = transforms.Compose([
-     transforms.Resize((224, 224)),
-     transforms.ToTensor(),
-     transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
- ])
-
- def compute_prompt_match(image: Image.Image, prompt: str) -> float:
-     try:
-         img_tensor = transform(image).unsqueeze(0).numpy().astype(np.float32)
-         image_features = clip_image_sess.run(None, {clip_image_sess.get_inputs()[0].name: img_tensor})[0][0]
-         image_features /= np.linalg.norm(image_features)
-
-         inputs = clip_tokenizer(prompt, return_tensors="np", padding="max_length", truncation=True, max_length=77)
-         input_ids = inputs["input_ids"]
-         attention_mask = inputs["attention_mask"]
-         text_features = clip_text_sess.run(None, {
-             clip_text_sess.get_inputs()[0].name: input_ids,
-             clip_text_sess.get_inputs()[1].name: attention_mask
-         })[0][0]
-         text_features /= np.linalg.norm(text_features)
-
-         sim = np.dot(image_features, text_features)
-         return round(sim * 100, 2)
-     except Exception as e:
-         print(f"CLIP ONNX match failed: {e}")
-         return 0.0
-
- # --- Main prediction logic ---
- def detect_with_model(image: Image.Image, prompt: str, username: str):
-     if not username.strip():
-         return "Please enter your name.", None, [], gr.update(visible=True), gr.update(visible=False), username
-
-     prompt_score = compute_prompt_match(image, prompt)
-     if prompt_score < PROMPT_MATCH_THRESHOLD:
-         message = f"⚠️ Prompt match too low ({round(prompt_score, 2)}%). Please generate an image that better matches the prompt."
-         return message, None, [], gr.update(visible=True), gr.update(visible=False), username
-
-     image_tensor = transforms.Resize((224, 224))(image)
-     image_tensor = transforms.ToTensor()(image_tensor).unsqueeze(0).numpy().astype(np.float32)
-     outputs = session.run(None, {input_name: image_tensor})
-     prob = round(1 / (1 + np.exp(-outputs[0][0][0])), 2)
-     prediction = "Real" if prob > 0.5 else "Fake"
-
-     score = 1 if prediction == "Real" else 0
-     confidence = round(prob * 100, 2) if prediction == "Real" else round((1 - prob) * 100, 2)
-
-     message = f"🔍 Prediction: {prediction} ({round(confidence, 2)}% confidence)\n🧐 Prompt match: {prompt_score}%"
-
-     if prediction == "Real":
-         leaderboard_scores[username] = leaderboard_scores.get(username, 0) + score
-         message += "\n🎉 Nice! You fooled the AI. +1 point!"
-     else:
-         message += "\n😅 The AI caught you this time. Try again!"
-
-     save_leaderboard()
-
-     sorted_scores = sorted(leaderboard_scores.items(), key=lambda x: x[1], reverse=True)
-     leaderboard_table = [[name, points] for name, points in sorted_scores]
-
-     return (
-         message,
-         image,
-         leaderboard_table,
-         gr.update(visible=False),
-         gr.update(visible=True),
-         username
-     )
-
- # --- UI Layout ---
- def get_random_prompt():
-     return random.choice(PROMPT_LIST) if PROMPT_LIST else "A synthetic scene with dramatic lighting"
-
- with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
-     gr.Markdown("## 🌝 OpenFake Arena")
-     gr.Markdown("Welcome to the OpenFake Arena!\n\n**Your mission:** Generate a synthetic image for the prompt, upload it, and try to fool the AI detector into thinking it’s real.\n\n**Rules:**\n- Only synthetic images allowed!\n- No cheating with real photos.\n\nMake it wild. Make it weird. Most of all — make it fun.")
-
-     with gr.Group(visible=True) as input_section:
-         username_input = gr.Textbox(label="Your Name", placeholder="Enter your name", interactive=True)
-         model_input = gr.Textbox(label="Model Used", placeholder="Name of the model used to generate the image", interactive=True)
-
-         with gr.Row():
-             prompt_input = gr.Textbox(
-                 label="Prompt to use",
-                 placeholder="e.g., ...",
-                 value="",
-                 lines=2
-             )
-
-         with gr.Row():
-             image_input = gr.Image(type="pil", label="Upload Synthetic Image")
-
-         with gr.Row():
-             submit_btn = gr.Button("Upload")
-
-     try_again_btn = gr.Button("Try Again", visible=False)
-
-     with gr.Group():
-         gr.Markdown("### 🎯 Result")
-         with gr.Row():
-             prediction_output = gr.Textbox(label="Prediction", interactive=False)
-             image_output = gr.Image(label="Submitted Image", show_label=False)
-
-     with gr.Group():
-         gr.Markdown("### 🏆 Leaderboard")
-         leaderboard = gr.Dataframe(
-             headers=["Username", "Score"],
-             datatype=["str", "number"],
-             interactive=False,
-             row_count=5,
-             visible=True
-         )
-
-     submit_btn.click(
-         fn=detect_with_model,
-         inputs=[image_input, prompt_input, username_input],
-         outputs=[
-             prediction_output,
-             image_output,
-             leaderboard,
-             input_section,
-             try_again_btn,
-             username_input
-         ]
-     )
-
-     try_again_btn.click(
-         fn=lambda name: ("", None, [], gr.update(visible=True), gr.update(visible=False), name, gr.update(value=get_random_prompt())),
-         inputs=[username_input],
-         outputs=[
-             prediction_output,
-             image_output,
-             leaderboard,
-             input_section,
-             try_again_btn,
-             username_input,
-             prompt_input
-         ]
-     )
-
-     demo.load(
-         fn=load_initial_state,
-         outputs=[prompt_input, leaderboard]
-     )
-
- if __name__ == "__main__":
-     demo.launch()