nellaep committed on
Commit
61224b1
·
verified ·
1 Parent(s): 5446e24

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +15 -0
  2. anisol.py +66 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

# Set working directory
WORKDIR /app

# Install dependencies first so the layer is cached across code-only edits
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code.
# FIX: this commit uploads anisol.py — there is no app.py in the repo —
# so "COPY app.py ." would fail the build and "app:app" could never resolve.
COPY anisol.py .

# Expose port and run app
EXPOSE 7860
CMD ["uvicorn", "anisol:app", "--host", "0.0.0.0", "--port", "7860"]
anisol.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""AniSol: FastAPI service wrapping a causal-LM "sensei" chat model.

Originally exported from Colab:
https://colab.research.google.com/drive/1DiHMlxQx3QEAYivNMOl3olC8jj9mutCZ
"""

from fastapi import FastAPI, Request  # NOTE(review): Request is currently unused
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Allow any browser origin to call this API (the front-end is hosted elsewhere).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the fine-tuned model once at import time; inference only, so eval mode.
model_name = "nellaep/AniSolSenseiModel"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

# Crisis-related phrases that short-circuit generation and return the hotline
# message instead. NOTE(review): some entries use a curly apostrophe (’) as
# written here, not the ASCII ' — matching code must account for that.
sensitive_keywords = [
    "suicide",
    "kill myself",
    "end my life",
    "self harm",
    "cutting",
    "i want to die",
    "i want to disappear",
    "hurt myself",
    "life isn’t worth it",
    "i can’t take it anymore",
    "no reason to live",
    "i hate living",
    "die",
    "i give up",
]

# Fixed reply returned whenever a sensitive phrase is detected.
hotline_message = (
    "Your life is valuable twin. If you're feeling overwhelmed, "
    "please reach out for help.\n"
    "Call or text 988 (U.S. Suicide & Crisis Lifeline) for free, "
    "24/7 support.\n"
)
41
+
42
class InputText(BaseModel):
    """JSON request body for the /generate endpoint."""

    # Raw user message; checked against the crisis keywords, then fed to the model.
    input: str
44
+
45
@app.post("/generate")
async def generate_response(data: InputText):
    """Return a chat reply for one user message.

    If the message contains any crisis-related phrase, a fixed hotline
    message is returned instead of querying the model.

    Returns:
        {"response": str} — either the hotline message or the model's reply.
    """
    # Lowercase for case-insensitive matching, and normalize straight
    # apostrophes to the curly ones used in sensitive_keywords — otherwise
    # a user typing "i can't take it anymore" (ASCII ') would never match
    # the keyword "i can’t take it anymore" (U+2019). This only ever adds
    # matches relative to the previous behavior.
    user_input = data.input.lower().replace("'", "’")

    # Substring match, so short entries like "die" also fire inside longer
    # words (e.g. "diet"). Over-triggering is the safer failure mode here.
    if any(keyword in user_input for keyword in sensitive_keywords):
        return {"response": hotline_message}

    # Prompt format the model was tuned on; the reply follows "Output:".
    prompt = f"Input: {data.input}\nSensei: Kakashi\nOutput:"

    # No padding: a single sequence needs none, and a tokenizer without a
    # pad token would raise if padding=True were requested.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the full sequence (prompt + continuation) and keep only the
    # text after the final "Output:" marker.
    decoded = tokenizer.decode(output[0], skip_special_tokens=True)
    response = decoded.split("Output:")[-1].strip()

    return {"response": response}
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Runtime dependencies for the AniSol FastAPI service.
fastapi
transformers
torch
uvicorn