botsi committed on
Commit
c55b826
·
verified ·
1 Parent(s): 41e7437

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -19,8 +19,8 @@ from system_prompt_config import construct_input_prompt
19
  import json
20
  import atexit
21
 
22
- from huggingface_hub import login
23
- HF_TOKEN = os.getenv('HF_TOKEN')
24
 
25
  # From 70B code
26
  system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
@@ -58,8 +58,8 @@ if torch.cuda.is_available():
58
  '''
59
 
60
 
61
- #if not torch.cuda.is_available():
62
- # DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
63
 
64
 
65
  if torch.cuda.is_available():
@@ -97,10 +97,10 @@ def generate(
97
  # conversation.append({"role": "system", "content": system_prompt})
98
 
99
  # Construct the input prompt using the functions from the system_prompt_config module
100
- # input_prompt = construct_input_prompt(chat_history, message)
101
 
102
  # Convert input prompt to tensor
103
- # input_ids = tokenizer(input_prompt, return_tensors="pt").to(model.device)
104
 
105
 
106
  for user, assistant in chat_history:
 
19
  import json
20
  import atexit
21
 
22
+ #from huggingface_hub import login
23
+ #HF_TOKEN = os.getenv('HF_TOKEN')
24
 
25
  # From 70B code
26
  system_message = "\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
 
58
  '''
59
 
60
 
61
+ if not torch.cuda.is_available():
62
+ DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
63
 
64
 
65
  if torch.cuda.is_available():
 
97
  # conversation.append({"role": "system", "content": system_prompt})
98
 
99
  # Construct the input prompt using the functions from the system_prompt_config module
100
+ input_prompt = construct_input_prompt(chat_history, message)
101
 
102
  # Convert input prompt to tensor
103
+ input_ids = tokenizer(input_prompt, return_tensors="pt").to(model.device)
104
 
105
 
106
  for user, assistant in chat_history: