MojoHz committed on
Commit
f8cada4
·
verified ·
1 Parent(s): 96f1cb1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -40
app.py CHANGED
@@ -1,41 +1,20 @@
1
  # -*- coding: utf-8 -*-
2
  """saivv_protoype"""
3
 
4
- # Install libraries (if not installed by requirements.txt)
5
- try:
6
- import speech_recognition as sr
7
- except ImportError:
8
- pip install speechrecognition
9
-
10
- try:
11
- import pytesseract
12
- except ImportError:
13
- pip install pytesseract
14
-
15
- try:
16
- import gradio as gr
17
- except ImportError:
18
- pip install gradio
19
-
20
  import cv2 # For image processing with OpenCV
21
  import pytesseract # For Optical Character Recognition (OCR) on receipts
22
  import gradio as gr # For creating the Gradio interface
 
23
 
24
- # Only necessary pip installs; Hugging Face Spaces handles requirements.txt installs
25
- try:
26
- from torch import cuda, bfloat16
27
- import transformers
28
- from transformers import AutoTokenizer
29
- import torch
30
- from langchain.llms import HuggingFacePipeline
31
- from langchain.chains import RetrievalQA
32
- from langchain.vectorstores import chroma
33
- except ImportError:
34
- pip install langchain langchain-community langchain-core transformers
35
- pip install bitsandbytes accelerate
36
 
 
 
37
  model_id = 'HuggingFaceH4/zephyr-7b-beta'
38
-
39
  device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
40
  bnb_config = transformers.BitsAndBytesConfig(
41
  load_in_4bit=True,
@@ -71,15 +50,9 @@ query_pipeline = transformers.pipeline(
71
  device_map="auto"
72
  )
73
 
74
- from IPython.display import display, Markdown
75
- def colorize_text(text):
76
- for word, color in zip(["Reasoning", "Question", "Answer", "Total time"], ["blue", "red", "green", "magenta"]):
77
- text = text.replace(f"{word}:", f"\n\n**<font color='{color}'>{word}:</font>**")
78
- return text
79
-
80
  llm = HuggingFacePipeline(pipeline=query_pipeline)
81
 
82
- # Define structured user profile and question
83
  user_profile = """
84
  User Profile:
85
  Age: 40, Gender: Non-Binary, Marital Status: Divorced, Income Level: Medium ($2733),
@@ -92,14 +65,18 @@ Home Shopping: $235.68, Others: $253.45
92
  """
93
 
94
  question = "Based on this data, can I buy a Lamborghini?"
95
-
96
- # Combine structured data into prompt
97
  prompt = f"{user_profile}\n\nQuestion: {question}"
98
 
99
- # Send prompt to LLM
100
  response = llm(prompt=prompt)
101
 
102
- # Display the result with Markdown formatting
 
 
 
 
 
 
103
  full_response = f"**Question:** {question}\n\n**Answer:** {response}"
104
  display(Markdown(colorize_text(full_response)))
105
 
 
1
  # -*- coding: utf-8 -*-
2
  """saivv_protoype"""
3
 
4
+ # Import necessary libraries
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  import cv2 # For image processing with OpenCV
6
  import pytesseract # For Optical Character Recognition (OCR) on receipts
7
  import gradio as gr # For creating the Gradio interface
8
+ import speech_recognition as sr # For voice recognition
9
 
# Model setup (using transformers)
import torch
import transformers  # needed: transformers.BitsAndBytesConfig is referenced below
from transformers import AutoTokenizer
from langchain.llms import HuggingFacePipeline
 
 
 
 
 
 
 
 
14
 
15
+ # Initialize device and model config
16
+ from torch import cuda, bfloat16
17
  model_id = 'HuggingFaceH4/zephyr-7b-beta'
 
18
  device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
19
  bnb_config = transformers.BitsAndBytesConfig(
20
  load_in_4bit=True,
 
50
  device_map="auto"
51
  )
52
 
 
 
 
 
 
 
53
  llm = HuggingFacePipeline(pipeline=query_pipeline)
54
 
55
+ # User profile setup
56
  user_profile = """
57
  User Profile:
58
  Age: 40, Gender: Non-Binary, Marital Status: Divorced, Income Level: Medium ($2733),
 
65
  """
66
 
67
  question = "Based on this data, can I buy a Lamborghini?"
 
 
68
  prompt = f"{user_profile}\n\nQuestion: {question}"
69
 
70
+ # Get response from LLM
71
  response = llm(prompt=prompt)
72
 
73
+ # Display result
74
+ from IPython.display import display, Markdown
75
def colorize_text(text):
    """Highlight known section labels in *text* with colored Markdown markup.

    Each occurrence of "Reasoning:", "Question:", "Answer:" or
    "Total time:" is prefixed with a paragraph break and wrapped in a
    bold ``<font>`` tag of a fixed color. Text without those labels is
    returned unchanged.
    """
    # Label -> color mapping; insertion order matches the original lists.
    label_colors = {
        "Reasoning": "blue",
        "Question": "red",
        "Answer": "green",
        "Total time": "magenta",
    }
    for label, color in label_colors.items():
        marker = f"{label}:"
        replacement = f"\n\n**<font color='{color}'>{label}:</font>**"
        text = text.replace(marker, replacement)
    return text
+
80
  full_response = f"**Question:** {question}\n\n**Answer:** {response}"
81
  display(Markdown(colorize_text(full_response)))
82