Spaces:
Sleeping
Sleeping
Commit
·
5fafef7
1
Parent(s):
bb01969
add secret
Browse files
app.py
CHANGED
@@ -49,6 +49,9 @@ os.makedirs(LOG_DIR, exist_ok=True, mode=0o777)
|
|
49 |
os.environ["HF_HOME"] = os.path.join(PERSISTENT_PATH, ".huggingface")
|
50 |
os.makedirs(os.environ["HF_HOME"], exist_ok=True, mode=0o777)
|
51 |
|
|
|
|
|
|
|
52 |
# Logging Setup
|
53 |
logging.basicConfig(
|
54 |
filename=os.path.join(LOG_DIR, "app.log"),
|
@@ -61,15 +64,22 @@ logger = logging.getLogger(__name__)
|
|
61 |
model = None
|
62 |
|
63 |
def initialize_model():
|
|
|
|
|
|
|
|
|
|
|
|
|
64 |
global model
|
65 |
try:
|
66 |
if model is None:
|
67 |
model_cache = os.path.join(PERSISTENT_PATH, "models")
|
68 |
os.makedirs(model_cache, exist_ok=True, mode=0o777)
|
69 |
-
|
|
|
70 |
logger.info(f"Initialized model: {EMBEDDING_MODEL_NAME}")
|
71 |
return True
|
72 |
-
except requests.exceptions.
|
73 |
logger.error(f"Connection error during model download: {str(e)}\n{traceback.format_exc()}")
|
74 |
return False
|
75 |
except Exception as e:
|
@@ -116,7 +126,7 @@ def get_model():
|
|
116 |
@spaces.GPU
|
117 |
def process_files(files):
|
118 |
if not files:
|
119 |
-
return "Please upload one or more
|
120 |
|
121 |
try:
|
122 |
if not initialize_model():
|
@@ -124,7 +134,7 @@ def process_files(files):
|
|
124 |
|
125 |
valid_files = [f for f in files if f.name.lower().endswith('.txt')]
|
126 |
if not valid_files:
|
127 |
-
return "No
|
128 |
|
129 |
all_chunks = []
|
130 |
processed_files = 0
|
@@ -180,7 +190,7 @@ def semantic_search(query, top_k=5):
|
|
180 |
global model
|
181 |
if model is None:
|
182 |
return "Model not initialized. Please process files first."
|
183 |
-
|
184 |
try:
|
185 |
# Load saved embeddings and chunks from OUTPUTS_DIR
|
186 |
embeddings_file = os.path.join(OUTPUTS_DIR, "embeddings.npy")
|
@@ -277,7 +287,7 @@ def create_gradio_interface():
|
|
277 |
step=1,
|
278 |
label="Number of results to return"
|
279 |
)
|
280 |
-
search_button = gr.Button("
|
281 |
results_output = gr.Textbox(
|
282 |
label="Search Results",
|
283 |
lines=10,
|
@@ -289,14 +299,14 @@ def create_gradio_interface():
|
|
289 |
outputs=results_output
|
290 |
)
|
291 |
|
292 |
-
download_button = gr.Button("
|
293 |
download_button.click(
|
294 |
fn=download_results,
|
295 |
outputs=[gr.File(label="Download Results")]
|
296 |
)
|
297 |
|
298 |
with gr.Tab("Outputs"):
|
299 |
-
browse_button = gr.Button("
|
300 |
browse_button.click(
|
301 |
fn=browse_outputs,
|
302 |
outputs=[gr.Textbox(label="Browse Status")]
|
@@ -306,4 +316,4 @@ def create_gradio_interface():
|
|
306 |
|
307 |
if __name__ == "__main__":
|
308 |
demo = create_gradio_interface()
|
309 |
-
demo.launch(server_name="0.0.0.0")
|
|
|
49 |
os.environ["HF_HOME"] = os.path.join(PERSISTENT_PATH, ".huggingface")
|
50 |
os.makedirs(os.environ["HF_HOME"], exist_ok=True, mode=0o777)
|
51 |
|
52 |
+
# Set Hugging Face token
|
53 |
+
HF_TOKEN = os.getenv("HF_TOKEN")
|
54 |
+
|
55 |
# Logging Setup
|
56 |
logging.basicConfig(
|
57 |
filename=os.path.join(LOG_DIR, "app.log"),
|
|
|
64 |
model = None
|
65 |
|
66 |
def initialize_model():
|
67 |
+
"""
|
68 |
+
Initialize the sentence transformer model.
|
69 |
+
|
70 |
+
Returns:
|
71 |
+
bool: Whether the model was successfully initialized.
|
72 |
+
"""
|
73 |
global model
|
74 |
try:
|
75 |
if model is None:
|
76 |
model_cache = os.path.join(PERSISTENT_PATH, "models")
|
77 |
os.makedirs(model_cache, exist_ok=True, mode=0o777)
|
78 |
+
# Use the HF_TOKEN to load the model
|
79 |
+
model = SentenceTransformer(EMBEDDING_MODEL_NAME, cache_folder=model_cache, use_auth_token=HF_TOKEN)
|
80 |
logger.info(f"Initialized model: {EMBEDDING_MODEL_NAME}")
|
81 |
return True
|
82 |
+
except requests.exceptions.RequestException as e:
|
83 |
logger.error(f"Connection error during model download: {str(e)}\n{traceback.format_exc()}")
|
84 |
return False
|
85 |
except Exception as e:
|
|
|
126 |
@spaces.GPU
|
127 |
def process_files(files):
|
128 |
if not files:
|
129 |
+
return "Please upload one or more .txt files.", "", ""
|
130 |
|
131 |
try:
|
132 |
if not initialize_model():
|
|
|
134 |
|
135 |
valid_files = [f for f in files if f.name.lower().endswith('.txt')]
|
136 |
if not valid_files:
|
137 |
+
return "No .txt files found. Please upload valid .txt files.", "", ""
|
138 |
|
139 |
all_chunks = []
|
140 |
processed_files = 0
|
|
|
190 |
global model
|
191 |
if model is None:
|
192 |
return "Model not initialized. Please process files first."
|
193 |
+
|
194 |
try:
|
195 |
# Load saved embeddings and chunks from OUTPUTS_DIR
|
196 |
embeddings_file = os.path.join(OUTPUTS_DIR, "embeddings.npy")
|
|
|
287 |
step=1,
|
288 |
label="Number of results to return"
|
289 |
)
|
290 |
+
search_button = gr.Button(" Search")
|
291 |
results_output = gr.Textbox(
|
292 |
label="Search Results",
|
293 |
lines=10,
|
|
|
299 |
outputs=results_output
|
300 |
)
|
301 |
|
302 |
+
download_button = gr.Button(" Download Results")
|
303 |
download_button.click(
|
304 |
fn=download_results,
|
305 |
outputs=[gr.File(label="Download Results")]
|
306 |
)
|
307 |
|
308 |
with gr.Tab("Outputs"):
|
309 |
+
browse_button = gr.Button(" Browse Outputs")
|
310 |
browse_button.click(
|
311 |
fn=browse_outputs,
|
312 |
outputs=[gr.Textbox(label="Browse Status")]
|
|
|
316 |
|
317 |
if __name__ == "__main__":
|
318 |
demo = create_gradio_interface()
|
319 |
+
demo.launch(server_name="0.0.0.0")
|