sdfsdh committed
Commit 57db413 · verified · 1 Parent(s): 805ca54

Update src/streamlit_app.py

Files changed (1): src/streamlit_app.py +39 -87
src/streamlit_app.py CHANGED
@@ -1,95 +1,47 @@
-import os
 import streamlit as st
 from transformers import pipeline
-import glob
-import asyncio
 import torch
 
-# Set environment variables
-os.environ["STREAMLIT_SERVER_ENABLE_STATIC_SERVE"] = "false"
-os.environ["HF_HOME"] = "/tmp/hf_home"
-os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_home/models"
-os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/hf_home/hub"
-os.environ["STREAMLIT_CONFIG_DIR"] = "/tmp/.streamlit"
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
-os.environ["PYTHONWARNINGS"] = "ignore::FutureWarning"
-os.environ["HOME"] = "/tmp"  # Important: HOME must be set as well
-
-# Create cache directories and set permissions
-cache_dirs = [
-    "/tmp/hf_home",
-    "/tmp/hf_home/models",
-    "/tmp/hf_home/hub",
-    "/tmp/.streamlit"
-]
-
-def fix_permissions(path):
-    for root, dirs, files in os.walk(path):
-        for d in dirs:
-            os.chmod(os.path.join(root, d), 0o777)
-        for f in files:
-            os.chmod(os.path.join(root, f), 0o777)
-
-for dir_path in cache_dirs:
-    os.makedirs(dir_path, exist_ok=True)
-    os.chmod(dir_path, 0o777)
-    fix_permissions(dir_path)
-
-# Delete stale lock files
-lock_files = glob.glob("/tmp/hf_home/**/*.lock", recursive=True)
-for lock_file in lock_files:
-    try:
-        os.remove(lock_file)
-    except Exception:
-        pass
-
-# PyTorch JIT settings (comment out if this causes problems)
-try:
-    torch._C._jit_set_profiling_executor(False)
-    torch._C._jit_set_profiling_mode(False)
-except Exception:
-    pass
-
-def ensure_event_loop():
-    try:
-        asyncio.get_running_loop()
-    except RuntimeError:
-        asyncio.set_event_loop(asyncio.new_event_loop())
+st.title("🤖 Small LLM Load Check")
 
 @st.cache_resource
 def load_model():
-    ensure_event_loop()
-    return pipeline(
-        'text-generation',
-        model='TinyLlama/TinyLlama-1.1B-Chat-v1.0',
-        device_map="auto",
-        torch_dtype="auto",
-        cache_dir="/tmp/hf_home/models"
-    )
-
-def main():
-    st.title("🚀 TinyLlama Test")
-    prompt = st.text_input("Enter a prompt:")
-
-    if st.button("Run"):
-        try:
-            pipe = load_model()
-            output = pipe(prompt, max_new_tokens=200)[0]['generated_text']
-            st.write(output)
-        except Exception as e:
-            st.error(f"Error: {str(e)}")
-            st.info("""
-            **Additional remediation steps**
-            1. Refresh the page
-            2. Run the following in a terminal:
-            ```
-            rm -rf /tmp/hf_home/**/*.lock
-            ```
-            3. Manually delete the model cache:
-            ```
-            rm -rf /tmp/hf_home/models
-            ```
-            """)
-
-if __name__ == "__main__":
-    main()
+    """Load a small LLM model."""
+    try:
+        # DistilGPT-2 (small model, ~82M parameters)
+        model = pipeline(
+            "text-generation",
+            model="distilgpt2",
+            device=0 if torch.cuda.is_available() else -1
+        )
+        return model, "✅ Model loaded successfully!"
+    except Exception as e:
+        return None, f"❌ Model load failed: {str(e)}"
+
+# Load the model (cached across reruns by st.cache_resource)
+with st.spinner("Loading model..."):
+    model, status = load_model()
+
+st.write(status)
+
+if model:
+    # Simple text-generation test
+    st.subheader("Text Generation Test")
+
+    prompt = st.text_input("Enter a prompt:", "Hello, I am")
+
+    if st.button("Generate"):
+        if prompt:
+            with st.spinner("Generating..."):
+                result = model(prompt, max_length=50, num_return_sequences=1)
+                st.write("**Generated text:**")
+                st.write(result[0]['generated_text'])
+        else:
+            st.warning("Please enter a prompt.")
+
+# System info (shown in the sidebar)
+st.sidebar.write("**System info:**")
+st.sidebar.write(f"CUDA available: {torch.cuda.is_available()}")
+if torch.cuda.is_available():
+    st.sidebar.write(f"GPU: {torch.cuda.get_device_name(0)}")
+st.sidebar.write(f"PyTorch version: {torch.__version__}")