RohitCSharp committed on
Commit
d3a1880
·
verified ·
1 Parent(s): 153de4b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -11
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- import tempfile, requests, os
3
  from langchain.chains import LLMChain
4
  from langchain.prompts import PromptTemplate
5
  from langchain.chat_models import ChatOpenAI
@@ -8,7 +8,6 @@ from bs4 import BeautifulSoup
8
  from PIL import Image, ImageDraw, ImageFont
9
  import ffmpeg
10
  import textwrap
11
- import subprocess
12
 
13
  # OpenAI LLM
14
  llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.3)
@@ -40,7 +39,7 @@ def get_uploaded_logo():
40
  # Create image slides from text chunks
41
  def create_slides(text, duration, output_folder, max_lines=6):
42
  font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
43
- font = ImageFont.truetype(font_path, 48)
44
  logo_path = get_uploaded_logo()
45
 
46
  chunks = textwrap.wrap(text, width=40)
@@ -85,18 +84,26 @@ def url_to_av_summary(url, duration):
85
  frame_dir = tempfile.mkdtemp()
86
  slides = create_slides(summary, duration, frame_dir)
87
 
88
- slide_inputs = []
89
- for path, t in slides:
90
- slide_inputs.extend(["-t", str(t), "-loop", "1", "-i", path])
 
 
 
91
 
92
- filter_complex = "".join([f"[{i}:v]" for i in range(len(slides))]) + f"concat=n={len(slides)}:v=1[outv]"
93
  concat_img = os.path.join(frame_dir, "video_input.mp4")
94
-
95
- cmd = ["ffmpeg", "-y"] + slide_inputs + ["-filter_complex", filter_complex, "-map", "[outv]", concat_img]
96
- subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
 
97
 
98
  final_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
99
- ffmpeg.input(concat_img).output(audio_path, final_video, vcodec='libx264', acodec='aac', pix_fmt='yuv420p', shortest=None).run(overwrite_output=True, quiet=True)
 
 
 
 
 
100
 
101
  return summary, final_video
102
 
 
1
  import gradio as gr
2
+ import tempfile, requests, os, subprocess
3
  from langchain.chains import LLMChain
4
  from langchain.prompts import PromptTemplate
5
  from langchain.chat_models import ChatOpenAI
 
8
  from PIL import Image, ImageDraw, ImageFont
9
  import ffmpeg
10
  import textwrap
 
11
 
12
  # OpenAI LLM
13
  llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.3)
 
39
  # Create image slides from text chunks
40
  def create_slides(text, duration, output_folder, max_lines=6):
41
  font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
42
+ font = ImageFont.truetype(font_path, 64)
43
  logo_path = get_uploaded_logo()
44
 
45
  chunks = textwrap.wrap(text, width=40)
 
84
  frame_dir = tempfile.mkdtemp()
85
  slides = create_slides(summary, duration, frame_dir)
86
 
87
+ concat_txt_path = os.path.join(frame_dir, "slides.txt")
88
+ with open(concat_txt_path, "w") as f:
89
+ for path, t in slides:
90
+ f.write(f"file '{path}'\n")
91
+ f.write(f"duration {t}\n")
92
+ f.write(f"file '{slides[-1][0]}'\n")
93
 
 
94
  concat_img = os.path.join(frame_dir, "video_input.mp4")
95
+ subprocess.run([
96
+ "ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", concat_txt_path,
97
+ "-vsync", "vfr", "-pix_fmt", "yuv420p", concat_img
98
+ ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
99
 
100
  final_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
101
+ video_input = ffmpeg.input(concat_img)
102
+ audio_input = ffmpeg.input(audio_path)
103
+
104
+ ffmpeg.output(video_input, audio_input, final_video,
105
+ vcodec='libx264', acodec='aac', pix_fmt='yuv420p', shortest=None
106
+ ).run(overwrite_output=True, quiet=True)
107
 
108
  return summary, final_video
109