hysts HF Staff committed on
Commit
7a4063f
·
1 Parent(s): 5458b42
Files changed (4) hide show
  1. README.md +1 -29
  2. app.py +24 -20
  3. requirements.txt +1 -1
  4. style.css +3 -0
README.md CHANGED
@@ -4,35 +4,7 @@ emoji: 🦀
4
  colorFrom: green
5
  colorTo: gray
6
  sdk: gradio
7
- sdk_version: 3.19.1
8
  app_file: app.py
9
  pinned: false
10
  ---
11
-
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio`, `streamlit`, or `static`
28
-
29
- `sdk_version` : _string_
30
- Only applicable for `streamlit` SDK.
31
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
-
33
- `app_file`: _string_
34
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
35
- Path is relative to the root of the repository.
36
-
37
- `pinned`: _boolean_
38
- Whether the Space stays on top of your list.
 
4
  colorFrom: green
5
  colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 3.34.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -6,17 +6,14 @@ import functools
6
  import os
7
  import pathlib
8
  import tarfile
9
- import urllib
10
 
11
  import cv2
12
  import gradio as gr
13
  import huggingface_hub
14
  import numpy as np
15
 
16
- TITLE = 'nagadomi/lbpcascade_animeface'
17
- DESCRIPTION = 'This is an unofficial demo for https://github.com/nagadomi/lbpcascade_animeface.'
18
-
19
- HF_TOKEN = os.getenv('HF_TOKEN')
20
 
21
 
22
  def load_sample_image_paths() -> list[pathlib.Path]:
@@ -25,8 +22,7 @@ def load_sample_image_paths() -> list[pathlib.Path]:
25
  dataset_repo = 'hysts/sample-images-TADNE'
26
  path = huggingface_hub.hf_hub_download(dataset_repo,
27
  'images.tar.gz',
28
- repo_type='dataset',
29
- use_auth_token=HF_TOKEN)
30
  with tarfile.open(path) as f:
31
  f.extractall()
32
  return sorted(image_dir.glob('*'))
@@ -41,14 +37,14 @@ def load_model() -> cv2.CascadeClassifier:
41
 
42
 
43
def detect(image_path: str, detector: cv2.CascadeClassifier) -> np.ndarray:
    """Run anime-face detection on the image stored at ``image_path``.

    Args:
        image_path: Path to an image file readable by ``cv2.imread``.
        detector: A loaded LBP cascade classifier (lbpcascade_animeface).

    Returns:
        The image as an RGB ``np.ndarray`` with a green, 2-px rectangle
        drawn around each detected face.
    """
    # BUG FIX: the original rebound the ``image_path`` parameter to the
    # decoded pixel array, shadowing the path string; ``res`` was then
    # copied from a variable named like a path. Use a dedicated local.
    image = cv2.imread(image_path)
    # NOTE(review): cv2.imread returns None for unreadable paths — the UI
    # presumably always passes a valid file; confirm before hardening.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    preds = detector.detectMultiScale(gray,
                                      scaleFactor=1.1,
                                      minNeighbors=5,
                                      minSize=(24, 24))

    res = image.copy()
    for x, y, w, h in preds:
        cv2.rectangle(res, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # OpenCV decodes to BGR; reverse the channel axis so callers get RGB.
    return res[:, :, ::-1]
@@ -58,13 +54,21 @@ image_paths = load_sample_image_paths()
58
# Assemble and launch the demo UI. The detector is partially applied so
# the Gradio callback only receives the input image path.
examples = [[path.as_posix()] for path in image_paths]

detector = load_model()
func = functools.partial(detect, detector=detector)

demo = gr.Interface(
    fn=func,
    inputs=gr.Image(label='Input', type='filepath'),
    outputs=gr.Image(label='Output', type='numpy'),
    examples=examples,
    title=TITLE,
    description=DESCRIPTION,
)
demo.queue().launch(show_api=False)
 
 
 
 
 
 
 
 
 
6
  import os
7
  import pathlib
8
  import tarfile
9
+ import urllib.request
10
 
11
  import cv2
12
  import gradio as gr
13
  import huggingface_hub
14
  import numpy as np
15
 
16
# Markdown heading rendered at the top of the demo; links to the upstream
# detector project this Space wraps.
DESCRIPTION = '# [nagadomi/lbpcascade_animeface](https://github.com/nagadomi/lbpcascade_animeface)'
 
 
 
17
 
18
 
19
  def load_sample_image_paths() -> list[pathlib.Path]:
 
22
  dataset_repo = 'hysts/sample-images-TADNE'
23
  path = huggingface_hub.hf_hub_download(dataset_repo,
24
  'images.tar.gz',
25
+ repo_type='dataset')
 
26
  with tarfile.open(path) as f:
27
  f.extractall()
28
  return sorted(image_dir.glob('*'))
 
37
 
38
 
39
def detect(image_path: str, detector: cv2.CascadeClassifier) -> np.ndarray:
    """Detect anime faces in the image at ``image_path``.

    Decodes the file, runs the LBP cascade over its grayscale version,
    and returns an RGB array with a green box around each detection.
    """
    bgr = cv2.imread(image_path)
    grayscale = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(grayscale,
                                      scaleFactor=1.1,
                                      minNeighbors=5,
                                      minSize=(24, 24))

    annotated = bgr.copy()
    for left, top, width, height in faces:
        bottom_right = (left + width, top + height)
        cv2.rectangle(annotated, (left, top), bottom_right, (0, 255, 0), 2)
    # Flip BGR -> RGB for display.
    return annotated[:, :, ::-1]
 
54
# Wire up the Gradio Blocks UI: input image + Run button on the left,
# annotated result on the right.
examples = [[p.as_posix()] for p in image_paths]
detector = load_model()
fn = functools.partial(detect, detector=detector)

with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            image = gr.Image(label='Input', type='filepath')
            run_button = gr.Button('Run')
        with gr.Column():
            result = gr.Image(label='Result')

    # Example outputs are precomputed only when explicitly enabled via env.
    gr.Examples(
        examples=examples,
        inputs=image,
        outputs=result,
        fn=fn,
        cache_examples=os.getenv('CACHE_EXAMPLES') == '1',
    )
    run_button.click(fn=fn, inputs=image, outputs=result, api_name='predict')

demo.queue(max_size=15).launch()
requirements.txt CHANGED
@@ -1 +1 @@
1
- opencv-python-headless>=4.5.5.62
 
1
+ opencv-python-headless>=4.7.0.72
style.css ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ h1 {
2
+ text-align: center;
3
+ }