integrate with huggingface (#3)
- integrate with huggingface (5ad69f0c63c1820bf9c9486fac0127ded199c980)
Co-authored-by: Hafedh Hichri <[email protected]>
- .gitattributes +1 -0
- BEN2_demo_pictures/model_comparison.png +0 -0
- README.md +19 -16
- model.safetensors +3 -0

.gitattributes
CHANGED

@@ -38,3 +38,4 @@ BEN2_demo_pictures/grid_example2.png filter=lfs diff=lfs merge=lfs -text
 BEN2_demo_pictures/grid_example3.png filter=lfs diff=lfs merge=lfs -text
 BEN2_demo_pictures/grid_example6.png filter=lfs diff=lfs merge=lfs -text
 BEN2_demo_pictures/grid_example7.png filter=lfs diff=lfs merge=lfs -text
+BEN2_demo_pictures/model_comparison.png filter=lfs diff=lfs merge=lfs -text

BEN2_demo_pictures/model_comparison.png
CHANGED

(binary PNG tracked with Git LFS; image previews not reproduced here)

README.md
CHANGED

@@ -1,6 +1,7 @@
 ---
 license: mit
 pipeline_tag: image-segmentation
+library_name: ben2
 tags:
 - BEN2
 - background-remove
@@ -11,6 +12,8 @@ tags:
 - background
 - remove background
 - pytorch
+- model_hub_mixin
+- pytorch_model_hub_mixin
 ---
 
 # BEN2: Background Erase Network
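
The new `library_name: ben2` metadata and the `model_hub_mixin` / `pytorch_model_hub_mixin` tags indicate that the model is now loaded through huggingface_hub's PyTorchModelHubMixin, which is what enables the `BEN_Base.from_pretrained("PramaLLC/BEN2")` calls added in the hunks below. A minimal sketch of that pattern (a generic, hypothetical model class for illustration, not BEN2's actual source):

```python
# Generic PyTorchModelHubMixin pattern (hypothetical TinySegmenter, not BEN2's real class).
import torch
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin


class TinySegmenter(nn.Module, PyTorchModelHubMixin):
    """Inheriting the mixin adds save_pretrained / from_pretrained / push_to_hub."""

    def __init__(self, channels: int = 16):
        super().__init__()
        self.conv = nn.Conv2d(3, channels, kernel_size=3, padding=1)
        self.head = nn.Conv2d(channels, 1, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(self.head(torch.relu(self.conv(x))))


model = TinySegmenter()
model.save_pretrained("./tiny-segmenter")          # writes model.safetensors (+ config.json)
reloaded = TinySegmenter.from_pretrained("./tiny-segmenter")
```
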
@@ -37,10 +40,16 @@ BEN2 was trained on the DIS5k and our 22K proprietary segmentation dataset. Our
 - Follow us on X: https://x.com/PramaResearch/
 
 
-##
+## Installation
+
+```
+pip install -e "git+https://github.com/PramaLLC/BEN2.git#egg=ben2"
+```
+
+## Quick start code
 
 ```python
-import
+from ben2 import BEN_Base
 from PIL import Image
 import torch
 
@@ -49,9 +58,9 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 file = "./image.png" # input image
 
-model =
+model = BEN_Base.from_pretrained("PramaLLC/BEN2")
+model.to(device).eval()
 
-model.loadcheckpoints("./BEN2_Base.pth")
 image = Image.open(file)
 foreground = model.inference(image, refine_foreground=False,) #Refine foreground is an extract postprocessing step that increases inference time but can improve matting edges. The default value is False.
 
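
For reference, the quick-start block that results from these edits (reassembled here from the added and unchanged lines shown in the hunks above) looks like this:

```python
from ben2 import BEN_Base
from PIL import Image
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

file = "./image.png"  # input image

model = BEN_Base.from_pretrained("PramaLLC/BEN2")  # weights pulled from the Hub
model.to(device).eval()

image = Image.open(file)
# refine_foreground is an extra postprocessing step that increases inference
# time but can improve matting edges; it defaults to False.
foreground = model.inference(image, refine_foreground=False)
foreground.save("./foreground.png")
```
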
@@ -63,7 +72,7 @@ foreground.save("./foreground.png")
 ## Batch image processing
 
 ```python
-import
+from ben2 import BEN_Base
 from PIL import Image
 import torch
 
@@ -72,9 +81,9 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 
 
-model =
+model = BEN_Base.from_pretrained("PramaLLC/BEN2")
+model.to(device).eval()
 
-model.loadcheckpoints("./BEN2_Base.pth")
 
 file1 = "./image1.png" # input image1
 file2 = "./image2.png" # input image2
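
The batch hunks above only show the model being loaded and the two input paths being defined; the batched call itself sits outside the diff context. A plausible sketch, assuming (not confirmed by this diff) that `model.inference` also accepts a list of PIL images and returns the foregrounds in the same order:

```python
from ben2 import BEN_Base
from PIL import Image
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = BEN_Base.from_pretrained("PramaLLC/BEN2")
model.to(device).eval()

file1 = "./image1.png"  # input image1
file2 = "./image2.png"  # input image2
images = [Image.open(file1), Image.open(file2)]

# Assumption: inference() can take a list of images for batched processing.
foregrounds = model.inference(images)
for i, fg in enumerate(foregrounds):
    fg.save(f"./foreground{i + 1}.png")
```
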
@@ -102,7 +111,7 @@ sudo apt install ffmpeg
 ```
 
 ```python
-import
+from ben2 import BEN_Base
 from PIL import Image
 import torch
 
@@ -111,11 +120,8 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 video_path = "/path_to_your_video.mp4"# input video
 
-model =
-
-model.loadcheckpoints("./BEN2_Base.pth")
-
-
+model = BEN_Base.from_pretrained("PramaLLC/BEN2")
+model.to(device).eval()
 
 
 model.segment_video(
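
Only the opening `model.segment_video(` call is visible in the video hunk's context, so its arguments are not shown here. A minimal sketch, assuming `segment_video` takes the input path as its first argument (any output-path, fps, or batching options are not visible in this diff):

```python
from ben2 import BEN_Base
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

video_path = "/path_to_your_video.mp4"  # input video

model = BEN_Base.from_pretrained("PramaLLC/BEN2")
model.to(device).eval()

# Assumption: the input path is the first argument; other keyword
# arguments are not shown in this diff.
model.segment_video(video_path)
```
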
@@ -146,6 +152,3 @@ RMBG 2.0 did not preserve the DIS 5k validation dataset
 ![Example 7](BEN2_demo_pictures/grid_example7.png)
 
 
-## Installation
-1. Clone Repo
-2. Install requirements.txt

model.safetensors
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea8b7907176a09667c86343dc7d00de6a6d871076cb90bb5f753618fd6fb3ebb
+size 380577976
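
The new model.safetensors is stored as a Git LFS pointer; the sha256 oid above identifies the actual 380,577,976-byte weight file. A small sketch for checking a locally downloaded copy against that pointer (assumes the file sits at ./model.safetensors):

```python
import hashlib
from pathlib import Path

# Values taken from the LFS pointer added in this commit.
EXPECTED_OID = "ea8b7907176a09667c86343dc7d00de6a6d871076cb90bb5f753618fd6fb3ebb"
EXPECTED_SIZE = 380577976

path = Path("./model.safetensors")
assert path.stat().st_size == EXPECTED_SIZE, "size does not match the LFS pointer"

sha256 = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print("sha256 matches LFS oid:", sha256.hexdigest() == EXPECTED_OID)
```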