Martim-Ramos-Neural committed on
Commit 0b5ae0b · 1 Parent(s): b202b74
Files changed (1):
  1. app.py +14 -14
app.py CHANGED
@@ -15,19 +15,16 @@ from huggingface_hub import snapshot_download
 weights_dir = './allegro_weights'
 os.makedirs(weights_dir, exist_ok=True)
 
-# Check if running in a shared UI environment
-is_shared_ui = "rhymes-ai-Allegro-textToVideo" in os.environ.get('SPACE_ID', "")
-is_gpu_associated = torch.cuda.is_available()
 
 # Download the necessary model files if not in shared UI
-if not is_shared_ui:
-    print(f"Downloading models to {weights_dir}...")
-    snapshot_download(
-        repo_id='rhymes-ai/Allegro',
-        local_dir=weights_dir,
-        allow_patterns=['**'] # Download all required files
-    )
-    print(f"Downloaded models to {weights_dir}.")
+
+print(f"Downloading models to {weights_dir}...")
+snapshot_download(
+    repo_id='rhymes-ai/Allegro',
+    local_dir=weights_dir,
+    allow_patterns=['**']
+)
+print(f"Downloaded models to {weights_dir}.")
 
 # Check if the directories exist
 required_dirs = ['vae', 'text_encoder', 'tokenizer', 'scheduler', 'transformer']
@@ -38,9 +35,7 @@ if missing_dirs:
 else:
     print(f"All required directories are present in {weights_dir}.")
 
-if is_gpu_associated:
-    gpu_info = getoutput('nvidia-smi')
-    print(f"GPU Info: {gpu_info}")
+
 
 # Check directory structure
 required_dirs = ['vae', 'text_encoder', 'tokenizer', 'scheduler', 'transformer']
@@ -50,6 +45,11 @@ if missing_dirs:
 
 @spaces.GPU(duration=120) # Request GPU for the entire process
 def process_pipeline(user_prompt, guidance_scale, num_sampling_steps, seed, enable_cpu_offload):
+    # is_gpu_associated = torch.cuda.is_available()
+    # if is_gpu_associated:
+    #     gpu_info = getoutput('nvidia-smi')
+    #     print(f"GPU Info: {gpu_info}")
+
     # Define dtype
     dtype = torch.bfloat16
 
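For reference, a minimal sketch of the download-and-verify flow this commit leaves in place, using the same huggingface_hub API as above. The narrowed allow_patterns list and the missing_dirs reconstruction are illustrative assumptions, not part of the commit (which downloads the whole repo with '**'):

import os
from huggingface_hub import snapshot_download

weights_dir = './allegro_weights'
required_dirs = ['vae', 'text_encoder', 'tokenizer', 'scheduler', 'transformer']

os.makedirs(weights_dir, exist_ok=True)

# The commit passes allow_patterns=['**'] (everything); restricting the
# patterns to the required subfolders, as sketched here, is an assumption.
snapshot_download(
    repo_id='rhymes-ai/Allegro',
    local_dir=weights_dir,
    allow_patterns=[f'{d}/**' for d in required_dirs],
)

# A plausible version of the missing_dirs check the hunk headers refer to.
missing_dirs = [d for d in required_dirs
                if not os.path.isdir(os.path.join(weights_dir, d))]
if missing_dirs:
    raise FileNotFoundError(f"Missing model directories: {missing_dirs}")
print(f"All required directories are present in {weights_dir}.")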
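The GPU probe that this commit moves inside process_pipeline (and leaves commented out) only makes sense there on ZeroGPU hardware, where a device is attached solely while a @spaces.GPU-decorated call runs. A minimal sketch of that behavior, assuming a ZeroGPU Space and the spaces package the app already imports; probe_gpu is a hypothetical name:

import torch
import spaces
from subprocess import getoutput

# At module import time on a ZeroGPU Space no GPU is attached yet, so a
# module-level probe (like the removed one) reports nothing useful.

@spaces.GPU(duration=120)  # GPU is attached only while this call runs
def probe_gpu():
    # Inside the decorated function CUDA is live, so the relocated
    # (currently commented-out) probe would work as intended here.
    if torch.cuda.is_available():
        print(f"GPU Info: {getoutput('nvidia-smi')}")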