fffiloni committed on
Commit
6ebc8e0
·
verified ·
1 Parent(s): d4b2e40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -124,7 +124,7 @@ def infer(genre_txt_content, lyrics_txt_content):
124
 
125
  empty_output_folder(output_dir)
126
 
127
- # Command and arguments
128
  command = [
129
  "python", "infer.py",
130
  "--stage1_model", "m-a-p/YuE-s1-7B-anneal-en-cot",
@@ -132,20 +132,24 @@ def infer(genre_txt_content, lyrics_txt_content):
132
  "--genre_txt", f"{genre_txt_path}",
133
  "--lyrics_txt", f"{lyrics_txt_path}",
134
  "--run_n_segments", "2",
135
- "--stage2_batch_size", "4",
136
  "--output_dir", f"{output_dir}",
137
  "--cuda_idx", "0",
138
  "--max_new_tokens", "3000",
139
  "--disable_offload_model"
140
  ]
141
 
142
- # Set up environment variables for CUDA
143
- env = os.environ.copy() # Copy current environment
144
  env.update({
145
  "CUDA_VISIBLE_DEVICES": "0",
 
146
  "CUDA_HOME": "/usr/local/cuda",
147
  "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
148
- "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}"
 
 
 
149
  })
150
 
151
  # Execute the command
 
124
 
125
  empty_output_folder(output_dir)
126
 
127
+ # Command and arguments with optimized settings
128
  command = [
129
  "python", "infer.py",
130
  "--stage1_model", "m-a-p/YuE-s1-7B-anneal-en-cot",
 
132
  "--genre_txt", f"{genre_txt_path}",
133
  "--lyrics_txt", f"{lyrics_txt_path}",
134
  "--run_n_segments", "2",
135
+ "--stage2_batch_size", "8", # Increased from 4 to 8
136
  "--output_dir", f"{output_dir}",
137
  "--cuda_idx", "0",
138
  "--max_new_tokens", "3000",
139
  "--disable_offload_model"
140
  ]
141
 
142
+ # Set up environment variables for CUDA with optimized settings
143
+ env = os.environ.copy()
144
  env.update({
145
  "CUDA_VISIBLE_DEVICES": "0",
146
+ "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512",
147
  "CUDA_HOME": "/usr/local/cuda",
148
  "PATH": f"/usr/local/cuda/bin:{env.get('PATH', '')}",
149
+ "LD_LIBRARY_PATH": f"/usr/local/cuda/lib64:{env.get('LD_LIBRARY_PATH', '')}",
150
+ "PYTORCH_CUDA_ALLOC_CONF": "max_split_size_mb:512,garbage_collection_threshold:0.8", # Added garbage collection threshold
151
+ "TORCH_DISTRIBUTED_DEBUG": "DETAIL", # Added for better debugging
152
+ "CUDA_LAUNCH_BLOCKING": "0" # Ensure asynchronous CUDA operations
153
  })
154
 
155
  # Execute the command