applied-ai-018 committed on
Commit
21ee636
·
verified ·
1 Parent(s): 01f0422

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. .tok_single_dir.sh.swo +0 -0
  3. create_multi_dataset.sh +50 -0
  4. data_all_eng_slimpj/test1.log +0 -0
  5. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_0.sh +25 -0
  6. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_1.sh +25 -0
  7. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_10.sh +25 -0
  8. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_11.sh +25 -0
  9. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_12.sh +25 -0
  10. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_13.sh +25 -0
  11. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_14.sh +25 -0
  12. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_15.sh +25 -0
  13. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_16.sh +25 -0
  14. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_17.sh +25 -0
  15. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_18.sh +25 -0
  16. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_19.sh +25 -0
  17. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_2.sh +25 -0
  18. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_20.sh +25 -0
  19. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_21.sh +25 -0
  20. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_22.sh +25 -0
  21. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_23.sh +25 -0
  22. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_24.sh +25 -0
  23. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_25.sh +25 -0
  24. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_26.sh +25 -0
  25. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_27.sh +25 -0
  26. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_28.sh +25 -0
  27. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_29.sh +25 -0
  28. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_3.sh +25 -0
  29. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_30.sh +25 -0
  30. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_31.sh +25 -0
  31. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_32.sh +25 -0
  32. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_33.sh +25 -0
  33. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_34.sh +25 -0
  34. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_35.sh +25 -0
  35. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_36.sh +25 -0
  36. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_37.sh +25 -0
  37. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_38.sh +25 -0
  38. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_39.sh +25 -0
  39. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_4.sh +25 -0
  40. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_5.sh +25 -0
  41. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_6.sh +25 -0
  42. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_7.sh +25 -0
  43. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_8.sh +25 -0
  44. data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_9.sh +25 -0
  45. data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_0.yaml +104 -0
  46. data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_1.yaml +104 -0
  47. data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_10.yaml +104 -0
  48. data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_12.yaml +104 -0
  49. data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_13.yaml +104 -0
  50. data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_14.yaml +104 -0
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ data_all_eng_slimpj/tokenizer234/tokenizer3/_text_document.idx filter=lfs diff=lfs merge=lfs -text
61
+ data_all_eng_slimpj/tokenizer234/tokenizer4/_text_document.idx filter=lfs diff=lfs merge=lfs -text
.tok_single_dir.sh.swo ADDED
Binary file (12.3 kB). View file
 
create_multi_dataset.sh ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ apt-get update -y
3
+ apt-get install zstd -y
4
+ DIR="/mnt/weka/peacock/idc/wet-data/output/toxic_filtered_without_bloom_new/"
5
+ TRAIN_DIR=$DIR
6
+ OUTPUT_TRAIN_DIR="/mnt/weka/peacock/idc/hineng/data_multi/"
7
+ #rm -rf $OUTPUT_TRAIN_DIR
8
+ #DATASET_NAME="multi"
9
+ #FINAL_DIR="/mnt/weka/peacock/idc/datasets/$DATASET_NAME/"
10
+ #mkdir -p $FINAL_DIR $OUTPUT_TRAIN_DIR
11
+ #langs=($(ls -1 /mnt/weka/peacock/idc/wet-data/output/toxic_filtered_without_bloom_new/2024-10/ | awk '{split($0, a,"_"); print a[1]}' | uniq))
12
+ #for lang in ${langs[@]}; do
13
+ # files=($(ls $TRAIN_DIR/*/$lang*))
14
+ # OUTPUT_LANG_DIR=$OUTPUT_TRAIN_DIR/$lang
15
+ # FINAL_LANG_DIR=$FINAL_DIR/$lang
16
+ # mkdir -p $OUTPUT_LANG_DIR $FINAL_LANG_DIR
17
+ # for f in ${files[@]}; do
18
+ # yr_name=$(echo $f | rev | cut -d/ -f 2 | rev)
19
+ # new_name=$(echo $f | sed "s/\//_/g")
20
+ # echo $yr_name
21
+ # cmd="unzstd $f --stdout > $OUTPUT_LANG_DIR/${yr_name}_$(basename $f).jsonl"
22
+ # echo $cmd
23
+ # eval $cmd
24
+ # done;
25
+ ##final_cmd="cat $OUTPUT_LANG_DIR/*.jsonl > $FINAL_LANG_DIR/final_en.jsonl"
26
+ ##echo $final_cmd
27
+ ##eval $final_cmd
28
+ #done;
29
+
30
+ DATASET_NAME="multi"
31
+ FINAL_DIR="/mnt/weka/peacock/idc/datasets/$DATASET_NAME/"
32
+ year_dir=($(ls $TRAIN_DIR))
33
+ langs=("ta" "te" "kn" "ml")
34
+ for l in ${langs[@]}; do
35
+ for y in ${year_dir[@]}; do
36
+ echo $y
37
+ files=($(ls $TRAIN_DIR/$y/$l*))
38
+ OUTPUT_LANG_DIR=$OUTPUT_TRAIN_DIR/$l
39
+ FINAL_LANG_DIR=$FINAL_DIR/$l
40
+ mkdir -p $OUTPUT_LANG_DIR $FINAL_LANG_DIR
41
+ for f in ${files[@]}; do
42
+ yr_name=$(echo $f | rev | cut -d/ -f 2 | rev)
43
+ new_name=$(echo $f | sed "s/\//_/g")
44
+ echo $yr_name
45
+ cmd="unzstd $f --stdout > $OUTPUT_LANG_DIR/${yr_name}_$(basename $f).jsonl"
46
+ echo $cmd
47
+ eval $cmd
48
+ done;
49
+ done;
50
+ done;
data_all_eng_slimpj/test1.log ADDED
The diff for this file is too large to render. See raw diff
 
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_0.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer0/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalaa \
18
+ --output-prefix $FINAL_DIR/tokenizer0/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_1.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer1/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalab \
18
+ --output-prefix $FINAL_DIR/tokenizer1/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_10.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer10/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalak \
18
+ --output-prefix $FINAL_DIR/tokenizer10/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_11.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer11/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalal \
18
+ --output-prefix $FINAL_DIR/tokenizer11/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_12.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer12/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalam \
18
+ --output-prefix $FINAL_DIR/tokenizer12/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_13.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer13/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalan \
18
+ --output-prefix $FINAL_DIR/tokenizer13/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_14.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer14/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalao \
18
+ --output-prefix $FINAL_DIR/tokenizer14/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_15.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer15/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalap \
18
+ --output-prefix $FINAL_DIR/tokenizer15/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_16.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer16/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalaq \
18
+ --output-prefix $FINAL_DIR/tokenizer16/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_17.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer17/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalar \
18
+ --output-prefix $FINAL_DIR/tokenizer17/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_18.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer18/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalas \
18
+ --output-prefix $FINAL_DIR/tokenizer18/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_19.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer19/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalat \
18
+ --output-prefix $FINAL_DIR/tokenizer19/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_2.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer2/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalac \
18
+ --output-prefix $FINAL_DIR/tokenizer2/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_20.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer20/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalau \
18
+ --output-prefix $FINAL_DIR/tokenizer20/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_21.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer21/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalav \
18
+ --output-prefix $FINAL_DIR/tokenizer21/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_22.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer22/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalaw \
18
+ --output-prefix $FINAL_DIR/tokenizer22/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_23.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer23/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalax \
18
+ --output-prefix $FINAL_DIR/tokenizer23/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_24.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer24/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalay \
18
+ --output-prefix $FINAL_DIR/tokenizer24/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_25.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer25/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalaz \
18
+ --output-prefix $FINAL_DIR/tokenizer25/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_26.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer26/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalba \
18
+ --output-prefix $FINAL_DIR/tokenizer26/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_27.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer27/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbb \
18
+ --output-prefix $FINAL_DIR/tokenizer27/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_28.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer28/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbc \
18
+ --output-prefix $FINAL_DIR/tokenizer28/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_29.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer29/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbd \
18
+ --output-prefix $FINAL_DIR/tokenizer29/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_3.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer3/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalad \
18
+ --output-prefix $FINAL_DIR/tokenizer3/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_30.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer30/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbe \
18
+ --output-prefix $FINAL_DIR/tokenizer30/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_31.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer31/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbf \
18
+ --output-prefix $FINAL_DIR/tokenizer31/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_32.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer32/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbg \
18
+ --output-prefix $FINAL_DIR/tokenizer32/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_33.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer33/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbh \
18
+ --output-prefix $FINAL_DIR/tokenizer33/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_34.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer34/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbi \
18
+ --output-prefix $FINAL_DIR/tokenizer34/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_35.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer35/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbj \
18
+ --output-prefix $FINAL_DIR/tokenizer35/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_36.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer36/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbk \
18
+ --output-prefix $FINAL_DIR/tokenizer36/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_37.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer37/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbl \
18
+ --output-prefix $FINAL_DIR/tokenizer37/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_38.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer38/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbm \
18
+ --output-prefix $FINAL_DIR/tokenizer38/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_39.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer39/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalbn \
18
+ --output-prefix $FINAL_DIR/tokenizer39/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_4.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer4/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalae \
18
+ --output-prefix $FINAL_DIR/tokenizer4/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_5.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer5/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalaf \
18
+ --output-prefix $FINAL_DIR/tokenizer5/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_6.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer6/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalag \
18
+ --output-prefix $FINAL_DIR/tokenizer6/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_7.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer7/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalah \
18
+ --output-prefix $FINAL_DIR/tokenizer7/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_8.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer8/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalai \
18
+ --output-prefix $FINAL_DIR/tokenizer8/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_9.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -m
3
+
4
+ echo "above deepspeed"
5
+ FINAL_DIR=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english/
6
+ #TOKENIZER=facebook/nllb-200-distilled-600M
7
+ #TOKENIZER_TYPE=HuggingFaceTokenizer
8
+ TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
9
+ TOKENIZER=$TOKENIZER_PATH/all.model
10
+ VOCAB_FILE=$TOKENIZER_PATH/all.vocab
11
+ TOKENIZER_TYPE=SentencePieceTokenizer
12
+ mkdir -p $FINAL_DIR/tokenizer9/
13
+ cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
14
+ echo "inside deepspeed"
15
+ pwd
16
+ python3 tools/preprocess_data.py \
17
+ --input /mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/split40//finalaj \
18
+ --output-prefix $FINAL_DIR/tokenizer9/ \
19
+ --tokenizer-model $TOKENIZER \
20
+ --vocab-file $VOCAB_FILE \
21
+ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\
22
+ --append-eod --workers 8 # --partitions 16 #--chunk-size 50
23
+
24
+ # --merge-file $MERGES_FILE \
25
+
data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_0.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-english-5-0
5
+ namespace: peacock
6
+ labels:
7
+ app: peacock-tokenizer-english-5-0
8
+ spec:
9
+ slotsPerWorker: 8
10
+ runPolicy:
11
+ backoffLimit: 1
12
+ cleanPodPolicy: Running
13
+ mpiReplicaSpecs:
14
+ Launcher:
15
+ replicas: 1
16
+ template:
17
+ spec:
18
+ hostIPC: true
19
+ volumes:
20
+ - name: work-dir
21
+ persistentVolumeClaim:
22
+ claimName: peacock-fs-pvc
23
+ containers:
24
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
25
+ name: peacock-tokenizer-english-5-0-launcher
26
+ imagePullPolicy: IfNotPresent
27
+ volumeMounts:
28
+ - name: work-dir
29
+ mountPath: /mnt/weka/peacock
30
+ command: ["/bin/bash", "-c"]
31
+ args:
32
+ - >-
33
+
34
+ export SYNAPSE_VERSION="1.15.1";
35
+ export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
36
+
37
+ export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
38
+ export TOKENIZER_CMD=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_0.sh
39
+ HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
40
+ echo "HOSTSFILE=$HOSTSFILE";
41
+ MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
42
+ NUM_NODES=$(wc -l < $HOSTSFILE);
43
+ CARDS_PER_NODE=8;
44
+ N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
45
+ echo "MPI_ROOT=$MPI_ROOT";
46
+ echo "N_CARDS=$N_CARDS";
47
+ echo "MASTER_ADDR=$MASTER_ADDR";
48
+ sleep 20;
49
+
50
+
51
+ mpirun -np $N_CARDS -npernode 8 \
52
+ --tag-output \
53
+ --allow-run-as-root \
54
+ --prefix $MPI_ROOT \
55
+ -x WORKER_DIR=$WORKER_DIR \
56
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
57
+
58
+ mpirun -np $N_CARDS -npernode 8 \
59
+ --tag-output \
60
+ --allow-run-as-root \
61
+ --prefix $MPI_ROOT \
62
+ -x WORKER_DIR=$WORKER_DIR \
63
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
64
+
65
+
66
+ Worker:
67
+ replicas: 1
68
+ template:
69
+ spec:
70
+ volumes:
71
+ - name: work-dir
72
+ persistentVolumeClaim:
73
+ claimName: peacock-fs-pvc
74
+ tolerations:
75
+ - key: "habana.ai/gaudi"
76
+ operator: "Exists"
77
+ effect: "NoSchedule"
78
+ - key: "k8s/namespace"
79
+ operator: "Equal"
80
+ value: "peacock"
81
+ effect: "NoSchedule"
82
+ hostIPC: true
83
+ containers:
84
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
85
+ name: peacock-llama-worker
86
+ imagePullPolicy: IfNotPresent
87
+ securityContext:
88
+ capabilities:
89
+ add:
90
+ - SYSLOG
91
+ resources:
92
+ limits:
93
+ habana.ai/gaudi: 8
94
+ hugepages-2Mi: 300Gi
95
+ memory: 700Gi
96
+ cpu: 150
97
+ requests:
98
+ habana.ai/gaudi: 8
99
+ hugepages-2Mi: 300Gi
100
+ memory: 700Gi
101
+ cpu: 150
102
+ volumeMounts:
103
+ - name: work-dir
104
+ mountPath: /mnt/weka/peacock
data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_1.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-english-5-1
5
+ namespace: peacock
6
+ labels:
7
+ app: peacock-tokenizer-english-5-1
8
+ spec:
9
+ slotsPerWorker: 8
10
+ runPolicy:
11
+ backoffLimit: 1
12
+ cleanPodPolicy: Running
13
+ mpiReplicaSpecs:
14
+ Launcher:
15
+ replicas: 1
16
+ template:
17
+ spec:
18
+ hostIPC: true
19
+ volumes:
20
+ - name: work-dir
21
+ persistentVolumeClaim:
22
+ claimName: peacock-fs-pvc
23
+ containers:
24
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
25
+ name: peacock-tokenizer-english-5-1-launcher
26
+ imagePullPolicy: IfNotPresent
27
+ volumeMounts:
28
+ - name: work-dir
29
+ mountPath: /mnt/weka/peacock
30
+ command: ["/bin/bash", "-c"]
31
+ args:
32
+ - >-
33
+
34
+ export SYNAPSE_VERSION="1.15.1";
35
+ export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
36
+
37
+ export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
38
+ export TOKENIZER_CMD=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_1.sh
39
+ HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
40
+ echo "HOSTSFILE=$HOSTSFILE";
41
+ MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
42
+ NUM_NODES=$(wc -l < $HOSTSFILE);
43
+ CARDS_PER_NODE=8;
44
+ N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
45
+ echo "MPI_ROOT=$MPI_ROOT";
46
+ echo "N_CARDS=$N_CARDS";
47
+ echo "MASTER_ADDR=$MASTER_ADDR";
48
+ sleep 20;
49
+
50
+
51
+ mpirun -np $N_CARDS -npernode 8 \
52
+ --tag-output \
53
+ --allow-run-as-root \
54
+ --prefix $MPI_ROOT \
55
+ -x WORKER_DIR=$WORKER_DIR \
56
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
57
+
58
+ mpirun -np $N_CARDS -npernode 8 \
59
+ --tag-output \
60
+ --allow-run-as-root \
61
+ --prefix $MPI_ROOT \
62
+ -x WORKER_DIR=$WORKER_DIR \
63
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
64
+
65
+
66
+ Worker:
67
+ replicas: 1
68
+ template:
69
+ spec:
70
+ volumes:
71
+ - name: work-dir
72
+ persistentVolumeClaim:
73
+ claimName: peacock-fs-pvc
74
+ tolerations:
75
+ - key: "habana.ai/gaudi"
76
+ operator: "Exists"
77
+ effect: "NoSchedule"
78
+ - key: "k8s/namespace"
79
+ operator: "Equal"
80
+ value: "peacock"
81
+ effect: "NoSchedule"
82
+ hostIPC: true
83
+ containers:
84
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
85
+ name: peacock-llama-worker
86
+ imagePullPolicy: IfNotPresent
87
+ securityContext:
88
+ capabilities:
89
+ add:
90
+ - SYSLOG
91
+ resources:
92
+ limits:
93
+ habana.ai/gaudi: 8
94
+ hugepages-2Mi: 300Gi
95
+ memory: 700Gi
96
+ cpu: 150
97
+ requests:
98
+ habana.ai/gaudi: 8
99
+ hugepages-2Mi: 300Gi
100
+ memory: 700Gi
101
+ cpu: 150
102
+ volumeMounts:
103
+ - name: work-dir
104
+ mountPath: /mnt/weka/peacock
data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_10.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-english-5-10
5
+ namespace: peacock
6
+ labels:
7
+ app: peacock-tokenizer-english-5-10
8
+ spec:
9
+ slotsPerWorker: 8
10
+ runPolicy:
11
+ backoffLimit: 1
12
+ cleanPodPolicy: Running
13
+ mpiReplicaSpecs:
14
+ Launcher:
15
+ replicas: 1
16
+ template:
17
+ spec:
18
+ hostIPC: true
19
+ volumes:
20
+ - name: work-dir
21
+ persistentVolumeClaim:
22
+ claimName: peacock-fs-pvc
23
+ containers:
24
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
25
+ name: peacock-tokenizer-english-5-10-launcher
26
+ imagePullPolicy: IfNotPresent
27
+ volumeMounts:
28
+ - name: work-dir
29
+ mountPath: /mnt/weka/peacock
30
+ command: ["/bin/bash", "-c"]
31
+ args:
32
+ - >-
33
+
34
+ export SYNAPSE_VERSION="1.15.1";
35
+ export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
36
+
37
+ export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
38
+ export TOKENIZER_CMD=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_10.sh
39
+ HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
40
+ echo "HOSTSFILE=$HOSTSFILE";
41
+ MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
42
+ NUM_NODES=$(wc -l < $HOSTSFILE);
43
+ CARDS_PER_NODE=8;
44
+ N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
45
+ echo "MPI_ROOT=$MPI_ROOT";
46
+ echo "N_CARDS=$N_CARDS";
47
+ echo "MASTER_ADDR=$MASTER_ADDR";
48
+ sleep 20;
49
+
50
+
51
+ mpirun -np $N_CARDS -npernode 8 \
52
+ --tag-output \
53
+ --allow-run-as-root \
54
+ --prefix $MPI_ROOT \
55
+ -x WORKER_DIR=$WORKER_DIR \
56
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
57
+
58
+ mpirun -np $N_CARDS -npernode 8 \
59
+ --tag-output \
60
+ --allow-run-as-root \
61
+ --prefix $MPI_ROOT \
62
+ -x WORKER_DIR=$WORKER_DIR \
63
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
64
+
65
+
66
+ Worker:
67
+ replicas: 1
68
+ template:
69
+ spec:
70
+ volumes:
71
+ - name: work-dir
72
+ persistentVolumeClaim:
73
+ claimName: peacock-fs-pvc
74
+ tolerations:
75
+ - key: "habana.ai/gaudi"
76
+ operator: "Exists"
77
+ effect: "NoSchedule"
78
+ - key: "k8s/namespace"
79
+ operator: "Equal"
80
+ value: "peacock"
81
+ effect: "NoSchedule"
82
+ hostIPC: true
83
+ containers:
84
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
85
+ name: peacock-llama-worker
86
+ imagePullPolicy: IfNotPresent
87
+ securityContext:
88
+ capabilities:
89
+ add:
90
+ - SYSLOG
91
+ resources:
92
+ limits:
93
+ habana.ai/gaudi: 8
94
+ hugepages-2Mi: 300Gi
95
+ memory: 700Gi
96
+ cpu: 150
97
+ requests:
98
+ habana.ai/gaudi: 8
99
+ hugepages-2Mi: 300Gi
100
+ memory: 700Gi
101
+ cpu: 150
102
+ volumeMounts:
103
+ - name: work-dir
104
+ mountPath: /mnt/weka/peacock
data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_12.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-english-5-12
5
+ namespace: peacock
6
+ labels:
7
+ app: peacock-tokenizer-english-5-12
8
+ spec:
9
+ slotsPerWorker: 8
10
+ runPolicy:
11
+ backoffLimit: 1
12
+ cleanPodPolicy: Running
13
+ mpiReplicaSpecs:
14
+ Launcher:
15
+ replicas: 1
16
+ template:
17
+ spec:
18
+ hostIPC: true
19
+ volumes:
20
+ - name: work-dir
21
+ persistentVolumeClaim:
22
+ claimName: peacock-fs-pvc
23
+ containers:
24
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
25
+ name: peacock-tokenizer-english-5-12-launcher
26
+ imagePullPolicy: IfNotPresent
27
+ volumeMounts:
28
+ - name: work-dir
29
+ mountPath: /mnt/weka/peacock
30
+ command: ["/bin/bash", "-c"]
31
+ args:
32
+ - >-
33
+
34
+ export SYNAPSE_VERSION="1.15.1";
35
+ export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
36
+
37
+ export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
38
+ export TOKENIZER_CMD=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_12.sh
39
+ HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
40
+ echo "HOSTSFILE=$HOSTSFILE";
41
+ MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
42
+ NUM_NODES=$(wc -l < $HOSTSFILE);
43
+ CARDS_PER_NODE=8;
44
+ N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
45
+ echo "MPI_ROOT=$MPI_ROOT";
46
+ echo "N_CARDS=$N_CARDS";
47
+ echo "MASTER_ADDR=$MASTER_ADDR";
48
+ sleep 20;
49
+
50
+
51
+ mpirun -np $N_CARDS -npernode 8 \
52
+ --tag-output \
53
+ --allow-run-as-root \
54
+ --prefix $MPI_ROOT \
55
+ -x WORKER_DIR=$WORKER_DIR \
56
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
57
+
58
+ mpirun -np $N_CARDS -npernode 8 \
59
+ --tag-output \
60
+ --allow-run-as-root \
61
+ --prefix $MPI_ROOT \
62
+ -x WORKER_DIR=$WORKER_DIR \
63
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
64
+
65
+
66
+ Worker:
67
+ replicas: 1
68
+ template:
69
+ spec:
70
+ volumes:
71
+ - name: work-dir
72
+ persistentVolumeClaim:
73
+ claimName: peacock-fs-pvc
74
+ tolerations:
75
+ - key: "habana.ai/gaudi"
76
+ operator: "Exists"
77
+ effect: "NoSchedule"
78
+ - key: "k8s/namespace"
79
+ operator: "Equal"
80
+ value: "peacock"
81
+ effect: "NoSchedule"
82
+ hostIPC: true
83
+ containers:
84
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
85
+ name: peacock-llama-worker
86
+ imagePullPolicy: IfNotPresent
87
+ securityContext:
88
+ capabilities:
89
+ add:
90
+ - SYSLOG
91
+ resources:
92
+ limits:
93
+ habana.ai/gaudi: 8
94
+ hugepages-2Mi: 300Gi
95
+ memory: 700Gi
96
+ cpu: 150
97
+ requests:
98
+ habana.ai/gaudi: 8
99
+ hugepages-2Mi: 300Gi
100
+ memory: 700Gi
101
+ cpu: 150
102
+ volumeMounts:
103
+ - name: work-dir
104
+ mountPath: /mnt/weka/peacock
data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_13.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-english-5-13
5
+ namespace: peacock
6
+ labels:
7
+ app: peacock-tokenizer-english-5-13
8
+ spec:
9
+ slotsPerWorker: 8
10
+ runPolicy:
11
+ backoffLimit: 1
12
+ cleanPodPolicy: Running
13
+ mpiReplicaSpecs:
14
+ Launcher:
15
+ replicas: 1
16
+ template:
17
+ spec:
18
+ hostIPC: true
19
+ volumes:
20
+ - name: work-dir
21
+ persistentVolumeClaim:
22
+ claimName: peacock-fs-pvc
23
+ containers:
24
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
25
+ name: peacock-tokenizer-english-5-13-launcher
26
+ imagePullPolicy: IfNotPresent
27
+ volumeMounts:
28
+ - name: work-dir
29
+ mountPath: /mnt/weka/peacock
30
+ command: ["/bin/bash", "-c"]
31
+ args:
32
+ - >-
33
+
34
+ export SYNAPSE_VERSION="1.15.1";
35
+ export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
36
+
37
+ export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
38
+ export TOKENIZER_CMD=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_13.sh
39
+ HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
40
+ echo "HOSTSFILE=$HOSTSFILE";
41
+ MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
42
+ NUM_NODES=$(wc -l < $HOSTSFILE);
43
+ CARDS_PER_NODE=8;
44
+ N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
45
+ echo "MPI_ROOT=$MPI_ROOT";
46
+ echo "N_CARDS=$N_CARDS";
47
+ echo "MASTER_ADDR=$MASTER_ADDR";
48
+ sleep 20;
49
+
50
+
51
+ mpirun -np $N_CARDS -npernode 8 \
52
+ --tag-output \
53
+ --allow-run-as-root \
54
+ --prefix $MPI_ROOT \
55
+ -x WORKER_DIR=$WORKER_DIR \
56
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
57
+
58
+ mpirun -np $N_CARDS -npernode 8 \
59
+ --tag-output \
60
+ --allow-run-as-root \
61
+ --prefix $MPI_ROOT \
62
+ -x WORKER_DIR=$WORKER_DIR \
63
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
64
+
65
+
66
+ Worker:
67
+ replicas: 1
68
+ template:
69
+ spec:
70
+ volumes:
71
+ - name: work-dir
72
+ persistentVolumeClaim:
73
+ claimName: peacock-fs-pvc
74
+ tolerations:
75
+ - key: "habana.ai/gaudi"
76
+ operator: "Exists"
77
+ effect: "NoSchedule"
78
+ - key: "k8s/namespace"
79
+ operator: "Equal"
80
+ value: "peacock"
81
+ effect: "NoSchedule"
82
+ hostIPC: true
83
+ containers:
84
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
85
+ name: peacock-llama-worker
86
+ imagePullPolicy: IfNotPresent
87
+ securityContext:
88
+ capabilities:
89
+ add:
90
+ - SYSLOG
91
+ resources:
92
+ limits:
93
+ habana.ai/gaudi: 8
94
+ hugepages-2Mi: 300Gi
95
+ memory: 700Gi
96
+ cpu: 150
97
+ requests:
98
+ habana.ai/gaudi: 8
99
+ hugepages-2Mi: 300Gi
100
+ memory: 700Gi
101
+ cpu: 150
102
+ volumeMounts:
103
+ - name: work-dir
104
+ mountPath: /mnt/weka/peacock
data_all_eng_slimpj/tok_english-5/tok_jobs/tokenizer_14.yaml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apiVersion: kubeflow.org/v2beta1
2
+ kind: MPIJob
3
+ metadata:
4
+ name: peacock-tokenizer-english-5-14
5
+ namespace: peacock
6
+ labels:
7
+ app: peacock-tokenizer-english-5-14
8
+ spec:
9
+ slotsPerWorker: 8
10
+ runPolicy:
11
+ backoffLimit: 1
12
+ cleanPodPolicy: Running
13
+ mpiReplicaSpecs:
14
+ Launcher:
15
+ replicas: 1
16
+ template:
17
+ spec:
18
+ hostIPC: true
19
+ volumes:
20
+ - name: work-dir
21
+ persistentVolumeClaim:
22
+ claimName: peacock-fs-pvc
23
+ containers:
24
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
25
+ name: peacock-tokenizer-english-5-14-launcher
26
+ imagePullPolicy: IfNotPresent
27
+ volumeMounts:
28
+ - name: work-dir
29
+ mountPath: /mnt/weka/peacock
30
+ command: ["/bin/bash", "-c"]
31
+ args:
32
+ - >-
33
+
34
+ export SYNAPSE_VERSION="1.15.1";
35
+ export WORKER_DIR="/mnt/weka/peacock/experiments/llama";
36
+
37
+ export MEGATRON_SETUP_CMD="$WORKER_DIR/launch/setup.sh"
38
+ export TOKENIZER_CMD=/mnt/weka/peacock/idc/hineng/data_all_eng_slimpj/tok_english-5/tok_files/tokenizer_14.sh
39
+ HOSTSFILE=$OMPI_MCA_orte_default_hostfile;
40
+ echo "HOSTSFILE=$HOSTSFILE";
41
+ MASTER_ADDR="$(head -n 1 $HOSTSFILE | sed -n s/[[:space:]]slots.*//p)";
42
+ NUM_NODES=$(wc -l < $HOSTSFILE);
43
+ CARDS_PER_NODE=8;
44
+ N_CARDS=$((NUM_NODES*CARDS_PER_NODE));
45
+ echo "MPI_ROOT=$MPI_ROOT";
46
+ echo "N_CARDS=$N_CARDS";
47
+ echo "MASTER_ADDR=$MASTER_ADDR";
48
+ sleep 20;
49
+
50
+
51
+ mpirun -np $N_CARDS -npernode 8 \
52
+ --tag-output \
53
+ --allow-run-as-root \
54
+ --prefix $MPI_ROOT \
55
+ -x WORKER_DIR=$WORKER_DIR \
56
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $MEGATRON_SETUP_CMD;
57
+
58
+ mpirun -np $N_CARDS -npernode 8 \
59
+ --tag-output \
60
+ --allow-run-as-root \
61
+ --prefix $MPI_ROOT \
62
+ -x WORKER_DIR=$WORKER_DIR \
63
+ -x SYNAPSE_VERSION=$SYNAPSE_VERSION $TOKENIZER_CMD;
64
+
65
+
66
+ Worker:
67
+ replicas: 1
68
+ template:
69
+ spec:
70
+ volumes:
71
+ - name: work-dir
72
+ persistentVolumeClaim:
73
+ claimName: peacock-fs-pvc
74
+ tolerations:
75
+ - key: "habana.ai/gaudi"
76
+ operator: "Exists"
77
+ effect: "NoSchedule"
78
+ - key: "k8s/namespace"
79
+ operator: "Equal"
80
+ value: "peacock"
81
+ effect: "NoSchedule"
82
+ hostIPC: true
83
+ containers:
84
+ - image: vault.habana.ai/gaudi-docker/1.15.1/ubuntu22.04/habanalabs/pytorch-installer-2.2.0:latest
85
+ name: peacock-llama-worker
86
+ imagePullPolicy: IfNotPresent
87
+ securityContext:
88
+ capabilities:
89
+ add:
90
+ - SYSLOG
91
+ resources:
92
+ limits:
93
+ habana.ai/gaudi: 8
94
+ hugepages-2Mi: 300Gi
95
+ memory: 700Gi
96
+ cpu: 150
97
+ requests:
98
+ habana.ai/gaudi: 8
99
+ hugepages-2Mi: 300Gi
100
+ memory: 700Gi
101
+ cpu: 150
102
+ volumeMounts:
103
+ - name: work-dir
104
+ mountPath: /mnt/weka/peacock