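#!/bin/bash
# Per-node data preprocessing: each node looks up the input shard assigned to
# its IP address in $FINAL_DIR/splitfile and tokenizes it with
# Megatron-DeepSpeed's tools/preprocess_data.py.
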
# (Kept for reference, commented out) one-off step: decompress the per-language
# .zst shards and concatenate them into a single JSONL file per language.
#DIR="/data/wet-data/output/toxic_filtered_without_bloom_new/2024-10/"
#TRAIN_DIR=$DIR
#OUTPUT_TRAIN_DIR="/data/hineng/data_multi/"
#DATASET_NAME="multi"
#FINAL_DIR="/data/datasets/$DATASET_NAME/"
#mkdir -p $FINAL_DIR $OUTPUT_TRAIN_DIR
#langs=($(ls -1 /data/wet-data/output/toxic_filtered_without_bloom_new/2024-10/ | awk '{split($0, a,"_"); print a[1]}' | uniq))
#for lang in ${langs[@]}; do
#	files=($(ls $TRAIN_DIR/$lang*))
#	OUTPUT_LANG_DIR=$OUTPUT_TRAIN_DIR/$lang
#	FINAL_LANG_DIR=$FINAL_DIR/$lang
#	mkdir -p $OUTPUT_LANG_DIR $FINAL_LANG_DIR
#	for f in ${files[@]}; do
#		new_name=$(echo $f | sed "s/\//_/g")
#		cmd="unzstd $f --stdout > $OUTPUT_LANG_DIR/$(basename $f).jsonl"
#		echo $cmd
#		eval $cmd
#	done;
#final_cmd="cat $OUTPUT_LANG_DIR/*.jsonl > $FINAL_LANG_DIR/final_en.jsonl"
#echo $final_cmd
#eval $final_cmd
#done;	
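# The concatenated JSONL is then assumed to be split into per-node shards listed
# in $FINAL_DIR/splitfile. A hypothetical sketch of that step (hosts.txt and the
# shard_ prefix are assumptions, not part of this repo):
#   split -n l/$(wc -l < hosts.txt) -d --additional-suffix=.jsonl final.jsonl shard_
#   paste -d' ' <(ls shard_*.jsonl) hosts.txt > splitfile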
FINAL_DIR=/data/datasets/hindi_english_arxiv_bengali/
TOKENIZER=google/gemma-7b
TOKENIZER_TYPE=HuggingFaceTokenizer
#TOKENIZER=GPT2BPETokenizer
# Vocab/merges files are only needed for GPT2BPETokenizer (see the commented-out
# flags at the bottom); left empty for the HuggingFace tokenizer.
VOCAB_FILE=
MERGES_FILE=
# This node's primary IP address (skip loopback and the default Docker bridge)
ip_address=$(ifconfig | grep "inet " | grep -Fv "127.0.0.1" | grep -Fv "172.17.0.1" | awk '{print $2}')
mkdir -p $FINAL_DIR/tokenizer/

# Pick the input shard assigned to this node: each splitfile line is expected to
# start with the shard filename and contain the node's IP address.
filename=$(grep "$ip_address" $FINAL_DIR/splitfile | awk '{print $1}')
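# Hypothetical splitfile illustrating the layout assumed above (shard name in
# the first column, assigned node IP after it; values are examples only):
#   hindi_shard_00.jsonl   10.0.0.11
#   english_shard_00.jsonl 10.0.0.12
#   bengali_shard_00.jsonl 10.0.0.13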

cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/
# Tokenize this node's shard: --dataset-impl mmap writes the memory-mapped
# indexed dataset format, --append-eod appends an end-of-document token to each
# document, and --workers sets the number of parallel tokenization processes.
python3 tools/preprocess_data.py \
    --input $FINAL_DIR/$filename \
    --output-prefix $FINAL_DIR/tokenizer/output_$ip_address \
    --tokenizer_model_file $TOKENIZER \
    --dataset-impl mmap \
    --tokenizer-type $TOKENIZER_TYPE \
    --append-eod \
    --workers 8 #--chunk-size 50

    #--vocab-file $VOCAB_FILE \
    #--merge-file $MERGES_FILE \
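# Expected result (typical for Megatron-style preprocessing of the JSON "text"
# field): a binary/index pair per node, e.g.
#   $FINAL_DIR/tokenizer/output_<ip>_text_document.bin
#   $FINAL_DIR/tokenizer/output_<ip>_text_document.idx
# which training jobs later consume via --data-path.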