#DIR="/data/wet-data/output/toxic_filtered_without_bloom_new/2024-10/" #TRAIN_DIR=$DIR #OUTPUT_TRAIN_DIR="/data/hineng/data_multi/" #DATASET_NAME="multi" #FINAL_DIR="/data/datasets/$DATASET_NAME/" #mkdir -p $FINAL_DIR $OUTPUT_TRAIN_DIR #langs=($(ls -1 /data/wet-data/output/toxic_filtered_without_bloom_new/2024-10/ | awk '{split($0, a,"_"); print a[1]}' | uniq)) #for lang in ${langs[@]}; do # files=($(ls $TRAIN_DIR/$lang*)) # OUTPUT_LANG_DIR=$OUTPUT_TRAIN_DIR/$lang # FINAL_LANG_DIR=$FINAL_DIR/$lang # mkdir -p $OUTPUT_LANG_DIR $FINAL_LANG_DIR # for f in ${files[@]}; do # new_name=$(echo $f | sed "s/\//_/g") # cmd="unzstd $f --stdout > $OUTPUT_LANG_DIR/$(basename $f).jsonl" # echo $cmd # eval $cmd # done; #final_cmd="cat $OUTPUT_LANG_DIR/*.jsonl > $FINAL_LANG_DIR/final_en.jsonl" #echo $final_cmd #eval $final_cmd #done; FINAL_DIR=/data/datasets/hindi_english_arxiv_bengali/ TOKENIZER=google/gemma-7b TOKENIZER_TYPE=HuggingFaceTokenizer #TOKENIZER=GPT2BPETokenizer VOCAB_FILE= MERGES_FILE= ip_address=$(ifconfig | grep "inet " | grep -Fv "127.0.0.1" | grep -Fv "172.17.0.1" | awk '{print $2}') mkdir -p $FINAL_DIR/tokenizer/ filename=$(cat $FINAL_DIR/splitfile | grep $ip_address | awk '{print $1}') cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/ python3 tools/preprocess_data.py \ --input $FINAL_DIR/$filename \ --output-prefix $FINAL_DIR/tokenizer/output_$ip_adress \ --tokenizer_model_file $TOKENIZER \ --dataset-impl mmap --tokenizer-type $TOKENIZER_TYPE\ --append-eod --workers 8 #--chunk-size 50 #--vocab-file $VOCAB_FILE \ # --merge-file $MERGES_FILE \