#!/bin/bash
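# Tokenize the merged Hindi-Bengali corpus into Megatron-style indexed
# datasets (.bin/.idx) using the trained SentencePiece tokenizer.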
FINAL_DIR=/mnt/weka/peacock/idc/datasets/hindi_bengali_merged_unshuffled
#TOKENIZER=facebook/nllb-200-distilled-600M
#TOKENIZER_TYPE=HuggingFaceTokenizer
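# Trained SentencePiece tokenizer (the path suggests a shared English/Hindi/Bengali 50k vocabulary):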
TOKENIZER_PATH=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k
TOKENIZER=$TOKENIZER_PATH/all.model
VOCAB_FILE=$TOKENIZER_PATH/all.vocab
TOKENIZER_TYPE=SentencePieceTokenizer
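# Create the output directory and run preprocessing from the Megatron-DeepSpeed checkout.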
mkdir -p $FINAL_DIR/tokenizer/
cd /mnt/weka/peacock/experiments/llama/Megatron-DeepSpeed
python3 tools/preprocess_data.py \
    --input $FINAL_DIR/shuffled/final.json \
    --output-prefix $FINAL_DIR/tokenizer/ \
    --tokenizer-model $TOKENIZER \
    --vocab-file $VOCAB_FILE \
    --dataset-impl mmap \
    --tokenizer-type $TOKENIZER_TYPE \
    --append-eod \
    --workers 8
    # Disabled options kept for reference:
    # --chunk-size 50
    # --merge-file $MERGES_FILE
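# Note (assumption about preprocess_data.py's naming scheme): with the output
# prefix ending in "tokenizer/", the tool typically writes one pair of files
# per --json-keys key, named like $FINAL_DIR/tokenizer/_text_document.bin and
# the matching .idx ("text" is the default key).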