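#!/bin/bash
# Decompress zstd-compressed SlimPajama shards into per-shard jsonl files,
# concatenate them into one corpus, and (optionally, see the disabled block
# at the bottom) tokenize the result with Megatron-DeepSpeed.
set -euo pipefail   # fail fast on errors, unset variables, and pipe failures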
DIR="/data/wet-data/output/toxic_filtered_without_bloom_new/2024-10/"
TRAIN_DIR=$DIR
OUTPUT_TRAIN_DIR="/data/hineng/data_all_eng_slimpj/"
DATASET_NAME="english"
FINAL_DIR="/data/datasets/$DATASET_NAME/"
mkdir -p $FINAL_DIR $OUTPUT_TRAIN_DIR
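# Sanity check (a small sketch): fail early if the zstd tools are missing;
# unzstd ships with the standard zstd package.
command -v unzstd >/dev/null 2>&1 || { echo "unzstd not found" >&2; exit 1; }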
# Decompress every shard to its own jsonl file. Globbing directly avoids the
# word-splitting pitfalls of parsing `ls` output, and the shard path (with "/"
# replaced by "_") keeps the output filenames unique.
files=(/data/enfm-dataprocessing/Data_Info/SlimPajama-627B-DC/train/*/*)
for f in "${files[@]}"; do
    echo "$f"
    new_name="${f//\//_}"
    unzstd --stdout "$f" > "$OUTPUT_TRAIN_DIR/output${new_name}.jsonl"
done
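# Parallel variant (a sketch, left disabled): assumes GNU xargs with -P and
# shard paths without whitespace; decompresses up to 8 shards concurrently.
#export OUTPUT_TRAIN_DIR
#printf '%s\n' "${files[@]}" | xargs -P 8 -I{} bash -c \
#    'f="$1"; unzstd --stdout "$f" > "$OUTPUT_TRAIN_DIR/output${f//\//_}.jsonl"' _ {}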
# Concatenate all decompressed shards into a single corpus file. Note that on
# a re-run the previous final_complete_en.jsonl matches the glob and would be
# folded back in, so remove it first when repeating this step.
cat "$OUTPUT_TRAIN_DIR"/*.jsonl > "$OUTPUT_TRAIN_DIR/final_complete_en.jsonl"
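# Quick validity spot-check (a sketch): the first few lines should each parse
# as a standalone JSON object.
head -n 3 "$OUTPUT_TRAIN_DIR/final_complete_en.jsonl" \
    | python3 -c 'import json,sys; [json.loads(l) for l in sys.stdin]'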
# Optional follow-up (disabled): tokenize the concatenated corpus with
# Megatron-DeepSpeed's GPT-2 BPE preprocessor. The --input path points at the
# file produced by the concatenation step above.
#mkdir -p $FINAL_DIR/tokenizer/
#cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/
#python3 tools/preprocess_data.py \
#    --input $OUTPUT_TRAIN_DIR/final_complete_en.jsonl \
#    --output-prefix $FINAL_DIR/tokenizer/ \
#    --vocab-file /sml1/datasets/gpt2/vocab.json \
#    --merge-file /sml1/datasets/gpt2/merges.txt \
#    --dataset-impl mmap --tokenizer-type GPT2BPETokenizer \
#    --append-eod --workers 8 --chunk-size 50 > tokenizer.out 2> tokenizer.err
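# If enabled, preprocess_data.py typically writes an indexed dataset pair
# (<output-prefix>_text_document.bin / .idx) that Megatron data loaders consume.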