#!/bin/bash
# Decompress SlimPajama .zst training shards into per-shard JSONL files,
# then concatenate them into a single English corpus file.

DIR="/data/wet-data/output/toxic_filtered_without_bloom_new/2024-10/"
TRAIN_DIR="$DIR"
OUTPUT_TRAIN_DIR="/data/hineng/data_all_eng_slimpj/"
DATASET_NAME="english"
FINAL_DIR="/data/datasets/$DATASET_NAME/"
mkdir -p "$FINAL_DIR" "$OUTPUT_TRAIN_DIR"

# Collect the compressed shard paths with a glob rather than parsing `ls` output.
files=(/data/enfm-dataprocessing/Data_Info/SlimPajama-627B-DC/train/*/*)

for f in "${files[@]}"; do
    echo "$f"
    # Flatten the shard path into a unique file name ('/' -> '_').
    new_name=${f//\//_}
    # Decompress the shard to a JSONL file in the output directory.
    echo "unzstd $f --stdout > $OUTPUT_TRAIN_DIR/output$new_name.jsonl"
    unzstd "$f" --stdout > "$OUTPUT_TRAIN_DIR/output${new_name}.jsonl"
done
# Merge the per-shard JSONL files (all prefixed with "output", so the merged
# file itself is excluded on re-runs) into one corpus file.
cat "$OUTPUT_TRAIN_DIR"/output*.jsonl > "$OUTPUT_TRAIN_DIR/final_complete_en.jsonl"

# Optional: tokenize the merged corpus with Megatron-DeepSpeed's preprocess_data.py.
#mkdir -p $FINAL_DIR/tokenizer/
#cd /Model-References/PyTorch/nlp/DeepSpeedExamples/Megatron-DeepSpeed/
#python3 tools/preprocess_data.py \
#    --input $FINAL_DIR/final.jsonl \
#    --output-prefix $FINAL_DIR/tokenizer/ \
#    --vocab-file /sml1/datasets/gpt2/vocab.json \
#    --merge-file /sml1/datasets/gpt2/merges.txt \
#    --dataset-impl mmap --tokenizer-type GPT2BPETokenizer \
#    --append-eod --workers 8 --chunk-size 50 > tokenizer.out 2> tokenizer.err