from transformers import RobertaTokenizerFast


# Tokenizer that splits a DNA sequence into k-mers taken every `stride` bases.
# Example with k_mer=2, stride=2: AAGTGGCAGA -> AA, GT, GG, CA, GA
class DNATokenizerFast(RobertaTokenizerFast):
    def __init__(self, vocab_file=None, merges_file=None, k_mer=2, stride=1,
                 errors="replace",
                 bos_token="<s>",
                 eos_token="</s>",
                 sep_token="</s>",
                 cls_token="<s>",
                 unk_token="<unk>",
                 pad_token="<pad>",
                 mask_token="<mask>",
                 add_prefix_space=False,
                 **kwargs
                 ):
        self.k_mer = k_mer
        self.stride = stride
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Assigned after super().__init__, which would otherwise overwrite it;
        # effectively lifts the length limit for long DNA sequences.
        self.model_max_length = 1000000

    def cut_and_encode(self, sequence, add_special_tokens):
        # Last valid k-mer start index, aligned to the stride. Floor division
        # makes this negative (so no tokens are produced) when the sequence is
        # shorter than one k-mer.
        last_start = ((len(sequence) - self.k_mer) // self.stride) * self.stride
        tokens = [sequence[i:i + self.k_mer] for i in range(0, last_start + 1, self.stride)]
        token_ids = [self._convert_token_to_id(token) for token in tokens]
        if add_special_tokens:
            # Mirror RoBERTa's single-sequence format: <s> tokens </s>
            token_ids = [self.cls_token_id] + token_ids + [self.eos_token_id]
        return tokens, token_ids
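
    # Worked example (a sketch, assuming the vocab contains these 2-mers):
    # with the default k_mer=2 and stride=1,
    #     tokens, ids = tokenizer.cut_and_encode("AAGTGG", add_special_tokens=False)
    # yields tokens == ['AA', 'AG', 'GT', 'TG', 'GG'] and ids of the same length.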

    def _convert_token_to_id(self, token):
        index = self._tokenizer.token_to_id(token)
        # token_to_id returns None for out-of-vocabulary tokens; test against
        # None explicitly so a legitimate id of 0 is not mistaken for a miss.
        if index is not None:
            return index
        # Map the pad token to its id explicitly; anything else unknown to the
        # vocab falls back to <unk>.
        if token == self.pad_token:
            return self.pad_token_id
        return self.unk_token_id

    def __call__(self, seq_list, add_special_tokens=False):
        # Encode a batch of raw DNA strings into lists of token ids.
        token_ids_list = []
        for seq in seq_list:
            _, token_ids = self.cut_and_encode(seq, add_special_tokens)
            token_ids_list.append(token_ids)
        return {"input_ids": token_ids_list}