# tajik-text-segmentation / annotations_parser.py
# Commit f17f737 (sobir-hf): strip sentence ends when parsing
import os
import re
def parse_annotated_text(text):
    """Parse a YEDDA-annotated string.

    Each annotation has the form ``[@<entity>#<label>*]``.  The markup is
    stripped out and the entity text (with trailing whitespace removed)
    stays in place.

    Returns a dict with:
        'text'   - the input with all annotation markup removed,
        'spans'  - (start, end) position of each entity in that cleaned text,
        'labels' - the label of each annotation, in match order.
    """
    # [@ ... # LABEL *] -- group 2 captures the entity text, group 3 the label.
    pattern = re.compile(r'(\[\@(.*?)\#(\w+)\*\])', re.DOTALL)

    pieces = []   # chunks of the cleaned output text, joined at the end
    spans = []    # entity spans, expressed in the cleaned text
    labels = []   # annotation labels, in match order
    removed = 0   # running count of markup characters dropped so far
    cursor = 0    # end of the previous match in the raw input

    for m in pattern.finditer(text):
        whole, entity, label = m.group(0), m.group(2), m.group(3)
        labels.append(label)

        # Copy the unannotated text that precedes this match.
        pieces.append(text[cursor:m.start()])

        # Drop trailing whitespace from the entity before emitting it.
        entity = entity.rstrip()
        pieces.append(entity)

        # Position of the entity once all earlier markup has been removed.
        span_start = m.start() - removed
        span_end = span_start + len(entity)
        assert span_end > span_start, text  # an empty entity is a data error
        spans.append((span_start, span_end))

        # Markup characters (and rstripped whitespace) removed by this match.
        removed += len(whole) - len(entity)
        cursor = m.end()

    # Tail of the input after the final annotation.
    pieces.append(text[cursor:])

    return {
        'text': ''.join(pieces),
        'spans': spans,
        'labels': labels,
    }
def preprocess_text(text: str) -> str:
    """Normalize whitespace: trim both ends, collapse runs of newlines and
    spaces to single characters, and strip spaces adjacent to newlines."""
    cleaned = text.strip()
    # Collapse repeated newlines, then repeated spaces, in that order.
    for pattern, repl in ((r'\n+', '\n'), (r' +', ' ')):
        cleaned = re.sub(pattern, repl, cleaned)
    # After collapsing, at most one space can touch a newline on either side.
    return cleaned.replace(' \n', '\n').replace('\n ', '\n')
def load_yedda_annotations(directory):
    """Read every ``.ann`` file in *directory* and parse its YEDDA markup.

    Returns a list of dicts, one per file, each carrying the filename, the
    whitespace-normalized annotated text, the cleaned text, the entity
    spans, and the labels.
    """
    results = []
    for entry in os.listdir(directory):
        # Only YEDDA annotation files are of interest.
        if not entry.endswith(".ann"):
            continue
        path = os.path.join(directory, entry)
        with open(path, 'r', encoding='utf-8') as handle:
            raw = handle.read()
        # Normalize whitespace before parsing so spans match the stored text.
        normalized = preprocess_text(raw)
        parsed = parse_annotated_text(normalized)
        results.append({
            'file': entry,
            'annotated_text': normalized,
            'text': parsed['text'],
            'spans': parsed['spans'],
            'labels': parsed['labels'],
        })
    return results
def convert_to_ann(annotatations):
    """Inverse of parse_annotated_text: re-insert ``[@...#label*]`` markup.

    *annotatations* (parameter name kept as-is for backward compatibility
    with keyword callers) is a dict with 'text', 'spans', and 'labels' keys
    as produced by parse_annotated_text.
    """
    plain = annotatations['text']
    out = []
    cursor = 0
    for (start, end), label in zip(annotatations['spans'], annotatations['labels']):
        # Untouched text up to this entity, then the entity wrapped in markup.
        out.append(plain[cursor:start])
        out.append(f'[@{plain[start:end]}#{label}*]')
        cursor = end
    # Remainder of the text after the last entity.
    out.append(plain[cursor:])
    return ''.join(out)
if __name__ == '__main__':
    directory_path = 'annotations'  # The directory containing .ann files
    annotations = load_yedda_annotations(directory_path)
    counter = 0
    for file_annotation in annotations:
        counter += len(file_annotation['labels'])
        print('File:', file_annotation['file'])
        print('Text[:100]:', repr(file_annotation['text'][:100]))
        print('Number of labels:', len(file_annotation['labels']))
        assert len(file_annotation['labels']) == len(file_annotation['spans'])
        # Fix: a file with zero annotations previously raised ZeroDivisionError
        # when computing the average span length; guard against empty spans.
        spans = file_annotation['spans']
        if spans:
            avg = sum(end - start for start, end in spans) / len(spans)
            print('Average labeled sentence length:', avg)
        else:
            print('Average labeled sentence length: n/a (no spans)')
        print('--------------------------------')
    print('Total number of files:', len(annotations))
    print('Total label count:', counter)