Model Card for CoT-Translator/Llama-3b-Reasoning-Translate

The snippet below loads the model with the transformers library, streams its step-by-step translation output to the console as it is generated, and optionally appends each full response to a text file.

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from typing import Optional
import numpy as np
from datetime import datetime

class TextStreamer:
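    """Minimal streamer exposing the put()/end() interface that
    model.generate(streamer=...) expects: it decodes tokens incrementally,
    prints the newly generated text, and can append the full response to a file."""
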
    def __init__(self, tokenizer, output_file=None):
        self.tokenizer = tokenizer
        self.current_tokens = []
        self.output_file = output_file
        self.full_response = ""
        
    def put(self, value):
        # Normalize the incoming token ids to a flat Python list
        if isinstance(value, torch.Tensor):
            value = value.cpu().numpy()
            if len(value.shape) > 1:
                value = value[0]

        if isinstance(value, np.ndarray):
            value = value.tolist()

        if not isinstance(value, list):
            value = [value]
        elif value and isinstance(value[0], list):
            value = value[0]

        if not value:
            return

        num_new = len(value)
        self.current_tokens.extend(value)

        tokens_to_decode = [int(token) for token in self.current_tokens]
        text = self.tokenizer.decode(tokens_to_decode, skip_special_tokens=True)

        # Decode the previously seen tokens so that only the newly added text is printed
        if len(self.current_tokens) > num_new:
            previous_text = self.tokenizer.decode(
                [int(token) for token in self.current_tokens[:-num_new]],
                skip_special_tokens=True
            )
            new_text = text[len(previous_text):]
        else:
            new_text = text

        if new_text:
            print(new_text, end="", flush=True)
            self.full_response += new_text

    def end(self):
        print("")
        if self.output_file:
            with open(self.output_file, 'a', encoding='utf-8') as f:
                f.write(f"\n--- Response generated at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ---\n")
                f.write(self.full_response)
                f.write("\n\n")
        return self.full_response

class Translator:
    DEFAULT_SYSTEM_PROMPT = """
    You are a skilled linguistic expert specializing in cross-lingual translation. Your task is to perform accurate and detailed translations, moving from a given source language to a specified destination language. 
    You will perform the translation by thinking and reasoning step-by-step, demonstrating the linguistic transformation process while maintaining the source context.

    # Output Format

    Your translation responses should be structured as follows:

    ```
    <think>
    [Detailed thinking and reasoning process, including the analysis and breakdown of the sentence]
    </think>
    <translation>
    [Final translated sentence based on the step-by-step reasoning]
    </translation>
    ```

    Stick to the above format and enclose the final translation in <translation>{translated sentence}</translation>
    """

    def __init__(
        self,
        model_name: str,
        system_prompt: Optional[str] = None,
        device_map: str = "auto",
        torch_dtype: str = "auto"
    ):
        """
        Initialize the translator with a model and tokenizer.
        
        Args:
            model_name: Path or name of the model to load
            system_prompt: Optional custom system prompt
            device_map: Device mapping strategy for model loading
            torch_dtype: Torch data type for model
        """
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch_dtype,
            device_map=device_map
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.system_prompt = system_prompt or self.DEFAULT_SYSTEM_PROMPT

    def translate(
        self,
        text: str,
        max_new_tokens: int = 2048,
        temperature: float = 0.1,
        top_p: float = 0.7,
        output_file: Optional[str] = None
    ) -> str:
        """
        Translate the given text using the loaded model.
        
        Args:
            text: Text to translate
            max_new_tokens: Maximum number of tokens to generate
            temperature: Temperature for generation
            top_p: Top-p sampling parameter
            output_file: Optional file to save the translation
            
        Returns:
            str: The translated text
        """
        # Prepare messages
        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": text}
        ]

        # Apply chat template
        prompt = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # Tokenize input
        model_inputs = self.tokenizer([prompt], return_tensors="pt").to(self.model.device)

        # Create streamer
        streamer = TextStreamer(self.tokenizer, output_file=output_file)
        
        # Generate with streaming; generate() calls streamer.put() for each batch of
        # new tokens and streamer.end() once generation finishes
        self.model.generate(
            **model_inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,  # required so that temperature/top_p actually take effect
            temperature=temperature,
            top_p=top_p,
            streamer=streamer
        )

        # generate() has already called streamer.end(), which handles the optional
        # file output, so just return the accumulated response text
        return streamer.full_response

    def __call__(self, *args, **kwargs) -> str:
        """
        Make the translator callable directly.
        """
        return self.translate(*args, **kwargs)

# %%
translator = Translator(
    model_name="CoT-Translator/Llama-3b-Reasoning-Translate"
)

# %%
# Use it multiple times
texts = [
    # "संक्रमित चमगादड़ निपाह विषाणु को सूअरों जैसे अन्य जानवरों में भी फैला सकते हैं। .translate from hindi to english",
    "how are you doing today and what is your name .translate from english to hindi",
    # "सफरचंदसाठी आजचा दिवस खरोखर चांगला आहे आणि मला खूप मजा येत आहे. translate from marathi to english"
    # "Today's day is really good for Safar Chand and I'm having a lot of fun. translate from english to marathi"
]

for text in texts:
    print(f"\nTranslating: {text}")
    translation = translator(
        text,
        output_file="translation_responses_llama_translate.txt"
    )
    print(f"Complete translation: {translation}\n")