import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the HelpingAI2.5-2B model and move it to the GPU when one is available
model = AutoModelForCausalLM.from_pretrained("OEvortex/HelpingAI2.5-2B")
model = model.to("cuda" if torch.cuda.is_available() else "cpu")

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("OEvortex/HelpingAI2.5-2B")

# Define the chat input
chat = [
    {
        "role": "system",
        "content": "You are HelpingAI, an emotional AI. Always answer my questions in the HelpingAI style."
    },
    {
        "role": "user",
        "content": "GIVE ME YOUR INTRO"
    }
]

# Apply the model's chat template and tokenize, keeping the tensors on the model's device
inputs = tokenizer.apply_chat_template(
    chat,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

# Generate a response with nucleus sampling
outputs = model.generate(
    inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# Strip the prompt tokens and decode only the newly generated text
response = outputs[0][inputs.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
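
# Optional: stream tokens to stdout as they are generated instead of waiting
# for the full completion. This is a minimal sketch that reuses the `model`,
# `tokenizer`, and `inputs` objects defined above; the sampling parameters
# simply mirror the example and are not prescribed by the model card.
from transformers import TextStreamer

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    inputs,
    streamer=streamer,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)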