JacobLinCool committed on
Commit 7226b69 · 1 Parent(s): bd35af6

feat: mistral_7b_ielts_evaluator

Files changed (2)
  1. app.py +2 -0
  2. model/mistral_7b_ielts_evaluator.py +32 -0
app.py CHANGED
@@ -2,10 +2,12 @@ from typing import *
 import gradio as gr
 from model.IELTS_essay_scoring import grade_IELTS_essay_scoring
 from model.Engessay_grading_ML import grade_Engessay_grading_ML
+from model.mistral_7b_ielts_evaluator import grade_mistral_7b_ielts_evaluator
 
 models = {
     "IELTS_essay_scoring": grade_IELTS_essay_scoring,
     "Engessay_grading_ML": grade_Engessay_grading_ML,
+    "mistral_7b_ielts_evaluator": grade_mistral_7b_ielts_evaluator,
 }
 
 
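This hunk only adds the new import and registers it in the models dict; the Gradio interface that consumes models sits further down in app.py and is outside the hunk. A minimal sketch of how such a registry is commonly wired up (the grade dispatcher, component choices, and labels below are illustrative assumptions, not code from this commit; only models and the (question, answer) -> (score, comment) grader signature come from the repository):

import gradio as gr

def grade(model_name: str, question: str, answer: str):
    # Dispatch to whichever grader was selected; every entry in `models`
    # maps a display name to a callable returning (score, comment).
    score, comment = models[model_name](question, answer)
    return score, comment

demo = gr.Interface(
    fn=grade,
    inputs=[
        gr.Dropdown(choices=list(models.keys()), label="Model"),
        gr.Textbox(label="Question"),
        gr.Textbox(label="Answer", lines=10),
    ],
    outputs=[gr.Number(label="Overall score"), gr.Textbox(label="Comment")],
)

if __name__ == "__main__":
    demo.launch()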
model/mistral_7b_ielts_evaluator.py ADDED
@@ -0,0 +1,32 @@
+from typing import *
+import torch
+import numpy as np
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+import spaces
+
+model_name = "chillies/mistral-7b-ielts-evaluator-q4"
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+@spaces.GPU()
+@torch.no_grad()
+def grade_mistral_7b_ielts_evaluator(question: str, answer: str) -> Tuple[float, str]:
+    text = f"{question} {answer}"
+
+    inputs = tokenizer(
+        text,
+        return_tensors="pt",
+        padding=True,
+        truncation=True,
+    )
+
+    outputs = model(**inputs)
+    score = outputs.logits.argmax(dim=-1).item()
+    print(score)
+
+    overall_score = float(score)
+
+    comment = ""
+
+    return overall_score, comment
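The added module loads chillies/mistral-7b-ielts-evaluator-q4 as a sequence-classification checkpoint at import time, concatenates the question and the essay into a single input, and reports the argmax over the classifier logits as the overall score; the comment slot is returned as an empty string for now, and the @spaces.GPU() decorator requests a GPU allocation (ZeroGPU) when the function runs on Hugging Face Spaces. A minimal usage sketch, not part of the commit (the sample texts are placeholders, and the checkpoint is downloaded on first import):

from model.mistral_7b_ielts_evaluator import grade_mistral_7b_ielts_evaluator

question = "Some people think museums should be free to visit. To what extent do you agree?"
answer = "In my opinion, free admission encourages lifelong learning because ..."

# Returns (overall_score, comment); comment is currently always an empty string.
score, comment = grade_mistral_7b_ielts_evaluator(question, answer)
print(score)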