Owos committed on
Commit 3200c52 · 1 Parent(s): cc51e28

Update app.py

Files changed (1)
  1. app.py +34 -23
app.py CHANGED
@@ -213,29 +213,30 @@ def set_seed():
 
 
 
-# with st.sidebar:
-
-#     st.image("Koya_Presentation-removebg-preview.png")
-#     st.subheader("Abstract")
-#     st.markdown(
-#         """
-#         <div style="text-align: justify">
-#         <h6> Pretrained large language models (LLMs) are widely used for various downstream tasks in different languages. However, selecting the best
-#         LLM (from a large set of potential LLMs) for a given downstream task and language is a challenging and computationally expensive task, making
-#         the efficient use of LLMs difficult for low-compute communities. To address this challenge, we present Koya, a recommender system built to assist
-#         researchers and practitioners in choosing the right LLM for their task and language, without ever having to finetune the LLMs. Koya is built with
-#         the Koya Pseudo-Perplexity (KPPPL), our adaptation of the pseudo perplexity, and ranks LLMs in order of compatibility with the language of interest,
-#         making it easier and cheaper to choose the most compatible LLM. By evaluating Koya using five pretrained LLMs and three African languages
-#         (Yoruba, Kinyarwanda, and Amharic), we show an average recommender accuracy of 95%, demonstrating its effectiveness. Koya aims to offer
-#         an easy to use (through a simple web interface accessible at https://huggingface.co/spaces/koya-recommender/system), cost-effective, fast and
-#         efficient tool to assist researchers and practitioners with low or limited compute access.</h6>
-#         </div>
-
-#         """,
-#         unsafe_allow_html=True
-#     )
-#     url = "https://drive.google.com/file/d/1eWat34ot3j8onIeKDnJscKalp2oYnn8O/view"
-#     st.write("check out the paper [here](%s)" % url)
+with st.sidebar:
+
+    st.image("Koya_Presentation-removebg-preview.png")
+    st.subheader("Abstract")
+    st.markdown(
+        """
+        <div style="text-align: justify">
+        <h6> Pretrained large language models (LLMs) are widely used for various downstream tasks in different languages. However, selecting the best
+        LLM (from a large set of potential LLMs) for a given downstream task and language is a challenging and computationally expensive task, making
+        the efficient use of LLMs difficult for low-compute communities. To address this challenge, we present Koya, a recommender system built to assist
+        researchers and practitioners in choosing the right LLM for their task and language, without ever having to finetune the LLMs. Koya is built with
+        the Koya Pseudo-Perplexity (KPPPL), our adaptation of the pseudo perplexity, and ranks LLMs in order of compatibility with the language of interest,
+        making it easier and cheaper to choose the most compatible LLM. By evaluating Koya using five pretrained LLMs and three African languages
+        (Yoruba, Kinyarwanda, and Amharic), we show an average recommender accuracy of 95%, demonstrating its effectiveness. Koya aims to offer
+        an easy to use (through a simple web interface accessible at https://huggingface.co/spaces/koya-recommender/system), cost-effective, fast and
+        efficient tool to assist researchers and practitioners with low or limited compute access.</h6>
+        </div>
+
+        """,
+        unsafe_allow_html=True
+    )
+    url = "https://drive.google.com/file/d/1eWat34ot3j8onIeKDnJscKalp2oYnn8O/view"
+    st.write("check out the paper [here](%s)" % url)
+
 
 footer()
 
@@ -270,3 +271,13 @@ if run:
         my_bar.progress(index + 1 / len(selected_models))
     scores = sort_dictionary(scores)
     st.write("Our recommendation is:", scores)
+
+    st.write("""Pretrained large language models (LLMs) are widely used for various downstream tasks in different languages. However, selecting the best
+    LLM (from a large set of potential LLMs) for a given downstream task and language is a challenging and computationally expensive task, making
+    the efficient use of LLMs difficult for low-compute communities. To address this challenge, we present Koya, a recommender system built to assist
+    researchers and practitioners in choosing the right LLM for their task and language, without ever having to finetune the LLMs. Koya is built with
+    the Koya Pseudo-Perplexity (KPPPL), our adaptation of the pseudo perplexity, and ranks LLMs in order of compatibility with the language of interest,
+    making it easier and cheaper to choose the most compatible LLM. By evaluating Koya using five pretrained LLMs and three African languages
+    (Yoruba, Kinyarwanda, and Amharic), we show an average recommender accuracy of 95%, demonstrating its effectiveness. Koya aims to offer
+    an easy to use (through a simple web interface accessible at https://huggingface.co/spaces/koya-recommender/system), cost-effective, fast and
+    efficient tool to assist researchers and practitioners with low or limited compute access.""")
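
For context on what the app ranks: the abstract above describes scoring each candidate LLM with the Koya Pseudo-Perplexity (KPPPL), an adaptation of masked-LM pseudo-perplexity, and sorting the models by that score. The sketch below is only an illustration of how a plain pseudo-perplexity could be computed with the transformers library and used to rank candidates; the function name, loop, and usage lines are assumptions for illustration, not the actual implementation in app.py.

    # Illustrative sketch only (not the app.py implementation): score a text sample
    # with a masked language model's pseudo-perplexity, then rank candidate models.
    import torch
    from transformers import AutoModelForMaskedLM, AutoTokenizer

    def pseudo_perplexity(model_name: str, text: str) -> float:
        """Lower score = the model assigns higher probability to the text."""
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForMaskedLM.from_pretrained(model_name)
        model.eval()

        input_ids = tokenizer(text, return_tensors="pt")["input_ids"][0]
        log_probs = []
        with torch.no_grad():
            # Mask one position at a time and score the original token there,
            # skipping the special tokens at the start and end of the sequence.
            for i in range(1, input_ids.size(0) - 1):
                masked = input_ids.clone()
                masked[i] = tokenizer.mask_token_id
                logits = model(masked.unsqueeze(0)).logits[0, i]
                log_probs.append(torch.log_softmax(logits, dim=-1)[input_ids[i]].item())
        return float(torch.exp(-torch.tensor(log_probs).mean()))

    # Hypothetical usage: rank candidate models for a language sample, best first.
    # scores = {m: pseudo_perplexity(m, sample_text) for m in selected_models}
    # scores = dict(sorted(scores.items(), key=lambda kv: kv[1]))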