BeardedMonster committed on
Commit
332a731
1 Parent(s): 31c1d30

update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -71,12 +71,12 @@ top_k = st.sidebar.slider("Top-K: Controls model's sampling space.", min_value=1
71
  top_p = st.sidebar.slider("Top-P", min_value=0.1, max_value=1.0, value=top_p)
72
  repetition_penalty = st.sidebar.slider("Repetition Penalty: Discourages token repitition during generation.", min_value=1.0, max_value=10.0, value=repetition_penalty)
73
  length_penalty = st.sidebar.slider("Length Penalty: Discourages poor output as token length grows.", min_value=0.1, max_value=10.0, value=length_penalty)
74
-
75
 
76
  generation_config = {
77
  "max_length": max_length,
78
  "num_beams": num_beams,
79
- "do_sample": True,
80
  "temperature": temperature,
81
  "top_k": top_k,
82
  "top_p": top_p,
 
71
  top_p = st.sidebar.slider("Top-P", min_value=0.1, max_value=1.0, value=top_p)
72
  repetition_penalty = st.sidebar.slider("Repetition Penalty: Discourages token repitition during generation.", min_value=1.0, max_value=10.0, value=repetition_penalty)
73
  length_penalty = st.sidebar.slider("Length Penalty: Discourages poor output as token length grows.", min_value=0.1, max_value=10.0, value=length_penalty)
74
+ do_sample = st.sidebar.checkbox("Do_sample: Enable sampling for richer generation.")
75
 
76
  generation_config = {
77
  "max_length": max_length,
78
  "num_beams": num_beams,
79
+ "do_sample": bool(do_sample),
80
  "temperature": temperature,
81
  "top_k": top_k,
82
  "top_p": top_p,