Alexandr "MrSteyk" German committed on
Commit cdb0880
1 Parent(s): caff12e

cleanup brainfarts

Files changed (1)
  1. app.py +3 -33
app.py CHANGED
@@ -104,26 +104,6 @@ def insert_fn(inpt: str, max_tokens, min_tokens, alpha_f, alpha_p, num_tokens_in
         print(e)
         yield ("Error...", gr.Text.update(value=str(e), visible=True))
 
-# def classify_fn_inner(inpt, clas):
-#     state = rwkv_rs.State(model)
-#     tokens = tokenizer.encode(f"This is an example of {clas} text: {inpt}").ids
-#     for i in tokens[:-2]:
-#         model.forward_token_preproc(i, state)
-#     # state_2 = state.copy()
-
-#     logit_x_1 = softmax(model.forward_token(tokens[-2], state))
-#     logit_y_1 = softmax(model.forward_token(tokens[-1], state))
-#     # shapep = logit_x_1.shape[0] * 0.9
-#     # s = np.sort(logit_y_1)[::-1]
-#     # c = s[np.argmax(np.cumsum(s) > 0.9)]
-#     # logit_y_1[logit_y_1 < c] = 0
-#     loss_1 = -np.sum(logit_y_1 * np.log(logit_x_1)) / logit_x_1.shape[0]
-
-#     # I forgor that I do not return the preproc shit...
-#     # logit_x_2 = model.forward_token_preproc(tokens[-2], state_2)
-#     # logit_y_2 = model.forward_token_preproc(tokens[-1], state_2)
-#     return (loss_1, None)
-
 def classify_fn_inner2(inpt, clas):
     state = rwkv_rs.State(model)
     tokens = tokenizer.encode(f"This is an example of {clas} text:").ids
@@ -135,7 +115,7 @@ def classify_fn_inner2(inpt, clas):
     for i in tokens[:-1]:
         logits.append(model.forward_token(i, state))
     logit_x = [softmax(i) for i in logits]
-    loss = -np.sum([ x[y] for x, y in zip(logit_x, tokens[1:]) ]) / len(logit_x)
+    loss = -np.sum([ np.log(x[y]) for x, y in zip(logit_x, tokens[1:]) ]) / len(logit_x)
 
     return loss
 
@@ -143,23 +123,13 @@ def softmax(x):
     e = np.exp(x - np.max(x))
     return e / e.sum()
 
-# TODO: maybe make a function with pos/neg inputs?
 def classify_fn(inpt, clas, clasneg):
-    # loss_1, loss_2 = classify_fn_inner(inpt, clas)
-    # loss_1_neg, loss_2_neg = classify_fn_inner(inpt, clasneg)
-
-    # print(loss_1, loss_1_neg, end=' | ')
-    # # We negate the loss because we want to know who's closer to 0
-    # loss_1, loss_1_neg = softmax([-loss_1, -loss_1_neg])
-    # print(loss_1, loss_1_neg)
-
     loss_3 = classify_fn_inner2(inpt, clas)
     loss_3_neg = classify_fn_inner2(inpt, clasneg)
-    print(loss_3, loss_3_neg, end=' | ')
+    # print(loss_3, loss_3_neg, end=' | ')
     loss_3, loss_3_neg = softmax([-loss_3, -loss_3_neg])
-    print(loss_3, loss_3_neg)
+    # print(loss_3, loss_3_neg)
 
-    # return ({"v1_pos": loss_1, "v1_neg": loss_1_neg, "v3_pos": loss_3, "v3_neg": loss_3_neg})
     return ({"+": loss_3, "-": loss_3_neg})
 
 def generator_wrap(l, fn):
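Besides dropping the dead classify_fn_inner block, the substantive change is in classify_fn_inner2: the score now takes np.log of the predicted probability of each observed next token, so the result is an average negative log-likelihood (per-token cross-entropy) rather than a sum of raw probabilities. A minimal sketch of the difference, with random toy logits standing in for the RWKV model's outputs (the rng, vocabulary size, and token ids below are made up for illustration):

import numpy as np

def softmax(x):
    e = np.exp(x - np.max(x))
    return e / e.sum()

# Toy stand-ins for the per-step logits and observed next tokens; in app.py
# these come from model.forward_token(...) and tokenizer.encode(...).ids.
rng = np.random.default_rng(0)
logits = rng.normal(size=(4, 10))   # 4 prediction steps over a vocab of 10
next_tokens = [3, 7, 1, 4]          # token actually observed at each step

probs = [softmax(l) for l in logits]

# Old score: negated mean of raw probabilities -- not a proper loss.
old = -np.sum([p[t] for p, t in zip(probs, next_tokens)]) / len(probs)

# New score: mean negative log-likelihood, i.e. per-token cross-entropy.
new = -np.sum([np.log(p[t]) for p, t in zip(probs, next_tokens)]) / len(probs)

print(old, new)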
 
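classify_fn then turns the two prompt losses into a probability over the "+" and "-" labels by softmaxing the negated losses, so the class whose prompt yields the lower average NLL gets the higher score. A small sketch with made-up loss values:

import numpy as np

def softmax(x):
    x = np.asarray(x, dtype=float)
    e = np.exp(x - np.max(x))
    return e / e.sum()

# Made-up per-class losses, standing in for classify_fn_inner2(inpt, clas)
# and classify_fn_inner2(inpt, clasneg).
loss_pos, loss_neg = 2.1, 2.9

# Negating before the softmax means the prompt with the LOWER loss gets the
# higher probability, mirroring classify_fn's {"+", "-"} output.
p_pos, p_neg = softmax([-loss_pos, -loss_neg])
print({"+": p_pos, "-": p_neg})   # roughly {'+': 0.69, '-': 0.31}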