vikram-fresche committed on
Commit
0e056a8
·
verified ·
1 Parent(s): f72bb15

handler_v3 (#3)

Browse files

- added custom handler v2 (6f3af40a0447f43e9c44fc0553ea15c72ddae113)

Files changed (1) hide show
  1. handler.py +5 -5
handler.py CHANGED
@@ -53,7 +53,7 @@ class EndpointHandler:
53
  """
54
  try:
55
  logger.info("Processing new request")
56
- logger.debug(f"Input data: {data}")
57
 
58
  messages = data.get("messages", [])
59
  if not messages:
@@ -65,18 +65,18 @@ class EndpointHandler:
65
  logger.info(f"Generation parameters: {gen_params}")
66
 
67
  # Apply the chat template
68
- logger.debug("Applying chat template")
69
  prompt = self.tokenizer.apply_chat_template(
70
  messages,
71
  tokenize=False,
72
  add_generation_prompt=True
73
  )
74
- logger.debug(f"Generated prompt: {prompt}")
75
 
76
  # Tokenize the prompt
77
- logger.debug("Tokenizing input")
78
  inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
79
- logger.debug(f"Input shape: {inputs.input_ids.shape}")
80
 
81
  # Generate response
82
  logger.info("Generating response")
 
53
  """
54
  try:
55
  logger.info("Processing new request")
56
+ logger.info(f"Input data: {data}")
57
 
58
  messages = data.get("messages", [])
59
  if not messages:
 
65
  logger.info(f"Generation parameters: {gen_params}")
66
 
67
  # Apply the chat template
68
+ logger.info("Applying chat template")
69
  prompt = self.tokenizer.apply_chat_template(
70
  messages,
71
  tokenize=False,
72
  add_generation_prompt=True
73
  )
74
+ logger.info(f"Generated prompt: {prompt}")
75
 
76
  # Tokenize the prompt
77
+ logger.info("Tokenizing input")
78
  inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
79
+ logger.info(f"Input shape: {inputs.input_ids.shape}")
80
 
81
  # Generate response
82
  logger.info("Generating response")