repo_id (string, length 15-89) | file_path (string, length 27-180) | content (string, length 1-2.23M) | __index_level_0__ (int64, 0-0) |
---|---|---|---|
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/integration-tests/conftest.py | import sys
import subprocess
import contextlib
import pytest
import asyncio
import os
import docker
import json
import math
import time
import random
from docker.errors import NotFound
from typing import Optional, List, Dict
from syrupy.extensions.json import JSONSnapshotExtension
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
from text_generation import AsyncClient
from text_generation.types import Response, Details, InputToken, Token, BestOfSequence
DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
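# Syrupy snapshot extension that deserializes stored snapshots back into `Response`
# objects and compares them field by field, with a relative tolerance on token logprobs.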
class ResponseComparator(JSONSnapshotExtension):
rtol = 0.2
def serialize(
self,
data,
*,
exclude=None,
matcher=None,
):
if isinstance(data, List):
data = [d.dict() for d in data]
data = self._filter(
data=data, depth=0, path=(), exclude=exclude, matcher=matcher
)
return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"
def matches(
self,
*,
serialized_data,
snapshot_data,
) -> bool:
def convert_data(data):
data = json.loads(data)
if isinstance(data, Dict):
return Response(**data)
if isinstance(data, List):
return [Response(**d) for d in data]
raise NotImplementedError
def eq_token(token: Token, other: Token) -> bool:
return (
token.id == other.id
and token.text == other.text
and math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
and token.special == other.special
)
def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
try:
return (
prefill_token.id == other.id
and prefill_token.text == other.text
and (
math.isclose(
prefill_token.logprob, other.logprob, rel_tol=self.rtol
)
if prefill_token.logprob is not None
else prefill_token.logprob == other.logprob
)
)
except TypeError:
return False
def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
return (
details.finish_reason == other.finish_reason
and details.generated_tokens == other.generated_tokens
and details.seed == other.seed
and len(details.prefill) == len(other.prefill)
and all(
[
eq_prefill_token(d, o)
for d, o in zip(details.prefill, other.prefill)
]
)
and len(details.tokens) == len(other.tokens)
and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
)
def eq_details(details: Details, other: Details) -> bool:
return (
details.finish_reason == other.finish_reason
and details.generated_tokens == other.generated_tokens
and details.seed == other.seed
and len(details.prefill) == len(other.prefill)
and all(
[
eq_prefill_token(d, o)
for d, o in zip(details.prefill, other.prefill)
]
)
and len(details.tokens) == len(other.tokens)
and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
and (
len(details.best_of_sequences)
if details.best_of_sequences is not None
else 0
)
== (
len(other.best_of_sequences)
if other.best_of_sequences is not None
else 0
)
and (
all(
[
eq_best_of(d, o)
for d, o in zip(
details.best_of_sequences, other.best_of_sequences
)
]
)
if details.best_of_sequences is not None
else details.best_of_sequences == other.best_of_sequences
)
)
def eq_response(response: Response, other: Response) -> bool:
return response.generated_text == other.generated_text and eq_details(
response.details, other.details
)
serialized_data = convert_data(serialized_data)
snapshot_data = convert_data(snapshot_data)
if not isinstance(serialized_data, List):
serialized_data = [serialized_data]
if not isinstance(snapshot_data, List):
snapshot_data = [snapshot_data]
return len(snapshot_data) == len(serialized_data) and all(
[eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
)
class GenerousResponseComparator(ResponseComparator):
# Needed for GPTQ with exllama which has serious numerical fluctuations.
rtol = 0.75
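# Base handle for a running launcher (local process or Docker container); `health`
# polls the generate endpoint until the server answers or the timeout elapses.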
class LauncherHandle:
def __init__(self, port: int):
self.client = AsyncClient(f"http://localhost:{port}")
def _inner_health(self):
raise NotImplementedError
async def health(self, timeout: int = 60):
assert timeout > 0
for _ in range(timeout):
if not self._inner_health():
raise RuntimeError("Launcher crashed")
try:
await self.client.generate("test")
return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
time.sleep(1)
raise RuntimeError("Health check failed")
class ContainerLauncherHandle(LauncherHandle):
def __init__(self, docker_client, container_name, port: int):
        super().__init__(port)
self.docker_client = docker_client
self.container_name = container_name
def _inner_health(self) -> bool:
container = self.docker_client.containers.get(self.container_name)
return container.status in ["running", "created"]
class ProcessLauncherHandle(LauncherHandle):
def __init__(self, process, port: int):
        super().__init__(port)
self.process = process
def _inner_health(self) -> bool:
return self.process.poll() is None
@pytest.fixture
def response_snapshot(snapshot):
return snapshot.use_extension(ResponseComparator)
@pytest.fixture
def generous_response_snapshot(snapshot):
return snapshot.use_extension(GenerousResponseComparator)
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
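# Yields a context manager that starts `text-generation-launcher` either as a local
# subprocess or inside a Docker container, depending on whether DOCKER_IMAGE is set.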
@pytest.fixture(scope="module")
def launcher(event_loop):
@contextlib.contextmanager
def local_launcher(
model_id: str,
num_shard: Optional[int] = None,
quantize: Optional[str] = None,
trust_remote_code: bool = False,
use_flash_attention: bool = True,
dtype: Optional[str] = None,
):
port = random.randint(8000, 10_000)
master_port = random.randint(10_000, 20_000)
shard_uds_path = (
f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
)
args = [
"text-generation-launcher",
"--model-id",
model_id,
"--port",
str(port),
"--master-port",
str(master_port),
"--shard-uds-path",
shard_uds_path,
]
env = os.environ
if num_shard is not None:
args.extend(["--num-shard", str(num_shard)])
if quantize is not None:
args.append("--quantize")
args.append(quantize)
if dtype is not None:
args.append("--dtype")
args.append(dtype)
if trust_remote_code:
args.append("--trust-remote-code")
env["LOG_LEVEL"] = "info,text_generation_router=debug"
if not use_flash_attention:
env["USE_FLASH_ATTENTION"] = "false"
with subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
) as process:
yield ProcessLauncherHandle(process, port)
process.terminate()
process.wait(60)
launcher_output = process.stdout.read().decode("utf-8")
print(launcher_output, file=sys.stderr)
process.stdout.close()
process.stderr.close()
if not use_flash_attention:
del env["USE_FLASH_ATTENTION"]
@contextlib.contextmanager
def docker_launcher(
model_id: str,
num_shard: Optional[int] = None,
quantize: Optional[str] = None,
trust_remote_code: bool = False,
use_flash_attention: bool = True,
dtype: Optional[str] = None,
):
port = random.randint(8000, 10_000)
args = ["--model-id", model_id, "--env"]
if num_shard is not None:
args.extend(["--num-shard", str(num_shard)])
if quantize is not None:
args.append("--quantize")
args.append(quantize)
if dtype is not None:
args.append("--dtype")
args.append(dtype)
if trust_remote_code:
args.append("--trust-remote-code")
client = docker.from_env()
container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"
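        # Stop any leftover container with the same name from a previous test run.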
try:
container = client.containers.get(container_name)
container.stop()
container.wait()
except NotFound:
pass
gpu_count = num_shard if num_shard is not None else 1
env = {"LOG_LEVEL": "info,text_generation_router=debug"}
if not use_flash_attention:
env["USE_FLASH_ATTENTION"] = "false"
if HUGGING_FACE_HUB_TOKEN is not None:
env["HUGGING_FACE_HUB_TOKEN"] = HUGGING_FACE_HUB_TOKEN
volumes = []
if DOCKER_VOLUME:
volumes = [f"{DOCKER_VOLUME}:/data"]
container = client.containers.run(
DOCKER_IMAGE,
command=args,
name=container_name,
environment=env,
auto_remove=False,
detach=True,
device_requests=[
docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
],
volumes=volumes,
ports={"80/tcp": port},
shm_size="1G",
)
yield ContainerLauncherHandle(client, container.name, port)
if not use_flash_attention:
del env["USE_FLASH_ATTENTION"]
try:
container.stop()
container.wait()
except NotFound:
pass
container_output = container.logs().decode("utf-8")
print(container_output, file=sys.stderr)
container.remove()
if DOCKER_IMAGE is not None:
return docker_launcher
return local_launcher
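# Fires `n` identical generate requests concurrently so tests can check that
# batched decoding produces consistent results.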
@pytest.fixture(scope="module")
def generate_load():
async def generate_load_inner(
client: AsyncClient, prompt: str, max_new_tokens: int, n: int
) -> List[Response]:
futures = [
client.generate(
prompt, max_new_tokens=max_new_tokens, decoder_input_details=True
)
for _ in range(n)
]
return await asyncio.gather(*futures)
return generate_load_inner
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_awq.py | import pytest
@pytest.fixture(scope="module")
def flash_llama_awq_handle(launcher):
with launcher(
"abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
num_shard=1,
quantize="awq",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_awq(flash_llama_awq_handle):
await flash_llama_awq_handle.health(300)
return flash_llama_awq_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq(flash_llama_awq, response_snapshot):
response = await flash_llama_awq.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
)
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_all_params(flash_llama_awq, response_snapshot):
response = await flash_llama_awq.generate(
"What is Deep Learning?",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_load(flash_llama_awq, generate_load, response_snapshot):
responses = await generate_load(
flash_llama_awq, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[
r.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
for r in responses
]
)
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_mpt.py | import pytest
@pytest.fixture(scope="module")
def mpt_sharded_handle(launcher):
with launcher("mosaicml/mpt-7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def mpt_sharded(mpt_sharded_handle):
await mpt_sharded_handle.health(300)
return mpt_sharded_handle.client
@pytest.mark.asyncio
async def test_mpt(mpt_sharded, response_snapshot):
response = await mpt_sharded.generate(
"What is Deep Learning?",
max_new_tokens=17,
decoder_input_details=True,
)
assert response.details.generated_tokens == 17
assert (
response.generated_text
== " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
)
assert response == response_snapshot
@pytest.mark.asyncio
async def test_mpt_load(mpt_sharded, generate_load, response_snapshot):
responses = await generate_load(
mpt_sharded,
"What is Deep Learning?",
max_new_tokens=17,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert (
responses[0].generated_text
== " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
)
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_neox.py | import pytest
@pytest.fixture(scope="module")
def flash_neox_handle(launcher):
with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_neox(flash_neox_handle):
await flash_neox_handle.health(300)
return flash_neox_handle.client
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox(flash_neox, response_snapshot):
response = await flash_neox.generate(
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox, generate_load, response_snapshot):
responses = await generate_load(
flash_neox,
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
assert all(
[text == generated_texts[0] for text in generated_texts]
), generated_texts
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama.py | import pytest
@pytest.fixture(scope="module")
def flash_llama_handle(launcher):
with launcher("huggingface/llama-7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama(flash_llama_handle):
await flash_llama_handle.health(300)
return flash_llama_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama(flash_llama, response_snapshot):
response = await flash_llama.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_all_params(flash_llama, response_snapshot):
response = await flash_llama.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 5
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_load(flash_llama, generate_load, response_snapshot):
responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder.py | import pytest
@pytest.fixture(scope="module")
def flash_starcoder_handle(launcher):
with launcher("bigcode/starcoder", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_starcoder(flash_starcoder_handle):
await flash_starcoder_handle.health(300)
return flash_starcoder_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder(flash_starcoder, response_snapshot):
response = await flash_starcoder.generate(
"def print_hello", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_default_params(flash_starcoder, response_snapshot):
response = await flash_starcoder.generate(
"def print_hello",
max_new_tokens=60,
temperature=0.2,
top_p=0.95,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 60
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_load(flash_starcoder, generate_load, response_snapshot):
responses = await generate_load(
flash_starcoder, "def print_hello", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_mistral.py | import pytest
@pytest.fixture(scope="module")
def flash_mistral_handle(launcher):
with launcher("mistralai/Mistral-7B-Instruct-v0.1") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_mistral(flash_mistral_handle):
await flash_mistral_handle.health(300)
return flash_mistral_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_mistral(flash_mistral, response_snapshot):
response = await flash_mistral.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response.generated_text == ": Let n = 10 - 1"
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_mistral_all_params(flash_mistral, response_snapshot):
response = await flash_mistral.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_mistral_load(flash_mistral, generate_load, response_snapshot):
responses = await generate_load(
flash_mistral, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses[0].generated_text == ": Let n = 10 - 1"
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_neox_sharded.py | import pytest
@pytest.fixture(scope="module")
def flash_neox_sharded_handle(launcher):
with launcher("OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_neox_sharded(flash_neox_sharded_handle):
await flash_neox_sharded_handle.health(300)
return flash_neox_sharded_handle.client
@pytest.mark.asyncio
async def test_flash_neox(flash_neox_sharded, response_snapshot):
response = await flash_neox_sharded.generate(
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_neox_load(flash_neox_sharded, generate_load, response_snapshot):
responses = await generate_load(
flash_neox_sharded,
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_awq_sharded.py | import pytest
@pytest.fixture(scope="module")
def flash_llama_awq_handle_sharded(launcher):
with launcher(
"abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
num_shard=2,
quantize="awq",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_awq_sharded(flash_llama_awq_handle_sharded):
await flash_llama_awq_handle_sharded.health(300)
return flash_llama_awq_handle_sharded.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_sharded(flash_llama_awq_sharded, response_snapshot):
response = await flash_llama_awq_sharded.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
)
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_awq_load_sharded(
flash_llama_awq_sharded, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_awq_sharded, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[
r.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
for r in responses
]
)
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_neox_sharded.py | import pytest
@pytest.fixture(scope="module")
def neox_sharded_handle(launcher):
with launcher(
"OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2, use_flash_attention=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def neox_sharded(neox_sharded_handle):
await neox_sharded_handle.health(300)
return neox_sharded_handle.client
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox_sharded, response_snapshot):
response = await neox_sharded.generate(
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox_sharded, generate_load, response_snapshot):
responses = await generate_load(
neox_sharded,
"<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_medusa.py | import pytest
@pytest.fixture(scope="module")
def flash_medusa_handle(launcher):
with launcher("FasterDecoding/medusa-vicuna-7b-v1.3", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_medusa(flash_medusa_handle):
await flash_medusa_handle.health(300)
return flash_medusa_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_medusa_simple(flash_medusa, response_snapshot):
response = await flash_medusa.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_medusa_all_params(flash_medusa, response_snapshot):
response = await flash_medusa.generate(
"What is Deep Learning?",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_medusa_load(flash_medusa, generate_load, response_snapshot):
responses = await generate_load(
flash_medusa, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert (
responses[0].generated_text == "\nDeep learning is a subset of machine learning"
)
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m.py | import pytest
@pytest.fixture(scope="module")
def bloom_560_handle(launcher):
with launcher("bigscience/bloom-560m") as handle:
yield handle
@pytest.fixture(scope="module")
async def bloom_560(bloom_560_handle):
await bloom_560_handle.health(240)
return bloom_560_handle.client
@pytest.mark.asyncio
async def test_bloom_560m(bloom_560, response_snapshot):
response = await bloom_560.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
top_p=0.9,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_bloom_560m_all_params(bloom_560, response_snapshot):
response = await bloom_560.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot):
responses = await generate_load(
bloom_560,
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_t5_sharded.py | import pytest
@pytest.fixture(scope="module")
def t5_sharded_handle(launcher):
with launcher("google/flan-t5-xxl", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def t5_sharded(t5_sharded_handle):
await t5_sharded_handle.health(300)
return t5_sharded_handle.client
@pytest.mark.asyncio
async def test_t5_sharded(t5_sharded, response_snapshot):
response = await t5_sharded.generate(
"Please answer the following question. What is the boiling point of Nitrogen?",
max_new_tokens=10,
decoder_input_details=True,
)
assert response == response_snapshot
@pytest.mark.asyncio
async def test_t5_sharded_load(t5_sharded, generate_load, response_snapshot):
responses = await generate_load(
t5_sharded,
"Please answer the following question. What is the boiling point of Nitrogen?",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_llama_gptq.py | import pytest
@pytest.fixture(scope="module")
def flash_llama_gptq_handle(launcher):
with launcher("huggingface/llama-7b-gptq", num_shard=2, quantize="gptq") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_gptq(flash_llama_gptq_handle):
await flash_llama_gptq_handle.health(300)
return flash_llama_gptq_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq(flash_llama_gptq, response_snapshot):
response = await flash_llama_gptq.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot):
response = await flash_llama_gptq.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq_load(
flash_llama_gptq, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_gptq, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_santacoder.py | import pytest
@pytest.fixture(scope="module")
def flash_santacoder_handle(launcher):
with launcher("bigcode/santacoder") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_santacoder(flash_santacoder_handle):
await flash_santacoder_handle.health(300)
return flash_santacoder_handle.client
@pytest.mark.asyncio
async def test_flash_santacoder(flash_santacoder, response_snapshot):
response = await flash_santacoder.generate(
"def print_hello", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_santacoder_load(
flash_santacoder, generate_load, response_snapshot
):
responses = await generate_load(
flash_santacoder, "def print_hello", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_mt0_base.py | import pytest
@pytest.fixture(scope="module")
def mt0_base_handle(launcher):
with launcher("bigscience/mt0-base") as handle:
yield handle
@pytest.fixture(scope="module")
async def mt0_base(mt0_base_handle):
await mt0_base_handle.health(300)
return mt0_base_handle.client
@pytest.mark.asyncio
async def test_mt0_base(mt0_base, response_snapshot):
response = await mt0_base.generate(
"Why is the sky blue?",
max_new_tokens=10,
top_p=0.9,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 5
assert response == response_snapshot
@pytest.mark.asyncio
async def test_mt0_base_all_params(mt0_base, response_snapshot):
response = await mt0_base.generate(
"Why is the sky blue?",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 9
assert response == response_snapshot
@pytest.mark.asyncio
async def test_mt0_base_load(mt0_base, generate_load, response_snapshot):
responses = await generate_load(
mt0_base,
"Why is the sky blue?",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_idefics.py | import pytest
@pytest.fixture(scope="module")
def idefics_handle(launcher):
with launcher(
"HuggingFaceM4/idefics-9b-instruct", num_shard=2, dtype="float16"
) as handle:
yield handle
@pytest.fixture(scope="module")
async def idefics(idefics_handle):
await idefics_handle.health(300)
return idefics_handle.client
@pytest.mark.asyncio
async def test_idefics(idefics, response_snapshot):
response = await idefics.generate(
"User:![](https://temp-5681.s3.us-west-2.amazonaws.com/chicken_on_money.png)Can you tell me a very short story based on the image?",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_idefics_load(idefics, generate_load, response_snapshot):
responses = await generate_load(
idefics,
"User:![](https://temp-5681.s3.us-west-2.amazonaws.com/chicken_on_money.png)Can you tell me a very short story based on the image?",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    ), generated_texts
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_neox.py | import pytest
@pytest.fixture(scope="module")
def neox_handle(launcher):
with launcher(
"stabilityai/stablelm-tuned-alpha-3b", num_shard=1, use_flash_attention=False
) as handle:
yield handle
@pytest.fixture(scope="module")
async def neox(neox_handle):
await neox_handle.health(300)
return neox_handle.client
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox(neox, response_snapshot):
response = await neox.generate(
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.skip
@pytest.mark.asyncio
async def test_neox_load(neox, generate_load, response_snapshot):
responses = await generate_load(
neox,
"<|USER|>What's your mood today?<|ASSISTANT|>",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert len(generated_texts) == 4
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    ), generated_texts
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_falcon.py | import pytest
@pytest.fixture(scope="module")
def flash_falcon_handle(launcher):
with launcher("tiiuae/falcon-7b", trust_remote_code=True) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_falcon(flash_falcon_handle):
await flash_falcon_handle.health(300)
return flash_falcon_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon(flash_falcon, response_snapshot):
response = await flash_falcon.generate(
"Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon_all_params(flash_falcon, response_snapshot):
response = await flash_falcon.generate(
"Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_falcon_load(flash_falcon, generate_load, response_snapshot):
responses = await generate_load(
flash_falcon,
"Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron:",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py | import pytest
@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
with launcher("bigscience/bloom-560m", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
await bloom_560m_sharded_handle.health(240)
return bloom_560m_sharded_handle.client
@pytest.mark.asyncio
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
response = await bloom_560m_sharded.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
top_p=0.9,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
bloom_560m_sharded, generate_load, response_snapshot
):
responses = await generate_load(
bloom_560m_sharded,
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests | hf_public_repos/text-generation-inference/integration-tests/models/test_flash_starcoder_gptq.py | import pytest
@pytest.fixture(scope="module")
def flash_starcoder_gptq_handle(launcher):
with launcher("Narsil/starcoder-gptq", num_shard=2, quantize="gptq") as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_starcoder_gptq(flash_starcoder_gptq_handle):
await flash_starcoder_gptq_handle.health(300)
return flash_starcoder_gptq_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq(flash_starcoder_gptq, generous_response_snapshot):
response = await flash_starcoder_gptq.generate(
"def geometric_mean(L: List[float]):",
max_new_tokens=20,
decoder_input_details=True,
)
assert response.details.generated_tokens == 20
assert response == generous_response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq_default_params(
flash_starcoder_gptq, generous_response_snapshot
):
response = await flash_starcoder_gptq.generate(
"def geometric_mean(L: List[float]):",
max_new_tokens=20,
temperature=0.2,
top_p=0.95,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 20
assert response == generous_response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder_gptq_load(
flash_starcoder_gptq, generate_load, generous_response_snapshot
):
responses = await generate_load(
flash_starcoder_gptq,
"def geometric_mean(L: List[float]):",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == generous_response_snapshot
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 5,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": 0,
"tokens": [
{
"id": 926,
"logprob": -4.3554688,
"special": false,
"text": " To"
},
{
"id": 18295,
"logprob": -7.7734375,
"special": false,
"text": " sell"
},
{
"id": 7868,
"logprob": -3.9257812,
"special": false,
"text": " things"
},
{
"id": 260,
"logprob": -2.4179688,
"special": false,
"text": "."
},
{
"id": 1,
"logprob": 0.0,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "To sell things."
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 9,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": 0,
"tokens": [
{
"id": 16017,
"logprob": -0.30908203,
"special": false,
"text": " blue"
},
{
"id": 20495,
"logprob": 0.0,
"special": false,
"text": " sky"
},
{
"id": 259,
"logprob": -0.28271484,
"special": false,
"text": " "
},
{
"id": 15484,
"logprob": -1.7929688,
"special": false,
"text": "appear"
},
{
"id": 345,
"logprob": -0.8935547,
"special": false,
"text": "ed"
},
{
"id": 281,
"logprob": 0.0,
"special": false,
"text": " in"
},
{
"id": 287,
"logprob": 0.0,
"special": false,
"text": " the"
},
{
"id": 20495,
"logprob": -0.32299805,
"special": false,
"text": " sky"
},
{
"id": 1,
"logprob": 0.0,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Why is the sky blue?blue sky appeared in the sky"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3798828,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36328125,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0947266,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8286133,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6826172,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.7290039,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 338,
"logprob": -9.0859375,
"text": "is"
},
{
"id": 21784,
"logprob": -10.90625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -2.65625,
"text": "Learning"
},
{
"id": 29973,
"logprob": -4.8085938,
"text": "?"
}
],
"seed": 0,
"tokens": [
{
"id": 13,
"logprob": -0.19958496,
"special": false,
"text": "\n"
},
{
"id": 4013,
"logprob": -2.203125,
"special": false,
"text": "This"
},
{
"id": 1139,
"logprob": -0.23693848,
"special": false,
"text": " question"
},
{
"id": 756,
"logprob": 0.0,
"special": false,
"text": " has"
},
{
"id": 1063,
"logprob": -0.076538086,
"special": false,
"text": " been"
},
{
"id": 4433,
"logprob": 0.0,
"special": false,
"text": " asked"
},
{
"id": 1784,
"logprob": -1.1367188,
"special": false,
"text": " many"
},
{
"id": 3064,
"logprob": 0.0,
"special": false,
"text": " times"
},
{
"id": 322,
"logprob": -1.7460938,
"special": false,
"text": " and"
},
{
"id": 306,
"logprob": 0.0,
"special": false,
"text": " I"
}
],
"top_tokens": null
},
"generated_text": "What is Deep Learning?\nThis question has been asked many times and I"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.703125,
"text": "What"
},
{
"id": 338,
"logprob": -1.4765625,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8583984,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7548828,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.703125,
"text": "What"
},
{
"id": 338,
"logprob": -1.4765625,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8652344,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7548828,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.703125,
"text": "What"
},
{
"id": 338,
"logprob": -1.4765625,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8583984,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7548828,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.703125,
"text": "What"
},
{
"id": 338,
"logprob": -1.4765625,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8652344,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7548828,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.703125,
"text": "What"
},
{
"id": 338,
"logprob": -1.4765625,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8652344,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7548828,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9306641,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4550781,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.5732422,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5761719,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5888672,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026504517,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4287109,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15856934,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62646484,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "stop_sequence",
"generated_tokens": 5,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -8.6875,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.546875,
"text": "request"
}
],
"seed": 0,
"tokens": [
{
"id": 5229,
"logprob": -2.5839844,
"special": false,
"text": " failed"
},
{
"id": 29901,
"logprob": -0.44970703,
"special": false,
"text": ":"
},
{
"id": 4829,
"logprob": -1.8339844,
"special": false,
"text": " Error"
},
{
"id": 297,
"logprob": -1.0556641,
"special": false,
"text": " in"
},
{
"id": 1243,
"logprob": 0.0,
"special": false,
"text": " test"
}
],
"top_tokens": null
},
"generated_text": "Test request failed: Error in test"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -8.6875,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.546875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5351562,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5722656,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2714844,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.03414917,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.95996094,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.3635254,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.013031006,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.1523438,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.43701172,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -8.6875,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.546875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5351562,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5566406,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2519531,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.03414917,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.96240234,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.3647461,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.012901306,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.1542969,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -8.6875,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.546875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -8.6875,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.546875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -8.6875,
"text": "Test"
},
{
"id": 2009,
"logprob": -11.546875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 363,
"logprob": -1.5332031,
"special": false,
"text": " for"
},
{
"id": 847,
"logprob": -2.5625,
"special": false,
"text": " /"
},
{
"id": 2754,
"logprob": -2.2617188,
"special": false,
"text": "api"
},
{
"id": 29914,
"logprob": -0.033996582,
"special": false,
"text": "/"
},
{
"id": 29894,
"logprob": -0.9609375,
"special": false,
"text": "v"
},
{
"id": 29896,
"logprob": -0.36572266,
"special": false,
"text": "1"
},
{
"id": 29914,
"logprob": -0.0129776,
"special": false,
"text": "/"
},
{
"id": 16418,
"logprob": -3.15625,
"special": false,
"text": "projects"
},
{
"id": 29914,
"logprob": -0.4362793,
"special": false,
"text": "/"
},
{
"id": 29896,
"logprob": -1.9394531,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": " for /api/v1/projects/1"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -9.6015625,
"text": "Test"
},
{
"id": 2009,
"logprob": -9.671875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 29918,
"logprob": -2.3828125,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -2.8105469,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -1.6396484,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.0546875,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.6513672,
"special": false,
"text": " request"
},
{
"id": 29918,
"logprob": -0.056365967,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -0.016082764,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -0.87841797,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -0.7548828,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.29711914,
"special": false,
"text": " request"
}
]
},
"generated_text": "_uri\nTest request_uri\nTest request"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -9.6015625,
"text": "Test"
},
{
"id": 2009,
"logprob": -9.6640625,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 29918,
"logprob": -2.3828125,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -2.828125,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -1.6386719,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.0527344,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.6542969,
"special": false,
"text": " request"
},
{
"id": 29918,
"logprob": -0.055877686,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -0.016021729,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -0.8769531,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -0.7583008,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.29833984,
"special": false,
"text": " request"
}
]
},
"generated_text": "_uri\nTest request_uri\nTest request"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -9.6015625,
"text": "Test"
},
{
"id": 2009,
"logprob": -9.671875,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 29918,
"logprob": -2.3847656,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -2.8144531,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -1.6396484,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.0527344,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.65478516,
"special": false,
"text": " request"
},
{
"id": 29918,
"logprob": -0.056243896,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -0.016143799,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -0.8808594,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -0.75341797,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.2956543,
"special": false,
"text": " request"
}
]
},
"generated_text": "_uri\nTest request_uri\nTest request"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -9.6015625,
"text": "Test"
},
{
"id": 2009,
"logprob": -9.6640625,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 29918,
"logprob": -2.3769531,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -2.8183594,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -1.6396484,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.0546875,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.65478516,
"special": false,
"text": " request"
},
{
"id": 29918,
"logprob": -0.05557251,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -0.01612854,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -0.8730469,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -0.7519531,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.29785156,
"special": false,
"text": " request"
}
]
},
"generated_text": "_uri\nTest request_uri\nTest request"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -9.6015625,
"text": "Test"
},
{
"id": 2009,
"logprob": -9.6640625,
"text": "request"
}
],
"seed": 0,
"tokens": [
{
"id": 29899,
"logprob": -1.1640625,
"special": false,
"text": "-"
},
{
"id": 1454,
"logprob": -0.07543945,
"special": false,
"text": "for"
},
{
"id": 29899,
"logprob": 0.0,
"special": false,
"text": "-"
},
{
"id": 9342,
"logprob": 0.0,
"special": false,
"text": "comment"
},
{
"id": 29901,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 396,
"logprob": -0.2956543,
"special": false,
"text": " #"
},
{
"id": 29906,
"logprob": -0.52734375,
"special": false,
"text": "2"
},
{
"id": 29900,
"logprob": -0.6899414,
"special": false,
"text": "0"
},
{
"id": 29896,
"logprob": 0.0,
"special": false,
"text": "1"
},
{
"id": 29946,
"logprob": -1.5068359,
"special": false,
"text": "4"
}
]
},
"generated_text": "Test request-for-comment: #2014"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_gptq/test_flash_llama_gptq.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4321,
"logprob": -9.59375,
"text": "Test"
},
{
"id": 2009,
"logprob": -9.6640625,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 29918,
"logprob": -2.3867188,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -2.8183594,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -1.6367188,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -1.0527344,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.6542969,
"special": false,
"text": " request"
},
{
"id": 29918,
"logprob": -0.056121826,
"special": false,
"text": "_"
},
{
"id": 5338,
"logprob": -0.01600647,
"special": false,
"text": "uri"
},
{
"id": 13,
"logprob": -0.87939453,
"special": false,
"text": "\n"
},
{
"id": 3057,
"logprob": -0.7529297,
"special": false,
"text": "Test"
},
{
"id": 2009,
"logprob": -0.2980957,
"special": false,
"text": " request"
}
]
},
"generated_text": "_uri\nTest request_uri\nTest request"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4609375,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5585938,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4003906,
"text": ","
},
{
"id": 1669,
"logprob": -1.5673828,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94628906,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.703125,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7646484,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.6113281,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5263672,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.2119141,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11883545,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.40844727,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.0037841797,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0195312,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.53125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4140625,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5234375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3613281,
"text": ","
},
{
"id": 1669,
"logprob": -1.5458984,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94189453,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7548828,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.578125,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5117188,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11004639,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4506836,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.53125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4140625,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5234375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3613281,
"text": ","
},
{
"id": 1669,
"logprob": -1.5458984,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94189453,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7548828,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.578125,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5117188,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11004639,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4506836,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.53125,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4140625,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5234375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3613281,
"text": ","
},
{
"id": 1669,
"logprob": -1.5458984,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94189453,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7548828,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.578125,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5117188,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4707031,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11004639,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4506836,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14770508,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4609375,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.5585938,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4003906,
"text": ","
},
{
"id": 1669,
"logprob": -1.5673828,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94628906,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.703125,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 578,
"logprob": -1.6591797,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.4492188,
"special": false,
"text": " faire"
},
{
"id": 159570,
"logprob": -6.6835938,
"special": false,
"text": " réch"
},
{
"id": 810,
"logprob": 0.0,
"special": false,
"text": "au"
},
{
"id": 12736,
"logprob": 0.0,
"special": false,
"text": "ffer"
},
{
"id": 1742,
"logprob": -2.5175781,
"special": false,
"text": " au"
},
{
"id": 6105,
"logprob": -2.0078125,
"special": false,
"text": " bain"
},
{
"id": 88254,
"logprob": -0.12695312,
"special": false,
"text": "-mar"
},
{
"id": 641,
"logprob": 0.0,
"special": false,
"text": "ie"
},
{
"id": 2940,
"logprob": -3.5175781,
"special": false,
"text": " avec"
}
]
},
"generated_text": " le faire réchauffer au bain-marie avec"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m/test_bloom_560m_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 15,
"logprob": null,
"text": ","
},
{
"id": 1669,
"logprob": -5.4414062,
"text": " il"
},
{
"id": 11580,
"logprob": -2.3378906,
"text": " faut"
},
{
"id": 3913,
"logprob": -4.3554688,
"text": " tout"
},
{
"id": 39261,
"logprob": -2.9238281,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 408,
"logprob": -0.07891846,
"special": false,
"text": " que"
},
{
"id": 366,
"logprob": -1.2939453,
"special": false,
"text": " la"
},
{
"id": 8769,
"logprob": -0.3708496,
"special": false,
"text": " personne"
},
{
"id": 1479,
"logprob": -2.2871094,
"special": false,
"text": " qui"
},
{
"id": 2997,
"logprob": -0.8671875,
"special": false,
"text": " vous"
},
{
"id": 35977,
"logprob": -1.5097656,
"special": false,
"text": " suit"
},
{
"id": 21558,
"logprob": -0.07891846,
"special": false,
"text": " ait"
},
{
"id": 447,
"logprob": -0.12695312,
"special": false,
"text": " un"
},
{
"id": 78606,
"logprob": -2.21875,
"special": false,
"text": " profil"
},
{
"id": 3899,
"logprob": -1.3535156,
"special": false,
"text": " bien"
}
]
},
"generated_text": "Pour déguster un ortolan, il faut tout d'abord que la personne qui vous suit ait un profil bien"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2099609,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2451172,
"special": false,
"text": " "
},
{
"id": 1956,
"logprob": -0.3322754,
"special": false,
"text": "°"
},
{
"id": 254,
"logprob": -0.19213867,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030151367,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "-196 °C"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2119141,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2480469,
"special": false,
"text": " "
},
{
"id": 1956,
"logprob": -0.33203125,
"special": false,
"text": "°"
},
{
"id": 254,
"logprob": -0.19250488,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030166626,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "-196 °C"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2119141,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2480469,
"special": false,
"text": " "
},
{
"id": 1956,
"logprob": -0.33203125,
"special": false,
"text": "°"
},
{
"id": 254,
"logprob": -0.19250488,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030166626,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "-196 °C"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2119141,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2480469,
"special": false,
"text": " "
},
{
"id": 1956,
"logprob": -0.33203125,
"special": false,
"text": "°"
},
{
"id": 254,
"logprob": -0.19250488,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030166626,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "-196 °C"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 7,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 3,
"logprob": -0.7001953,
"special": false,
"text": " "
},
{
"id": 18,
"logprob": -1.1943359,
"special": false,
"text": "-"
},
{
"id": 26937,
"logprob": -1.2099609,
"special": false,
"text": "196"
},
{
"id": 3,
"logprob": -1.2451172,
"special": false,
"text": " "
},
{
"id": 1956,
"logprob": -0.3322754,
"special": false,
"text": "°"
},
{
"id": 254,
"logprob": -0.19213867,
"special": false,
"text": "C"
},
{
"id": 1,
"logprob": -0.030151367,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "-196 °C"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.54785156,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4091797,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0273438,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94433594,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.81347656,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2958984,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0644531,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9580078,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5073242,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1816406,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.55078125,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4140625,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0273438,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94140625,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.8173828,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2978516,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0664062,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9560547,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5078125,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1787109,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.54785156,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4111328,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0292969,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94433594,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.8178711,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2939453,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0644531,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9550781,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5078125,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1796875,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.55078125,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4140625,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0273438,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94140625,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.8173828,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2978516,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0664062,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9560547,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5078125,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1787109,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": null,
"tokens": [
{
"id": 28747,
"logprob": -0.55078125,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -1.4140625,
"special": false,
"text": " Let"
},
{
"id": 307,
"logprob": -3.0273438,
"special": false,
"text": " n"
},
{
"id": 327,
"logprob": -0.94140625,
"special": false,
"text": " ="
},
{
"id": 28705,
"logprob": -0.8173828,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.2978516,
"special": false,
"text": "1"
},
{
"id": 28734,
"logprob": -2.0664062,
"special": false,
"text": "0"
},
{
"id": 387,
"logprob": -1.9560547,
"special": false,
"text": " -"
},
{
"id": 28705,
"logprob": -0.5078125,
"special": false,
"text": " "
},
{
"id": 28740,
"logprob": -1.1787109,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": ": Let n = 10 - 1"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 3735,
"logprob": -12.9140625,
"text": "Test"
},
{
"id": 2159,
"logprob": -10.7578125,
"text": "request"
}
],
"seed": 0,
"tokens": [
{
"id": 28747,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 3169,
"logprob": -0.1307373,
"special": false,
"text": " Let"
},
{
"id": 332,
"logprob": -2.3359375,
"special": false,
"text": " u"
},
{
"id": 347,
"logprob": 0.0,
"special": false,
"text": " be"
},
{
"id": 325,
"logprob": -1.0234375,
"special": false,
"text": " ("
},
{
"id": 28734,
"logprob": -2.0292969,
"special": false,
"text": "0"
},
{
"id": 648,
"logprob": -1.0439453,
"special": false,
"text": " +"
},
{
"id": 28705,
"logprob": -0.24499512,
"special": false,
"text": " "
},
{
"id": 28770,
"logprob": -0.5073242,
"special": false,
"text": "3"
},
{
"id": 387,
"logprob": -1.5507812,
"special": false,
"text": " -"
}
],
"top_tokens": null
},
"generated_text": "Test request: Let u be (0 + 3 -"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25927734,
"text": "_"
},
{
"id": 6009,
"logprob": -2.25,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30126953,
"text": "("
},
{
"id": 62,
"logprob": -5.7539062,
"text": "L"
},
{
"id": 44,
"logprob": -3.0878906,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6845703,
"text": " List"
},
{
"id": 77,
"logprob": -0.3918457,
"text": "["
},
{
"id": 1808,
"logprob": -0.8798828,
"text": "float"
},
{
"id": 10794,
"logprob": -2.4980469,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1533203,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.91796875,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.3291016,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.08062744,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.097717285,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.29003906,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.34958984,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.03829956,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011987686,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.00050878525,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25878906,
"text": "_"
},
{
"id": 6009,
"logprob": -2.2109375,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30371094,
"text": "("
},
{
"id": 62,
"logprob": -5.6054688,
"text": "L"
},
{
"id": 44,
"logprob": -3.0722656,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6879883,
"text": " List"
},
{
"id": 77,
"logprob": -0.38500977,
"text": "["
},
{
"id": 1808,
"logprob": -0.984375,
"text": "float"
},
{
"id": 10794,
"logprob": -2.5351562,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1738281,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.9584961,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.4169922,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.085876465,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.0982666,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.3022461,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.40504883,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.041656494,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011844635,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.0005264282,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25927734,
"text": "_"
},
{
"id": 6009,
"logprob": -2.25,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30126953,
"text": "("
},
{
"id": 62,
"logprob": -5.7539062,
"text": "L"
},
{
"id": 44,
"logprob": -3.0878906,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6845703,
"text": " List"
},
{
"id": 77,
"logprob": -0.3918457,
"text": "["
},
{
"id": 1808,
"logprob": -0.8798828,
"text": "float"
},
{
"id": 10794,
"logprob": -2.4980469,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1533203,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.9165039,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.328125,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.07946777,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.09820557,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.28930664,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.34592773,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.038330078,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011940002,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.00050878525,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.0859375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25927734,
"text": "_"
},
{
"id": 6009,
"logprob": -2.25,
"text": "mean"
},
{
"id": 26,
"logprob": -0.30126953,
"text": "("
},
{
"id": 62,
"logprob": -5.7539062,
"text": "L"
},
{
"id": 44,
"logprob": -3.0878906,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6845703,
"text": " List"
},
{
"id": 77,
"logprob": -0.3918457,
"text": "["
},
{
"id": 1808,
"logprob": -0.8798828,
"text": "float"
},
{
"id": 10794,
"logprob": -2.4980469,
"text": "]):"
}
],
"seed": null,
"tokens": [
{
"id": 284,
"logprob": -1.1533203,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": -0.91259766,
"special": false,
"text": " return"
},
{
"id": 3632,
"logprob": -1.3251953,
"special": false,
"text": " sum"
},
{
"id": 26,
"logprob": -0.08062744,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.09906006,
"special": false,
"text": "L"
},
{
"id": 27,
"logprob": -0.28979492,
"special": false,
"text": ")"
},
{
"id": 517,
"logprob": -0.35958984,
"special": false,
"text": " /"
},
{
"id": 2069,
"logprob": -0.038604736,
"special": false,
"text": " len"
},
{
"id": 26,
"logprob": -0.0011901855,
"special": false,
"text": "("
},
{
"id": 62,
"logprob": -0.0005078316,
"special": false,
"text": "L"
}
]
},
"generated_text": "\n return sum(L) / len(L"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json | {
"generated_text": "\n return sum(L) / len(L)\n\n\ndef geometric_mean(L",
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 20,
"seed": null,
"prefill": [
{
"id": 589,
"text": "def",
"logprob": null
},
{
"id": 3226,
"text": " ge",
"logprob": -9.0234375
},
{
"id": 21017,
"text": "ometric",
"logprob": -9.0859375
},
{
"id": 81,
"text": "_",
"logprob": -0.25878906
},
{
"id": 6009,
"text": "mean",
"logprob": -2.2109375
},
{
"id": 26,
"text": "(",
"logprob": -0.30371094
},
{
"id": 62,
"text": "L",
"logprob": -5.6054688
},
{
"id": 44,
"text": ":",
"logprob": -3.0722656
},
{
"id": 1682,
"text": " List",
"logprob": -0.6879883
},
{
"id": 77,
"text": "[",
"logprob": -0.38500977
},
{
"id": 1808,
"text": "float",
"logprob": -0.984375
},
{
"id": 10794,
"text": "]):",
"logprob": -2.5351562
}
],
"tokens": [
{
"id": 284,
"text": "\n ",
"logprob": -1.1738281,
"special": false
},
{
"id": 442,
"text": " return",
"logprob": -0.95947266,
"special": false
},
{
"id": 3632,
"text": " sum",
"logprob": -1.4199219,
"special": false
},
{
"id": 26,
"text": "(",
"logprob": -0.085876465,
"special": false
},
{
"id": 62,
"text": "L",
"logprob": -0.09875488,
"special": false
},
{
"id": 27,
"text": ")",
"logprob": -0.30517578,
"special": false
},
{
"id": 517,
"text": " /",
"logprob": -0.42089844,
"special": false
},
{
"id": 2069,
"text": " len",
"logprob": -0.042053223,
"special": false
},
{
"id": 26,
"text": "(",
"logprob": -0.0011806488,
"special": false
},
{
"id": 62,
"text": "L",
"logprob": -0.0005259514,
"special": false
},
{
"id": 27,
"text": ")",
"logprob": -0.0017633438,
"special": false
},
{
"id": 478,
"text": "\n\n",
"logprob": -0.69189453,
"special": false
},
{
"id": 203,
"text": "\n",
"logprob": -0.041870117,
"special": false
},
{
"id": 589,
"text": "def",
"logprob": -0.27856445,
"special": false
},
{
"id": 3226,
"text": " ge",
"logprob": -1.7255859,
"special": false
},
{
"id": 21017,
"text": "ometric",
"logprob": -0.011291504,
"special": false
},
{
"id": 81,
"text": "_",
"logprob": -0.008430481,
"special": false
},
{
"id": 6009,
"text": "mean",
"logprob": -0.025787354,
"special": false
},
{
"id": 26,
"text": "(",
"logprob": -0.073913574,
"special": false
},
{
"id": 62,
"text": "L",
"logprob": -0.09967041,
"special": false
}
]
}
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_default_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 20,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 3226,
"logprob": -9.0234375,
"text": " ge"
},
{
"id": 21017,
"logprob": -9.09375,
"text": "ometric"
},
{
"id": 81,
"logprob": -0.25976562,
"text": "_"
},
{
"id": 6009,
"logprob": -2.2148438,
"text": "mean"
},
{
"id": 26,
"logprob": -0.3010254,
"text": "("
},
{
"id": 62,
"logprob": -5.6757812,
"text": "L"
},
{
"id": 44,
"logprob": -3.0898438,
"text": ":"
},
{
"id": 1682,
"logprob": -0.6791992,
"text": " List"
},
{
"id": 77,
"logprob": -0.38891602,
"text": "["
},
{
"id": 1808,
"logprob": -0.92041016,
"text": "float"
},
{
"id": 10794,
"logprob": -2.5390625,
"text": "]):"
}
],
"seed": 0,
"tokens": [
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 442,
"logprob": 0.0,
"special": false,
"text": " return"
},
{
"id": 11665,
"logprob": -1.6005859,
"special": false,
"text": " reduce"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 5962,
"logprob": 0.0,
"special": false,
"text": "lambda"
},
{
"id": 816,
"logprob": 0.0,
"special": false,
"text": " x"
},
{
"id": 30,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 533,
"logprob": 0.0,
"special": false,
"text": " y"
},
{
"id": 44,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 816,
"logprob": 0.0,
"special": false,
"text": " x"
},
{
"id": 319,
"logprob": 0.0,
"special": false,
"text": " *"
},
{
"id": 533,
"logprob": 0.0,
"special": false,
"text": " y"
},
{
"id": 30,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 498,
"logprob": 0.0,
"special": false,
"text": " L"
},
{
"id": 27,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 203,
"logprob": -0.11968994,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 3226,
"logprob": 0.0,
"special": false,
"text": " ge"
},
{
"id": 21017,
"logprob": 0.0,
"special": false,
"text": "ometric"
}
]
},
"generated_text": "\n return reduce(lambda x, y: x * y, L)\n\ndef geometric"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 338,
"logprob": -10.0078125,
"text": "is"
},
{
"id": 21784,
"logprob": -15.515625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -2.8847656,
"text": "Learning"
},
{
"id": 29973,
"logprob": -4.140625,
"text": "?"
}
],
"seed": 0,
"tokens": [
{
"id": 13,
"logprob": -1.1582031,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.23083496,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": 0.0,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": 0.0,
"special": false,
"text": " learning"
},
{
"id": 29892,
"logprob": -0.61816406,
"special": false,
"text": ","
},
{
"id": 607,
"logprob": -0.7089844,
"special": false,
"text": " which"
},
{
"id": 508,
"logprob": -1.7724609,
"special": false,
"text": " can"
},
{
"id": 367,
"logprob": 0.0,
"special": false,
"text": " be"
},
{
"id": 5545,
"logprob": 0.0,
"special": false,
"text": " considered"
},
{
"id": 408,
"logprob": -0.3869629,
"special": false,
"text": " as"
}
]
},
"generated_text": "What is Deep Learning?\nDeep learning, which can be considered as"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2753906,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.48046875,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1845703,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.5727539,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.00010967255,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.04510498,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.00020992756,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.0046539307,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025844574,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2724609,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.47729492,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1826172,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.56689453,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108003616,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004711151,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025892258,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2724609,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.47729492,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1826172,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.56689453,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108003616,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004711151,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025892258,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2724609,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.47729492,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1826172,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.56689453,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108003616,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.018295288,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004711151,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00025892258,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_simple.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -10.734375,
"text": "What"
},
{
"id": 338,
"logprob": -1.5488281,
"text": "is"
},
{
"id": 21784,
"logprob": -9.2890625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.2753906,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.48046875,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.1845703,
"special": false,
"text": "\n"
},
{
"id": 2772,
"logprob": -0.5727539,
"special": false,
"text": "De"
},
{
"id": 1022,
"logprob": -0.000108122826,
"special": false,
"text": "ep"
},
{
"id": 6509,
"logprob": -0.1239624,
"special": false,
"text": " learning"
},
{
"id": 338,
"logprob": -0.044433594,
"special": false,
"text": " is"
},
{
"id": 263,
"logprob": -0.01852417,
"special": false,
"text": " a"
},
{
"id": 11306,
"logprob": -0.45922852,
"special": false,
"text": " subset"
},
{
"id": 310,
"logprob": -0.0002104044,
"special": false,
"text": " of"
},
{
"id": 4933,
"logprob": -0.004787445,
"special": false,
"text": " machine"
},
{
"id": 6509,
"logprob": -0.00026226044,
"special": false,
"text": " learning"
}
]
},
"generated_text": "\nDeep learning is a subset of machine learning"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.1054688,
"text": " your"
},
{
"id": 12315,
"logprob": -9.953125,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.0820312,
"text": " today"
},
{
"id": 32,
"logprob": -0.15148926,
"text": "?"
},
{
"id": 50279,
"logprob": -0.27026367,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.88378906,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9819336,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2421875,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3474121,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.078125,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.69140625,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.4072266,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7041016,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.053375244,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0351562,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.1054688,
"text": " your"
},
{
"id": 12315,
"logprob": -9.953125,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.0820312,
"text": " today"
},
{
"id": 32,
"logprob": -0.15148926,
"text": "?"
},
{
"id": 50279,
"logprob": -0.27026367,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.88378906,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.94921875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2402344,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3725586,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.078125,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67822266,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.3837891,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7050781,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.052001953,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1992188,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8984375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.0976562,
"text": " today"
},
{
"id": 32,
"logprob": -0.14562988,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26733398,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.86279297,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.94921875,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1835938,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.074035645,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.86376953,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.2070312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4365234,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.109375,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -0.93408203,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.8808594,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox/test_neox_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.1953125,
"text": "'s"
},
{
"id": 634,
"logprob": -5.125,
"text": " your"
},
{
"id": 12315,
"logprob": -9.8828125,
"text": " mood"
},
{
"id": 3063,
"logprob": -3.9980469,
"text": " today"
},
{
"id": 32,
"logprob": -0.14672852,
"text": "?"
},
{
"id": 50279,
"logprob": -0.26489258,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8618164,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9506836,
"special": false,
"text": "'m"
},
{
"id": 7016,
"logprob": -2.1738281,
"special": false,
"text": " sorry"
},
{
"id": 13,
"logprob": -0.0758667,
"special": false,
"text": ","
},
{
"id": 1394,
"logprob": -0.9135742,
"special": false,
"text": "You"
},
{
"id": 452,
"logprob": -1.1445312,
"special": false,
"text": " have"
},
{
"id": 247,
"logprob": -1.4375,
"special": false,
"text": " a"
},
{
"id": 4327,
"logprob": -1.1103516,
"special": false,
"text": " choice"
},
{
"id": 273,
"logprob": -1.0058594,
"special": false,
"text": " of"
},
{
"id": 752,
"logprob": -1.921875,
"special": false,
"text": " what"
}
]
},
"generated_text": "I'm sorry,You have a choice of what"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6171875,
"text": "af"
},
{
"id": 249,
"logprob": -6.5039062,
"text": "at"
},
{
"id": 1480,
"logprob": -8.0703125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.328125,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.04837036,
"text": " with"
},
{
"id": 26680,
"logprob": -3.9960938,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07525635,
"text": "aff"
},
{
"id": 255,
"logprob": -0.006790161,
"text": "es"
},
{
"id": 23,
"logprob": -1.546875,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.7363281,
"text": " most"
},
{
"id": 21735,
"logprob": -5.109375,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.09375,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1845703,
"text": " on"
},
{
"id": 248,
"logprob": -0.77734375,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3828125,
"text": " face"
},
{
"id": 275,
"logprob": -0.0044403076,
"text": " of"
},
{
"id": 414,
"logprob": -1.9667969,
"text": " this"
},
{
"id": 6490,
"logprob": -2.0449219,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.921875,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2714844,
"text": "ira"
},
{
"id": 694,
"logprob": -0.62353516,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20947266,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5507812,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5625,
"text": " all"
},
{
"id": 599,
"logprob": -2.7402344,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21899414,
"text": " animals"
},
{
"id": 362,
"logprob": -0.76708984,
"text": " are"
},
{
"id": 23981,
"logprob": -4.9960938,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.103515625,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6796875,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8222656,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23583984,
"text": " of"
},
{
"id": 248,
"logprob": -0.3544922,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24609375,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.02960205,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17358398,
"text": "."
},
{
"id": 193,
"logprob": -1.3925781,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.5898438,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99365234,
"text": ","
},
{
"id": 29033,
"logprob": -2.2304688,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.107788086,
"text": "af"
},
{
"id": 249,
"logprob": -0.04257202,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024871826,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1005859,
"text": "\n"
},
{
"id": 50,
"logprob": -0.056915283,
"text": "G"
},
{
"id": 330,
"logprob": -0.1315918,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.0071105957,
"text": "af"
},
{
"id": 249,
"logprob": -0.008453369,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0006928444,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074920654,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.828125,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.3178711,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.23925781,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.5698242,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.61279297,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.4177246,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.0023345947,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5283203,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007965088,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6171875,
"text": "af"
},
{
"id": 249,
"logprob": -6.5,
"text": "at"
},
{
"id": 1480,
"logprob": -8.0703125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.328125,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.048339844,
"text": " with"
},
{
"id": 26680,
"logprob": -4.0,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07531738,
"text": "aff"
},
{
"id": 255,
"logprob": -0.006793976,
"text": "es"
},
{
"id": 23,
"logprob": -1.5478516,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.7363281,
"text": " most"
},
{
"id": 21735,
"logprob": -5.1132812,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.0957031,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1835938,
"text": " on"
},
{
"id": 248,
"logprob": -0.77685547,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3808594,
"text": " face"
},
{
"id": 275,
"logprob": -0.004436493,
"text": " of"
},
{
"id": 414,
"logprob": -1.9638672,
"text": " this"
},
{
"id": 6490,
"logprob": -2.0449219,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.9179688,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2734375,
"text": "ira"
},
{
"id": 694,
"logprob": -0.6230469,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20947266,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5546875,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5703125,
"text": " all"
},
{
"id": 599,
"logprob": -2.7382812,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21948242,
"text": " animals"
},
{
"id": 362,
"logprob": -0.7661133,
"text": " are"
},
{
"id": 23981,
"logprob": -4.9960938,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.10357666,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6816406,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8203125,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23583984,
"text": " of"
},
{
"id": 248,
"logprob": -0.35473633,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24572754,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.029586792,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17346191,
"text": "."
},
{
"id": 193,
"logprob": -1.3945312,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.59375,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99316406,
"text": ","
},
{
"id": 29033,
"logprob": -2.2324219,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.10797119,
"text": "af"
},
{
"id": 249,
"logprob": -0.04248047,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024814606,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1005859,
"text": "\n"
},
{
"id": 50,
"logprob": -0.056884766,
"text": "G"
},
{
"id": 330,
"logprob": -0.1315918,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.007095337,
"text": "af"
},
{
"id": 249,
"logprob": -0.00844574,
"text": "at"
},
{
"id": 1480,
"logprob": -0.00068998337,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074768066,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.8251953,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.31762695,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.2388916,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.5698242,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.6152344,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.42211914,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.002336502,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5292969,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007926941,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6171875,
"text": "af"
},
{
"id": 249,
"logprob": -6.5,
"text": "at"
},
{
"id": 1480,
"logprob": -8.0703125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.328125,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.048339844,
"text": " with"
},
{
"id": 26680,
"logprob": -4.0,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07531738,
"text": "aff"
},
{
"id": 255,
"logprob": -0.006793976,
"text": "es"
},
{
"id": 23,
"logprob": -1.5478516,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.7363281,
"text": " most"
},
{
"id": 21735,
"logprob": -5.1132812,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.0957031,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1835938,
"text": " on"
},
{
"id": 248,
"logprob": -0.77685547,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3808594,
"text": " face"
},
{
"id": 275,
"logprob": -0.004436493,
"text": " of"
},
{
"id": 414,
"logprob": -1.9638672,
"text": " this"
},
{
"id": 6490,
"logprob": -2.0449219,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.9179688,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2734375,
"text": "ira"
},
{
"id": 694,
"logprob": -0.6230469,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20947266,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5546875,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5703125,
"text": " all"
},
{
"id": 599,
"logprob": -2.7382812,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21948242,
"text": " animals"
},
{
"id": 362,
"logprob": -0.7661133,
"text": " are"
},
{
"id": 23981,
"logprob": -4.9960938,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.10357666,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6816406,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8203125,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23583984,
"text": " of"
},
{
"id": 248,
"logprob": -0.35473633,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24572754,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.029586792,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17346191,
"text": "."
},
{
"id": 193,
"logprob": -1.3945312,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.59375,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99316406,
"text": ","
},
{
"id": 29033,
"logprob": -2.2324219,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.10797119,
"text": "af"
},
{
"id": 249,
"logprob": -0.04248047,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024814606,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1005859,
"text": "\n"
},
{
"id": 50,
"logprob": -0.056884766,
"text": "G"
},
{
"id": 330,
"logprob": -0.1315918,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.007095337,
"text": "af"
},
{
"id": 249,
"logprob": -0.00844574,
"text": "at"
},
{
"id": 1480,
"logprob": -0.00068998337,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074768066,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.8251953,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.31762695,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.2388916,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.5698242,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.6152344,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.42211914,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.002336502,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5292969,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007926941,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6171875,
"text": "af"
},
{
"id": 249,
"logprob": -6.5,
"text": "at"
},
{
"id": 1480,
"logprob": -8.0703125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.328125,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.048339844,
"text": " with"
},
{
"id": 26680,
"logprob": -4.0,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07531738,
"text": "aff"
},
{
"id": 255,
"logprob": -0.006793976,
"text": "es"
},
{
"id": 23,
"logprob": -1.5478516,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.7363281,
"text": " most"
},
{
"id": 21735,
"logprob": -5.1132812,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.0957031,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1835938,
"text": " on"
},
{
"id": 248,
"logprob": -0.77685547,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3808594,
"text": " face"
},
{
"id": 275,
"logprob": -0.004436493,
"text": " of"
},
{
"id": 414,
"logprob": -1.9638672,
"text": " this"
},
{
"id": 6490,
"logprob": -2.0449219,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.9179688,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2734375,
"text": "ira"
},
{
"id": 694,
"logprob": -0.6230469,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20947266,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5546875,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5703125,
"text": " all"
},
{
"id": 599,
"logprob": -2.7382812,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21948242,
"text": " animals"
},
{
"id": 362,
"logprob": -0.7661133,
"text": " are"
},
{
"id": 23981,
"logprob": -4.9960938,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.10357666,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6816406,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8203125,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23583984,
"text": " of"
},
{
"id": 248,
"logprob": -0.35473633,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24572754,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.029586792,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17346191,
"text": "."
},
{
"id": 193,
"logprob": -1.3945312,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.59375,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99316406,
"text": ","
},
{
"id": 29033,
"logprob": -2.2324219,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.10797119,
"text": "af"
},
{
"id": 249,
"logprob": -0.04248047,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024814606,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1005859,
"text": "\n"
},
{
"id": 50,
"logprob": -0.056884766,
"text": "G"
},
{
"id": 330,
"logprob": -0.1315918,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.007095337,
"text": "af"
},
{
"id": 249,
"logprob": -0.00844574,
"text": "at"
},
{
"id": 1480,
"logprob": -0.00068998337,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074768066,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.8251953,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.31762695,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.2388916,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.5698242,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.6152344,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.42211914,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.002336502,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5292969,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007926941,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6132812,
"text": "af"
},
{
"id": 249,
"logprob": -6.5039062,
"text": "at"
},
{
"id": 1480,
"logprob": -8.078125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.3261719,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.048339844,
"text": " with"
},
{
"id": 26680,
"logprob": -4.0,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07556152,
"text": "aff"
},
{
"id": 255,
"logprob": -0.0067749023,
"text": "es"
},
{
"id": 23,
"logprob": -1.546875,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.734375,
"text": " most"
},
{
"id": 21735,
"logprob": -5.109375,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.09375,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1835938,
"text": " on"
},
{
"id": 248,
"logprob": -0.77685547,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3828125,
"text": " face"
},
{
"id": 275,
"logprob": -0.004432678,
"text": " of"
},
{
"id": 414,
"logprob": -1.9677734,
"text": " this"
},
{
"id": 6490,
"logprob": -2.046875,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.9179688,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2753906,
"text": "ira"
},
{
"id": 694,
"logprob": -0.6230469,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20874023,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5507812,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5664062,
"text": " all"
},
{
"id": 599,
"logprob": -2.7402344,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21948242,
"text": " animals"
},
{
"id": 362,
"logprob": -0.7675781,
"text": " are"
},
{
"id": 23981,
"logprob": -5.0,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.103637695,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6835938,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8173828,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23510742,
"text": " of"
},
{
"id": 248,
"logprob": -0.35473633,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24633789,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.02960205,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17333984,
"text": "."
},
{
"id": 193,
"logprob": -1.3935547,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.59375,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99365234,
"text": ","
},
{
"id": 29033,
"logprob": -2.2324219,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.10809326,
"text": "af"
},
{
"id": 249,
"logprob": -0.042663574,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024776459,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1015625,
"text": "\n"
},
{
"id": 50,
"logprob": -0.05709839,
"text": "G"
},
{
"id": 330,
"logprob": -0.13208008,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.0071487427,
"text": "af"
},
{
"id": 249,
"logprob": -0.008468628,
"text": "at"
},
{
"id": 1480,
"logprob": -0.00068998337,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074691772,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.8251953,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.3173828,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.23803711,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.56933594,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.61279297,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.41967773,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.0023403168,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5292969,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007904053,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 330,
"logprob": null,
"text": "ir"
},
{
"id": 1622,
"logprob": -7.8125,
"text": "af"
},
{
"id": 249,
"logprob": -4.5,
"text": "at"
},
{
"id": 1480,
"logprob": -10.875,
"text": "ron"
},
{
"id": 37,
"logprob": -3.6875,
"text": ":"
}
],
"seed": 0,
"tokens": [
{
"id": 836,
"logprob": -1.265625,
"special": false,
"text": " i"
},
{
"id": 18,
"logprob": -0.119628906,
"special": false,
"text": "'"
},
{
"id": 298,
"logprob": -2.265625,
"special": false,
"text": "ve"
},
{
"id": 650,
"logprob": -0.49804688,
"special": false,
"text": " been"
},
{
"id": 1241,
"logprob": 0.0,
"special": false,
"text": " using"
},
{
"id": 334,
"logprob": 0.0,
"special": false,
"text": " it"
},
{
"id": 312,
"logprob": -1.2421875,
"special": false,
"text": " for"
},
{
"id": 909,
"logprob": -0.99609375,
"special": false,
"text": " years"
},
{
"id": 193,
"logprob": -0.30273438,
"special": false,
"text": "\n"
},
{
"id": 807,
"logprob": -1.078125,
"special": false,
"text": "ik"
}
]
},
"generated_text": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron: i've been using it for years\nik"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6289062,
"text": " print"
},
{
"id": 81,
"logprob": -1.6005859,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9921875,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2262,
"logprob": -0.7705078,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": -0.2602539,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": -0.39282227,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.6113281,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": -0.4765625,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -1.5068359,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.8154297,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.7319336,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": -0.35229492,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -1.0380859,
"special": false,
"text": "def"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6289062,
"text": " print"
},
{
"id": 81,
"logprob": -1.6005859,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9921875,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2262,
"logprob": -0.7705078,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": -0.2602539,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": -0.39282227,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.6113281,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": -0.4765625,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -1.5068359,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.8154297,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.7319336,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": -0.35229492,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -1.0380859,
"special": false,
"text": "def"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6289062,
"text": " print"
},
{
"id": 81,
"logprob": -1.6005859,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9921875,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2262,
"logprob": -0.7705078,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": -0.2602539,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": -0.39282227,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.6113281,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": -0.4765625,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -1.5068359,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.8154297,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.7319336,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": -0.35229492,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -1.0380859,
"special": false,
"text": "def"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6289062,
"text": " print"
},
{
"id": 81,
"logprob": -1.6005859,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9921875,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2262,
"logprob": -0.7705078,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": -0.2602539,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": -0.39282227,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.6113281,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": -0.4765625,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -1.5068359,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.8154297,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.7319336,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": -0.35229492,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -1.0380859,
"special": false,
"text": "def"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6289062,
"text": " print"
},
{
"id": 81,
"logprob": -1.6005859,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9921875,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 2262,
"logprob": -0.7705078,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": -0.2590332,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": -0.39379883,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.61376953,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": -0.47338867,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -1.5068359,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.80810547,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.7397461,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": -0.35229492,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -1.0371094,
"special": false,
"text": "def"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder/test_flash_starcoder_default_params.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 60,
"prefill": [
{
"id": 589,
"logprob": null,
"text": "def"
},
{
"id": 1459,
"logprob": -5.6328125,
"text": " print"
},
{
"id": 81,
"logprob": -1.6035156,
"text": "_"
},
{
"id": 7656,
"logprob": -5.9882812,
"text": "hello"
}
],
"seed": 0,
"tokens": [
{
"id": 2262,
"logprob": -0.042999268,
"special": false,
"text": "():"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 10896,
"logprob": -0.38549805,
"special": false,
"text": " World"
},
{
"id": 657,
"logprob": -0.5229492,
"special": false,
"text": "\")"
},
{
"id": 203,
"logprob": -0.10632324,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": -0.20141602,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7656,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 711,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": -0.16027832,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 313,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 636,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 27,
"logprob": 0.0,
"special": false,
"text": ")"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 7656,
"logprob": 0.0,
"special": false,
"text": "hello"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 81,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 381,
"logprob": 0.0,
"special": false,
"text": "age"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 426,
"logprob": 0.0,
"special": false,
"text": "name"
},
{
"id": 30,
"logprob": 0.0,
"special": false,
"text": ","
},
{
"id": 11442,
"logprob": 0.0,
"special": false,
"text": " age"
},
{
"id": 711,
"logprob": 0.0,
"special": false,
"text": "):"
},
{
"id": 284,
"logprob": 0.0,
"special": false,
"text": "\n "
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
},
{
"id": 440,
"logprob": 0.0,
"special": false,
"text": "(\""
},
{
"id": 8279,
"logprob": 0.0,
"special": false,
"text": "Hello"
},
{
"id": 313,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 636,
"logprob": 0.0,
"special": false,
"text": " name"
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 313,
"logprob": -0.6328125,
"special": false,
"text": " \""
},
{
"id": 313,
"logprob": -1.7011719,
"special": false,
"text": " \""
},
{
"id": 474,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 596,
"logprob": 0.0,
"special": false,
"text": " str"
},
{
"id": 26,
"logprob": 0.0,
"special": false,
"text": "("
},
{
"id": 381,
"logprob": 0.0,
"special": false,
"text": "age"
},
{
"id": 490,
"logprob": 0.0,
"special": false,
"text": "))"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 203,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 589,
"logprob": 0.0,
"special": false,
"text": "def"
},
{
"id": 1459,
"logprob": 0.0,
"special": false,
"text": " print"
}
]
},
"generated_text": "():\n print(\"Hello World\")\n\ndef print_hello_name(name):\n print(\"Hello \" + name)\n\ndef print_hello_name_age(name, age):\n print(\"Hello \" + name + \" \" + str(age))\n\ndef print"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq_sharded/test_flash_llama_awq_sharded.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.6914062,
"text": "What"
},
{
"id": 338,
"logprob": -1.4746094,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8623047,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7558594,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9228516,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4609375,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.57177734,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5722656,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5927734,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.026428223,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4267578,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.16015625,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17382812,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62060547,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq_sharded/test_flash_llama_awq_load_sharded.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.6914062,
"text": "What"
},
{
"id": 338,
"logprob": -1.4746094,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8623047,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7558594,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9228516,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4609375,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.57177734,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5722656,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5859375,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.02633667,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4335938,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15991211,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62060547,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.6914062,
"text": "What"
},
{
"id": 338,
"logprob": -1.4746094,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8623047,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7558594,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9228516,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4609375,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.57177734,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5722656,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5859375,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.02633667,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4335938,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15991211,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62060547,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.6914062,
"text": "What"
},
{
"id": 338,
"logprob": -1.4746094,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8623047,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7558594,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9228516,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4609375,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.57177734,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5722656,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5859375,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.02633667,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4335938,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15991211,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62060547,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1724,
"logprob": -7.6914062,
"text": "What"
},
{
"id": 338,
"logprob": -1.4746094,
"text": "is"
},
{
"id": 21784,
"logprob": -9.390625,
"text": "Deep"
},
{
"id": 29257,
"logprob": -1.8623047,
"text": "Learning"
},
{
"id": 29973,
"logprob": -0.7558594,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 13,
"logprob": -1.9228516,
"special": false,
"text": "\n"
},
{
"id": 5618,
"logprob": -2.4609375,
"special": false,
"text": "What"
},
{
"id": 338,
"logprob": -0.57177734,
"special": false,
"text": " is"
},
{
"id": 278,
"logprob": -1.5722656,
"special": false,
"text": " the"
},
{
"id": 4328,
"logprob": -1.5859375,
"special": false,
"text": " difference"
},
{
"id": 1546,
"logprob": -0.02633667,
"special": false,
"text": " between"
},
{
"id": 21784,
"logprob": -1.4335938,
"special": false,
"text": " Deep"
},
{
"id": 29257,
"logprob": -0.15991211,
"special": false,
"text": " Learning"
},
{
"id": 322,
"logprob": -0.17456055,
"special": false,
"text": " and"
},
{
"id": 6189,
"logprob": -0.62060547,
"special": false,
"text": " Machine"
}
],
"top_tokens": null
},
"generated_text": "\nWhat is the difference between Deep Learning and Machine"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 17,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -1.5117188,
"text": " is"
},
{
"id": 18147,
"logprob": -8.96875,
"text": " Deep"
},
{
"id": 20727,
"logprob": -1.953125,
"text": " Learning"
},
{
"id": 32,
"logprob": -0.94189453,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 428,
"logprob": -1.5830078,
"special": false,
"text": " -"
},
{
"id": 18147,
"logprob": -3.3183594,
"special": false,
"text": " Deep"
},
{
"id": 20727,
"logprob": -0.32617188,
"special": false,
"text": " Learning"
},
{
"id": 187,
"logprob": -2.5742188,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.6015625,
"special": false,
"text": "Deep"
},
{
"id": 20727,
"logprob": -0.69628906,
"special": false,
"text": " Learning"
},
{
"id": 310,
"logprob": -0.67822266,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.5395508,
"special": false,
"text": " a"
},
{
"id": 749,
"logprob": -1.8623047,
"special": false,
"text": " sub"
},
{
"id": 3423,
"logprob": -0.6020508,
"special": false,
"text": "field"
},
{
"id": 273,
"logprob": -0.0552063,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.0742188,
"special": false,
"text": " machine"
},
{
"id": 4715,
"logprob": -0.011405945,
"special": false,
"text": " learning"
},
{
"id": 326,
"logprob": -0.9165039,
"special": false,
"text": " that"
},
{
"id": 4648,
"logprob": -1.4501953,
"special": false,
"text": " uses"
},
{
"id": 13345,
"logprob": -1.4960938,
"special": false,
"text": " artificial"
},
{
"id": 11454,
"logprob": -0.02116394,
"special": false,
"text": " neural"
}
]
},
"generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 17,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -1.5,
"text": " is"
},
{
"id": 18147,
"logprob": -8.984375,
"text": " Deep"
},
{
"id": 20727,
"logprob": -1.96875,
"text": " Learning"
},
{
"id": 32,
"logprob": -0.93359375,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 428,
"logprob": -1.5800781,
"special": false,
"text": " -"
},
{
"id": 18147,
"logprob": -3.3242188,
"special": false,
"text": " Deep"
},
{
"id": 20727,
"logprob": -0.31835938,
"special": false,
"text": " Learning"
},
{
"id": 187,
"logprob": -2.5644531,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.5957031,
"special": false,
"text": "Deep"
},
{
"id": 20727,
"logprob": -0.69628906,
"special": false,
"text": " Learning"
},
{
"id": 310,
"logprob": -0.68603516,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.5258789,
"special": false,
"text": " a"
},
{
"id": 749,
"logprob": -1.859375,
"special": false,
"text": " sub"
},
{
"id": 3423,
"logprob": -0.6166992,
"special": false,
"text": "field"
},
{
"id": 273,
"logprob": -0.056762695,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.0703125,
"special": false,
"text": " machine"
},
{
"id": 4715,
"logprob": -0.011428833,
"special": false,
"text": " learning"
},
{
"id": 326,
"logprob": -0.9213867,
"special": false,
"text": " that"
},
{
"id": 4648,
"logprob": -1.4726562,
"special": false,
"text": " uses"
},
{
"id": 13345,
"logprob": -1.5039062,
"special": false,
"text": " artificial"
},
{
"id": 11454,
"logprob": -0.021652222,
"special": false,
"text": " neural"
}
]
},
"generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 17,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -1.5,
"text": " is"
},
{
"id": 18147,
"logprob": -8.984375,
"text": " Deep"
},
{
"id": 20727,
"logprob": -1.96875,
"text": " Learning"
},
{
"id": 32,
"logprob": -0.93359375,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 428,
"logprob": -1.5800781,
"special": false,
"text": " -"
},
{
"id": 18147,
"logprob": -3.3242188,
"special": false,
"text": " Deep"
},
{
"id": 20727,
"logprob": -0.31835938,
"special": false,
"text": " Learning"
},
{
"id": 187,
"logprob": -2.5644531,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.5957031,
"special": false,
"text": "Deep"
},
{
"id": 20727,
"logprob": -0.69628906,
"special": false,
"text": " Learning"
},
{
"id": 310,
"logprob": -0.68603516,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.5258789,
"special": false,
"text": " a"
},
{
"id": 749,
"logprob": -1.859375,
"special": false,
"text": " sub"
},
{
"id": 3423,
"logprob": -0.6166992,
"special": false,
"text": "field"
},
{
"id": 273,
"logprob": -0.056762695,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.0703125,
"special": false,
"text": " machine"
},
{
"id": 4715,
"logprob": -0.011428833,
"special": false,
"text": " learning"
},
{
"id": 326,
"logprob": -0.9213867,
"special": false,
"text": " that"
},
{
"id": 4648,
"logprob": -1.4726562,
"special": false,
"text": " uses"
},
{
"id": 13345,
"logprob": -1.5039062,
"special": false,
"text": " artificial"
},
{
"id": 11454,
"logprob": -0.021652222,
"special": false,
"text": " neural"
}
]
},
"generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 17,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -1.5,
"text": " is"
},
{
"id": 18147,
"logprob": -8.984375,
"text": " Deep"
},
{
"id": 20727,
"logprob": -1.96875,
"text": " Learning"
},
{
"id": 32,
"logprob": -0.93359375,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 428,
"logprob": -1.5800781,
"special": false,
"text": " -"
},
{
"id": 18147,
"logprob": -3.3242188,
"special": false,
"text": " Deep"
},
{
"id": 20727,
"logprob": -0.31835938,
"special": false,
"text": " Learning"
},
{
"id": 187,
"logprob": -2.5644531,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.5957031,
"special": false,
"text": "Deep"
},
{
"id": 20727,
"logprob": -0.69628906,
"special": false,
"text": " Learning"
},
{
"id": 310,
"logprob": -0.68603516,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.5258789,
"special": false,
"text": " a"
},
{
"id": 749,
"logprob": -1.859375,
"special": false,
"text": " sub"
},
{
"id": 3423,
"logprob": -0.6166992,
"special": false,
"text": "field"
},
{
"id": 273,
"logprob": -0.056762695,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.0703125,
"special": false,
"text": " machine"
},
{
"id": 4715,
"logprob": -0.011428833,
"special": false,
"text": " learning"
},
{
"id": 326,
"logprob": -0.9213867,
"special": false,
"text": " that"
},
{
"id": 4648,
"logprob": -1.4726562,
"special": false,
"text": " uses"
},
{
"id": 13345,
"logprob": -1.5039062,
"special": false,
"text": " artificial"
},
{
"id": 11454,
"logprob": -0.021652222,
"special": false,
"text": " neural"
}
]
},
"generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 17,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -1.5117188,
"text": " is"
},
{
"id": 18147,
"logprob": -8.96875,
"text": " Deep"
},
{
"id": 20727,
"logprob": -1.953125,
"text": " Learning"
},
{
"id": 32,
"logprob": -0.94189453,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 428,
"logprob": -1.5830078,
"special": false,
"text": " -"
},
{
"id": 18147,
"logprob": -3.3105469,
"special": false,
"text": " Deep"
},
{
"id": 20727,
"logprob": -0.3215332,
"special": false,
"text": " Learning"
},
{
"id": 187,
"logprob": -2.5566406,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.6074219,
"special": false,
"text": "Deep"
},
{
"id": 20727,
"logprob": -0.69628906,
"special": false,
"text": " Learning"
},
{
"id": 310,
"logprob": -0.6923828,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.5263672,
"special": false,
"text": " a"
},
{
"id": 749,
"logprob": -1.8544922,
"special": false,
"text": " sub"
},
{
"id": 3423,
"logprob": -0.6118164,
"special": false,
"text": "field"
},
{
"id": 273,
"logprob": -0.055877686,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.0537109,
"special": false,
"text": " machine"
},
{
"id": 4715,
"logprob": -0.0115737915,
"special": false,
"text": " learning"
},
{
"id": 326,
"logprob": -0.9111328,
"special": false,
"text": " that"
},
{
"id": 4648,
"logprob": -1.4589844,
"special": false,
"text": " uses"
},
{
"id": 13345,
"logprob": -1.4853516,
"special": false,
"text": " artificial"
},
{
"id": 11454,
"logprob": -0.021636963,
"special": false,
"text": " neural"
}
]
},
"generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5390625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14758301,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9296875,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4453125,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.59375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3994141,
"text": ","
},
{
"id": 1669,
"logprob": -1.578125,
"text": " il"
},
{
"id": 11580,
"logprob": -0.9453125,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7529297,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.6054688,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5283203,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -0.00010049343,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.4716797,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11853027,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.41210938,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.0037765503,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0166016,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.515625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.1484375,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.34375,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.515625,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4199219,
"text": ","
},
{
"id": 1669,
"logprob": -1.5664062,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94091797,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.6660156,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.7753906,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7626953,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.5820312,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5097656,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -9.393692e-05,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.5175781,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11883545,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4909668,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.515625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.1484375,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.34375,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.515625,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4199219,
"text": ","
},
{
"id": 1669,
"logprob": -1.5664062,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94091797,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.6660156,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.7753906,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7626953,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.5820312,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5097656,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -9.393692e-05,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.5175781,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11883545,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4909668,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.515625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.1484375,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9287109,
"text": " un"
},
{
"id": 46341,
"logprob": -15.34375,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.515625,
"text": "olan"
},
{
"id": 15,
"logprob": -1.4199219,
"text": ","
},
{
"id": 1669,
"logprob": -1.5664062,
"text": " il"
},
{
"id": 11580,
"logprob": -0.94091797,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.6660156,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.7753906,
"text": " d'abord"
}
],
"seed": null,
"tokens": [
{
"id": 578,
"logprob": -1.7626953,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.5820312,
"special": false,
"text": " faire"
},
{
"id": 1767,
"logprob": -1.5097656,
"special": false,
"text": " cu"
},
{
"id": 1273,
"logprob": -9.393692e-05,
"special": false,
"text": "ire"
},
{
"id": 1486,
"logprob": -1.5175781,
"special": false,
"text": " dans"
},
{
"id": 283,
"logprob": -1.1982422,
"special": false,
"text": " de"
},
{
"id": 40410,
"logprob": -0.11883545,
"special": false,
"text": " l'eau"
},
{
"id": 20226,
"logprob": -0.4909668,
"special": false,
"text": " bou"
},
{
"id": 172483,
"logprob": -0.003047943,
"special": false,
"text": "illante"
},
{
"id": 2805,
"logprob": -1.0185547,
"special": false,
"text": " sal"
}
]
},
"generated_text": " le faire cuire dans de l'eau bouillante sal"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_bloom_560m_sharded/test_bloom_560m_sharded.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 17934,
"logprob": null,
"text": "Pour"
},
{
"id": 49833,
"logprob": -10.5390625,
"text": " dég"
},
{
"id": 21543,
"logprob": -0.14758301,
"text": "uster"
},
{
"id": 447,
"logprob": -1.9296875,
"text": " un"
},
{
"id": 46341,
"logprob": -15.4453125,
"text": " ort"
},
{
"id": 35567,
"logprob": -7.59375,
"text": "olan"
},
{
"id": 15,
"logprob": -1.3994141,
"text": ","
},
{
"id": 1669,
"logprob": -1.578125,
"text": " il"
},
{
"id": 11580,
"logprob": -0.9453125,
"text": " faut"
},
{
"id": 3913,
"logprob": -3.7011719,
"text": " tout"
},
{
"id": 39261,
"logprob": -1.5732422,
"text": " d'abord"
}
],
"seed": 0,
"tokens": [
{
"id": 578,
"logprob": -1.6474609,
"special": false,
"text": " le"
},
{
"id": 5608,
"logprob": -2.5097656,
"special": false,
"text": " faire"
},
{
"id": 159570,
"logprob": -6.65625,
"special": false,
"text": " réch"
},
{
"id": 810,
"logprob": 0.0,
"special": false,
"text": "au"
},
{
"id": 12736,
"logprob": 0.0,
"special": false,
"text": "ffer"
},
{
"id": 1742,
"logprob": -2.5859375,
"special": false,
"text": " au"
},
{
"id": 6105,
"logprob": -2.03125,
"special": false,
"text": " bain"
},
{
"id": 88254,
"logprob": -0.12695312,
"special": false,
"text": "-mar"
},
{
"id": 641,
"logprob": 0.0,
"special": false,
"text": "ie"
},
{
"id": 2940,
"logprob": -3.5175781,
"special": false,
"text": " avec"
}
]
},
"generated_text": " le faire réchauffer au bain-marie avec"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.0234375,
"text": "What"
},
{
"id": 310,
"logprob": -5.4179688,
"text": " is"
},
{
"id": 247,
"logprob": -2.1542969,
"text": " a"
},
{
"id": 1167,
"logprob": -5.359375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.006038666,
"text": "e"
},
{
"id": 13,
"logprob": -7.328125,
"text": ","
},
{
"id": 285,
"logprob": -0.3173828,
"text": " and"
},
{
"id": 752,
"logprob": -2.0625,
"text": " what"
},
{
"id": 434,
"logprob": -5.7734375,
"text": "'s"
},
{
"id": 253,
"logprob": -0.74072266,
"text": " the"
},
{
"id": 2892,
"logprob": -6.5898438,
"text": " history"
},
{
"id": 3212,
"logprob": -2.2949219,
"text": " behind"
},
{
"id": 436,
"logprob": -11.40625,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1113281,
"text": " word"
},
{
"id": 32,
"logprob": -0.008056641,
"text": "?"
},
{
"id": 0,
"logprob": -2.3300781,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.28125,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.5878906,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5449219,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.05038452,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.002292633,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.3828278e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.0010242462,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.090270996,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12719727,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.016571045,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.43432617,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.0234375,
"text": "What"
},
{
"id": 310,
"logprob": -5.4179688,
"text": " is"
},
{
"id": 247,
"logprob": -2.1542969,
"text": " a"
},
{
"id": 1167,
"logprob": -5.359375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.006038666,
"text": "e"
},
{
"id": 13,
"logprob": -7.328125,
"text": ","
},
{
"id": 285,
"logprob": -0.3173828,
"text": " and"
},
{
"id": 752,
"logprob": -2.0625,
"text": " what"
},
{
"id": 434,
"logprob": -5.7734375,
"text": "'s"
},
{
"id": 253,
"logprob": -0.74072266,
"text": " the"
},
{
"id": 2892,
"logprob": -6.5898438,
"text": " history"
},
{
"id": 3212,
"logprob": -2.2949219,
"text": " behind"
},
{
"id": 436,
"logprob": -11.40625,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1113281,
"text": " word"
},
{
"id": 32,
"logprob": -0.008056641,
"text": "?"
},
{
"id": 0,
"logprob": -2.3300781,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.28125,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.5878906,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5498047,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.04815674,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.002313614,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.2636185e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.0010147095,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.0859375,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12609863,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.016601562,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.38256836,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.0234375,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1640625,
"text": " a"
},
{
"id": 1167,
"logprob": -5.40625,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005420685,
"text": "e"
},
{
"id": 13,
"logprob": -7.2226562,
"text": ","
},
{
"id": 285,
"logprob": -0.26879883,
"text": " and"
},
{
"id": 752,
"logprob": -2.1992188,
"text": " what"
},
{
"id": 434,
"logprob": -5.46875,
"text": "'s"
},
{
"id": 253,
"logprob": -0.8017578,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6796875,
"text": " history"
},
{
"id": 3212,
"logprob": -2.1972656,
"text": " behind"
},
{
"id": 436,
"logprob": -11.4453125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1933594,
"text": " word"
},
{
"id": 32,
"logprob": -0.007858276,
"text": "?"
},
{
"id": 0,
"logprob": -2.328125,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.21875,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.6201172,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.546875,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.051879883,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.0020179749,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -9.059906e-06,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.00096797943,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.07940674,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12182617,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.017227173,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.44482422,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.0234375,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1640625,
"text": " a"
},
{
"id": 1167,
"logprob": -5.40625,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005420685,
"text": "e"
},
{
"id": 13,
"logprob": -7.2226562,
"text": ","
},
{
"id": 285,
"logprob": -0.26879883,
"text": " and"
},
{
"id": 752,
"logprob": -2.1992188,
"text": " what"
},
{
"id": 434,
"logprob": -5.46875,
"text": "'s"
},
{
"id": 253,
"logprob": -0.8017578,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6796875,
"text": " history"
},
{
"id": 3212,
"logprob": -2.1972656,
"text": " behind"
},
{
"id": 436,
"logprob": -11.4453125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1933594,
"text": " word"
},
{
"id": 32,
"logprob": -0.007858276,
"text": "?"
},
{
"id": 0,
"logprob": -2.328125,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.21875,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.6201172,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.546875,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.051879883,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.0020179749,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -9.059906e-06,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.00096797943,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.07940674,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12182617,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.017227173,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.44482422,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.0234375,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1640625,
"text": " a"
},
{
"id": 1167,
"logprob": -5.40625,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005420685,
"text": "e"
},
{
"id": 13,
"logprob": -7.2226562,
"text": ","
},
{
"id": 285,
"logprob": -0.26879883,
"text": " and"
},
{
"id": 752,
"logprob": -2.1992188,
"text": " what"
},
{
"id": 434,
"logprob": -5.46875,
"text": "'s"
},
{
"id": 253,
"logprob": -0.8017578,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6796875,
"text": " history"
},
{
"id": 3212,
"logprob": -2.1972656,
"text": " behind"
},
{
"id": 436,
"logprob": -11.4453125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1933594,
"text": " word"
},
{
"id": 32,
"logprob": -0.007858276,
"text": "?"
},
{
"id": 0,
"logprob": -2.328125,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.21875,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.6201172,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.546875,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.051879883,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.0020179749,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.04904175e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.0009560585,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.08557129,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12084961,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.01737976,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.4025879,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.03125,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1601562,
"text": " a"
},
{
"id": 1167,
"logprob": -5.4609375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005657196,
"text": "e"
},
{
"id": 13,
"logprob": -7.28125,
"text": ","
},
{
"id": 285,
"logprob": -0.2980957,
"text": " and"
},
{
"id": 752,
"logprob": -2.1679688,
"text": " what"
},
{
"id": 434,
"logprob": -5.6210938,
"text": "'s"
},
{
"id": 253,
"logprob": -0.81103516,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6640625,
"text": " history"
},
{
"id": 3212,
"logprob": -2.265625,
"text": " behind"
},
{
"id": 436,
"logprob": -11.5078125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1582031,
"text": " word"
},
{
"id": 32,
"logprob": -0.008720398,
"text": "?"
},
{
"id": 0,
"logprob": -2.4726562,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.265625,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.63183594,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5488281,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.045684814,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.00207901,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.335144e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.00097227097,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.0892334,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12463379,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.01737976,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.50341797,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.03125,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1601562,
"text": " a"
},
{
"id": 1167,
"logprob": -5.4609375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005657196,
"text": "e"
},
{
"id": 13,
"logprob": -7.28125,
"text": ","
},
{
"id": 285,
"logprob": -0.2980957,
"text": " and"
},
{
"id": 752,
"logprob": -2.1679688,
"text": " what"
},
{
"id": 434,
"logprob": -5.6210938,
"text": "'s"
},
{
"id": 253,
"logprob": -0.81103516,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6640625,
"text": " history"
},
{
"id": 3212,
"logprob": -2.265625,
"text": " behind"
},
{
"id": 436,
"logprob": -11.5078125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1582031,
"text": " word"
},
{
"id": 32,
"logprob": -0.008720398,
"text": "?"
},
{
"id": 0,
"logprob": -2.4726562,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.265625,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.63183594,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5488281,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.045684814,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.00207901,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.335144e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.00097227097,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.0892334,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12463379,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.01737976,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.50341797,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.03125,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1601562,
"text": " a"
},
{
"id": 1167,
"logprob": -5.4609375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005657196,
"text": "e"
},
{
"id": 13,
"logprob": -7.28125,
"text": ","
},
{
"id": 285,
"logprob": -0.2980957,
"text": " and"
},
{
"id": 752,
"logprob": -2.1679688,
"text": " what"
},
{
"id": 434,
"logprob": -5.6210938,
"text": "'s"
},
{
"id": 253,
"logprob": -0.81103516,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6640625,
"text": " history"
},
{
"id": 3212,
"logprob": -2.265625,
"text": " behind"
},
{
"id": 436,
"logprob": -11.5078125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1582031,
"text": " word"
},
{
"id": 32,
"logprob": -0.008720398,
"text": "?"
},
{
"id": 0,
"logprob": -2.4726562,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.265625,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.63183594,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5488281,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.045684814,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.00207901,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.335144e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.00097227097,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.0892334,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12463379,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.01737976,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.50341797,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.03125,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1601562,
"text": " a"
},
{
"id": 1167,
"logprob": -5.4609375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005657196,
"text": "e"
},
{
"id": 13,
"logprob": -7.28125,
"text": ","
},
{
"id": 285,
"logprob": -0.2980957,
"text": " and"
},
{
"id": 752,
"logprob": -2.1679688,
"text": " what"
},
{
"id": 434,
"logprob": -5.6210938,
"text": "'s"
},
{
"id": 253,
"logprob": -0.81103516,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6640625,
"text": " history"
},
{
"id": 3212,
"logprob": -2.265625,
"text": " behind"
},
{
"id": 436,
"logprob": -11.5078125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1582031,
"text": " word"
},
{
"id": 32,
"logprob": -0.008720398,
"text": "?"
},
{
"id": 0,
"logprob": -2.4726562,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.265625,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.63183594,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5488281,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.045684814,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.00207901,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.335144e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.00097227097,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.0892334,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12463379,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.01737976,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.50341797,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|prompter|>"
},
{
"id": 1276,
"logprob": -8.03125,
"text": "What"
},
{
"id": 310,
"logprob": -5.421875,
"text": " is"
},
{
"id": 247,
"logprob": -2.1601562,
"text": " a"
},
{
"id": 1167,
"logprob": -5.4609375,
"text": " mem"
},
{
"id": 70,
"logprob": -0.005657196,
"text": "e"
},
{
"id": 13,
"logprob": -7.28125,
"text": ","
},
{
"id": 285,
"logprob": -0.2980957,
"text": " and"
},
{
"id": 752,
"logprob": -2.1679688,
"text": " what"
},
{
"id": 434,
"logprob": -5.6210938,
"text": "'s"
},
{
"id": 253,
"logprob": -0.81103516,
"text": " the"
},
{
"id": 2892,
"logprob": -6.6640625,
"text": " history"
},
{
"id": 3212,
"logprob": -2.265625,
"text": " behind"
},
{
"id": 436,
"logprob": -11.5078125,
"text": " this"
},
{
"id": 3159,
"logprob": -2.1582031,
"text": " word"
},
{
"id": 32,
"logprob": -0.008720398,
"text": "?"
},
{
"id": 0,
"logprob": -2.4726562,
"text": "<|endoftext|>"
},
{
"id": 50281,
"logprob": -18.265625,
"text": "<|assistant|>"
}
],
"seed": null,
"tokens": [
{
"id": 510,
"logprob": -0.63183594,
"special": false,
"text": "The"
},
{
"id": 3159,
"logprob": -0.5390625,
"special": false,
"text": " word"
},
{
"id": 346,
"logprob": -0.045684814,
"special": false,
"text": " \""
},
{
"id": 6441,
"logprob": -0.002090454,
"special": false,
"text": "mem"
},
{
"id": 70,
"logprob": -1.3589859e-05,
"special": false,
"text": "e"
},
{
"id": 3,
"logprob": -0.0009455681,
"special": false,
"text": "\""
},
{
"id": 369,
"logprob": -0.088012695,
"special": false,
"text": " was"
},
{
"id": 806,
"logprob": -0.12585449,
"special": false,
"text": " first"
},
{
"id": 908,
"logprob": -0.017196655,
"special": false,
"text": " used"
},
{
"id": 275,
"logprob": -0.49731445,
"special": false,
"text": " in"
}
]
},
"generated_text": "The word \"meme\" was first used in"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -5.7851562,
"text": "User"
},
{
"id": 29901,
"logprob": -0.006996155,
"text": ":"
},
{
"id": 32000,
"logprob": -0.81347656,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -6.687641e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.5762787e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.2148438,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014137268,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4335938,
"text": "tell"
},
{
"id": 592,
"logprob": -0.2919922,
"text": "me"
},
{
"id": 263,
"logprob": -4.2070312,
"text": "a"
},
{
"id": 1407,
"logprob": -9.421875,
"text": "very"
},
{
"id": 3273,
"logprob": -1.8720703,
"text": "short"
},
{
"id": 5828,
"logprob": -0.26489258,
"text": "story"
},
{
"id": 2729,
"logprob": -3.7441406,
"text": "based"
},
{
"id": 373,
"logprob": -0.0005393028,
"text": "on"
},
{
"id": 278,
"logprob": -0.140625,
"text": "the"
},
{
"id": 1967,
"logprob": -0.06756592,
"text": "image"
},
{
"id": 29973,
"logprob": -0.15454102,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.0019140244,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.392334e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7881393e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -2.9802322e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.0994415e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.9057617,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.2294922,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.00024533272,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1640625,
"special": false,
"text": " stands"
}
],
"top_tokens": null
},
"generated_text": " \nAssistant: A rooster stands"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -5.7773438,
"text": "User"
},
{
"id": 29901,
"logprob": -0.0070114136,
"text": ":"
},
{
"id": 32000,
"logprob": -0.8208008,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -6.699562e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.5762787e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.2265625,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014175415,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4296875,
"text": "tell"
},
{
"id": 592,
"logprob": -0.29516602,
"text": "me"
},
{
"id": 263,
"logprob": -4.2109375,
"text": "a"
},
{
"id": 1407,
"logprob": -9.4296875,
"text": "very"
},
{
"id": 3273,
"logprob": -1.8720703,
"text": "short"
},
{
"id": 5828,
"logprob": -0.26879883,
"text": "story"
},
{
"id": 2729,
"logprob": -3.7675781,
"text": "based"
},
{
"id": 373,
"logprob": -0.0005354881,
"text": "on"
},
{
"id": 278,
"logprob": -0.13671875,
"text": "the"
},
{
"id": 1967,
"logprob": -0.06719971,
"text": "image"
},
{
"id": 29973,
"logprob": -0.15551758,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.0019130707,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.392334e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7881393e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -3.0994415e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.0994415e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.9013672,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.2324219,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.0002477169,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1660156,
"special": false,
"text": " stands"
}
],
"top_tokens": null
},
"generated_text": " \nAssistant: A rooster stands"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -5.7773438,
"text": "User"
},
{
"id": 29901,
"logprob": -0.0070114136,
"text": ":"
},
{
"id": 32000,
"logprob": -0.8208008,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -6.699562e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.5762787e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.2265625,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014175415,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4296875,
"text": "tell"
},
{
"id": 592,
"logprob": -0.29516602,
"text": "me"
},
{
"id": 263,
"logprob": -4.2109375,
"text": "a"
},
{
"id": 1407,
"logprob": -9.4296875,
"text": "very"
},
{
"id": 3273,
"logprob": -1.8720703,
"text": "short"
},
{
"id": 5828,
"logprob": -0.26879883,
"text": "story"
},
{
"id": 2729,
"logprob": -3.7675781,
"text": "based"
},
{
"id": 373,
"logprob": -0.0005354881,
"text": "on"
},
{
"id": 278,
"logprob": -0.13671875,
"text": "the"
},
{
"id": 1967,
"logprob": -0.06719971,
"text": "image"
},
{
"id": 29973,
"logprob": -0.15551758,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.001912117,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.392334e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7762184e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -3.0994415e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.0994415e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.9013672,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.2324219,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.0002477169,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1660156,
"special": false,
"text": " stands"
}
],
"top_tokens": null
},
"generated_text": " \nAssistant: A rooster stands"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -5.7773438,
"text": "User"
},
{
"id": 29901,
"logprob": -0.0070114136,
"text": ":"
},
{
"id": 32000,
"logprob": -0.8208008,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -6.699562e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.5762787e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.2265625,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014175415,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4296875,
"text": "tell"
},
{
"id": 592,
"logprob": -0.29516602,
"text": "me"
},
{
"id": 263,
"logprob": -4.2109375,
"text": "a"
},
{
"id": 1407,
"logprob": -9.4296875,
"text": "very"
},
{
"id": 3273,
"logprob": -1.8720703,
"text": "short"
},
{
"id": 5828,
"logprob": -0.26879883,
"text": "story"
},
{
"id": 2729,
"logprob": -3.7675781,
"text": "based"
},
{
"id": 373,
"logprob": -0.0005354881,
"text": "on"
},
{
"id": 278,
"logprob": -0.13671875,
"text": "the"
},
{
"id": 1967,
"logprob": -0.06719971,
"text": "image"
},
{
"id": 29973,
"logprob": -0.15551758,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.001912117,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.392334e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7762184e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -3.0994415e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.0994415e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.9013672,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.2324219,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.0002477169,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1660156,
"special": false,
"text": " stands"
}
],
"top_tokens": null
},
"generated_text": " \nAssistant: A rooster stands"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_idefics/test_idefics.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 4911,
"logprob": -5.7851562,
"text": "User"
},
{
"id": 29901,
"logprob": -0.006996155,
"text": ":"
},
{
"id": 32000,
"logprob": -0.81347656,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -6.687641e-05,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.5762787e-07,
"text": "<fake_token_around_image>"
},
{
"id": 1815,
"logprob": -4.2148438,
"text": "Can"
},
{
"id": 366,
"logprob": -0.014137268,
"text": "you"
},
{
"id": 2649,
"logprob": -4.4335938,
"text": "tell"
},
{
"id": 592,
"logprob": -0.2919922,
"text": "me"
},
{
"id": 263,
"logprob": -4.2070312,
"text": "a"
},
{
"id": 1407,
"logprob": -9.421875,
"text": "very"
},
{
"id": 3273,
"logprob": -1.8720703,
"text": "short"
},
{
"id": 5828,
"logprob": -0.26489258,
"text": "story"
},
{
"id": 2729,
"logprob": -3.7441406,
"text": "based"
},
{
"id": 373,
"logprob": -0.0005393028,
"text": "on"
},
{
"id": 278,
"logprob": -0.140625,
"text": "the"
},
{
"id": 1967,
"logprob": -0.06756592,
"text": "image"
},
{
"id": 29973,
"logprob": -0.15454102,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 32002,
"logprob": -0.0019140244,
"special": true,
"text": "<end_of_utterance>"
},
{
"id": 29871,
"logprob": -8.404255e-05,
"special": false,
"text": " "
},
{
"id": 13,
"logprob": -1.7642975e-05,
"special": false,
"text": "\n"
},
{
"id": 7900,
"logprob": -2.9802322e-06,
"special": false,
"text": "Ass"
},
{
"id": 22137,
"logprob": 0.0,
"special": false,
"text": "istant"
},
{
"id": 29901,
"logprob": -3.2186508e-06,
"special": false,
"text": ":"
},
{
"id": 319,
"logprob": -0.91064453,
"special": false,
"text": " A"
},
{
"id": 696,
"logprob": -1.2412109,
"special": false,
"text": " ro"
},
{
"id": 15664,
"logprob": -0.0002439022,
"special": false,
"text": "oster"
},
{
"id": 15028,
"logprob": -1.1630859,
"special": false,
"text": " stands"
}
],
"top_tokens": null
},
"generated_text": " \nAssistant: A rooster stands"
}
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder_load.json | [
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 563,
"logprob": null,
"text": "def"
},
{
"id": 942,
"logprob": -5.1367188,
"text": " print"
},
{
"id": 62,
"logprob": -0.24450684,
"text": "_"
},
{
"id": 7196,
"logprob": -6.9609375,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 1241,
"logprob": -0.9863281,
"special": false,
"text": "():"
},
{
"id": 258,
"logprob": -0.21362305,
"special": false,
"text": "\n "
},
{
"id": 942,
"logprob": -0.44360352,
"special": false,
"text": " print"
},
{
"id": 372,
"logprob": -0.54248047,
"special": false,
"text": "(\""
},
{
"id": 7371,
"logprob": -0.44555664,
"special": false,
"text": "Hello"
},
{
"id": 9956,
"logprob": -1.2441406,
"special": false,
"text": " World"
},
{
"id": 8657,
"logprob": -0.75878906,
"special": false,
"text": "!\")"
},
{
"id": 185,
"logprob": -0.76171875,
"special": false,
"text": "\n"
},
{
"id": 185,
"logprob": -0.2084961,
"special": false,
"text": "\n"
},
{
"id": 1018,
"logprob": -1.2460938,
"special": false,
"text": "print"
}
]
},
"generated_text": "():\n print(\"Hello World!\")\n\nprint"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 563,
"logprob": null,
"text": "def"
},
{
"id": 942,
"logprob": -5.1367188,
"text": " print"
},
{
"id": 62,
"logprob": -0.24450684,
"text": "_"
},
{
"id": 7196,
"logprob": -6.9609375,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 1241,
"logprob": -0.9863281,
"special": false,
"text": "():"
},
{
"id": 258,
"logprob": -0.21362305,
"special": false,
"text": "\n "
},
{
"id": 942,
"logprob": -0.44360352,
"special": false,
"text": " print"
},
{
"id": 372,
"logprob": -0.54248047,
"special": false,
"text": "(\""
},
{
"id": 7371,
"logprob": -0.44555664,
"special": false,
"text": "Hello"
},
{
"id": 9956,
"logprob": -1.2441406,
"special": false,
"text": " World"
},
{
"id": 8657,
"logprob": -0.75878906,
"special": false,
"text": "!\")"
},
{
"id": 185,
"logprob": -0.76171875,
"special": false,
"text": "\n"
},
{
"id": 185,
"logprob": -0.2084961,
"special": false,
"text": "\n"
},
{
"id": 1018,
"logprob": -1.2460938,
"special": false,
"text": "print"
}
]
},
"generated_text": "():\n print(\"Hello World!\")\n\nprint"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 563,
"logprob": null,
"text": "def"
},
{
"id": 942,
"logprob": -5.1367188,
"text": " print"
},
{
"id": 62,
"logprob": -0.24450684,
"text": "_"
},
{
"id": 7196,
"logprob": -6.9609375,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 1241,
"logprob": -0.9863281,
"special": false,
"text": "():"
},
{
"id": 258,
"logprob": -0.21362305,
"special": false,
"text": "\n "
},
{
"id": 942,
"logprob": -0.44360352,
"special": false,
"text": " print"
},
{
"id": 372,
"logprob": -0.54248047,
"special": false,
"text": "(\""
},
{
"id": 7371,
"logprob": -0.44555664,
"special": false,
"text": "Hello"
},
{
"id": 9956,
"logprob": -1.2441406,
"special": false,
"text": " World"
},
{
"id": 8657,
"logprob": -0.75878906,
"special": false,
"text": "!\")"
},
{
"id": 185,
"logprob": -0.76171875,
"special": false,
"text": "\n"
},
{
"id": 185,
"logprob": -0.2084961,
"special": false,
"text": "\n"
},
{
"id": 1018,
"logprob": -1.2460938,
"special": false,
"text": "print"
}
]
},
"generated_text": "():\n print(\"Hello World!\")\n\nprint"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 563,
"logprob": null,
"text": "def"
},
{
"id": 942,
"logprob": -5.1367188,
"text": " print"
},
{
"id": 62,
"logprob": -0.24450684,
"text": "_"
},
{
"id": 7196,
"logprob": -6.9609375,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 1241,
"logprob": -0.9863281,
"special": false,
"text": "():"
},
{
"id": 258,
"logprob": -0.21362305,
"special": false,
"text": "\n "
},
{
"id": 942,
"logprob": -0.44360352,
"special": false,
"text": " print"
},
{
"id": 372,
"logprob": -0.54248047,
"special": false,
"text": "(\""
},
{
"id": 7371,
"logprob": -0.44555664,
"special": false,
"text": "Hello"
},
{
"id": 9956,
"logprob": -1.2441406,
"special": false,
"text": " World"
},
{
"id": 8657,
"logprob": -0.75878906,
"special": false,
"text": "!\")"
},
{
"id": 185,
"logprob": -0.76171875,
"special": false,
"text": "\n"
},
{
"id": 185,
"logprob": -0.2084961,
"special": false,
"text": "\n"
},
{
"id": 1018,
"logprob": -1.2460938,
"special": false,
"text": "print"
}
]
},
"generated_text": "():\n print(\"Hello World!\")\n\nprint"
}
]
| 0 |
hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__ | hf_public_repos/text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json | {
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 563,
"logprob": null,
"text": "def"
},
{
"id": 942,
"logprob": -5.1367188,
"text": " print"
},
{
"id": 62,
"logprob": -0.24450684,
"text": "_"
},
{
"id": 7196,
"logprob": -6.9609375,
"text": "hello"
}
],
"seed": null,
"tokens": [
{
"id": 1241,
"logprob": -0.9863281,
"special": false,
"text": "():"
},
{
"id": 258,
"logprob": -0.21447754,
"special": false,
"text": "\n "
},
{
"id": 942,
"logprob": -0.43701172,
"special": false,
"text": " print"
},
{
"id": 372,
"logprob": -0.5361328,
"special": false,
"text": "(\""
},
{
"id": 7371,
"logprob": -0.44555664,
"special": false,
"text": "Hello"
},
{
"id": 9956,
"logprob": -1.2412109,
"special": false,
"text": " World"
},
{
"id": 8657,
"logprob": -0.7583008,
"special": false,
"text": "!\")"
},
{
"id": 185,
"logprob": -0.76171875,
"special": false,
"text": "\n"
},
{
"id": 185,
"logprob": -0.20837402,
"special": false,
"text": "\n"
},
{
"id": 1018,
"logprob": -1.2470703,
"special": false,
"text": "print"
}
]
},
"generated_text": "():\n print(\"Hello World!\")\n\nprint"
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/launcher/build.rs | use std::error::Error;
use vergen::EmitBuilder;
fn main() -> Result<(), Box<dyn Error>> {
// Emit cargo and rustc compile time values
EmitBuilder::builder().all_cargo().all_rustc().emit()?;
// Try to get the git sha from the local git repository
if EmitBuilder::builder()
.fail_on_error()
.git_sha(false)
.emit()
.is_err()
{
// Unable to get the git sha
if let Ok(sha) = std::env::var("GIT_SHA") {
// Set it from an env var
println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}");
}
}
// Set docker label if present
if let Ok(label) = std::env::var("DOCKER_LABEL") {
// Set it from an env var
println!("cargo:rustc-env=DOCKER_LABEL={label}");
}
Ok(())
}
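
// Editor's note (illustrative sketch, not part of the original build script): the
// `cargo:rustc-env=...` instructions emitted above become compile-time environment
// variables, so other launcher code can pick them up with `option_env!`, for example:
//
//     let sha = option_env!("VERGEN_GIT_SHA").unwrap_or("unknown");
//     let label = option_env!("DOCKER_LABEL").unwrap_or("none");
//     println!("build sha: {sha}, docker label: {label}");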
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/launcher/Cargo.toml | [package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
clap = { version = "4.4.5", features = ["derive", "env"] }
ctrlc = { version = "3.4.1", features = ["termination"] }
nix = "0.27.1"
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.107"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.20", features = ["blocking", "json"] }
[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
| 0 |
hf_public_repos/text-generation-inference/launcher | hf_public_repos/text-generation-inference/launcher/src/main.rs | use clap::{Parser, ValueEnum};
use nix::sys::signal::{self, Signal};
use nix::unistd::Pid;
use serde::Deserialize;
use std::env;
use std::ffi::OsString;
use std::io::{BufRead, BufReader, Lines, Read};
use std::os::unix::process::{CommandExt, ExitStatusExt};
use std::path::Path;
use std::process::{Child, Command, ExitStatus, Stdio};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::TryRecvError;
use std::sync::{mpsc, Arc};
use std::thread;
use std::thread::sleep;
use std::time::{Duration, Instant};
use std::{fs, io};
use tracing_subscriber::EnvFilter;
mod env_runtime;
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Quantization {
    /// 4 bit quantization. Requires a specific AWQ quantized model:
    /// https://hf.co/models?search=awq.
    /// Should replace GPTQ models wherever possible because of the better latency.
Awq,
    /// 8 bit quantization; doesn't require a specific model.
/// Should be a drop-in replacement to bitsandbytes with much better performance.
/// Kernels are from https://github.com/NetEase-FuXi/EETQ.git
Eetq,
    /// 4 bit quantization. Requires a specific GPTQ quantized model: https://hf.co/models?search=gptq.
    /// text-generation-inference will use exllama (faster) kernels wherever possible, and fall
    /// back to the triton kernel (wider support) when it's not.
    /// AWQ has faster kernels.
Gptq,
/// Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half,
/// but it is known that the model will be much slower to run than the native f16.
#[deprecated(
since = "1.1.0",
note = "Use `eetq` instead, which provides better latencies overall and is drop-in in most cases"
)]
Bitsandbytes,
/// Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x,
/// but it is known that the model will be much slower to run than the native f16.
BitsandbytesNF4,
    /// Bitsandbytes 4bit. nf4 should be preferred in most cases, but this one may give better
    /// perplexity for your model.
BitsandbytesFP4,
}
impl std::fmt::Display for Quantization {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kept in sync with `server`.
match self {
Quantization::Bitsandbytes => {
write!(f, "bitsandbytes")
}
Quantization::BitsandbytesNF4 => {
write!(f, "bitsandbytes-nf4")
}
Quantization::BitsandbytesFP4 => {
write!(f, "bitsandbytes-fp4")
}
Quantization::Gptq => {
write!(f, "gptq")
}
Quantization::Awq => {
write!(f, "awq")
}
Quantization::Eetq => {
write!(f, "eetq")
}
}
}
}
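
// Editor's sketch (not part of the original source): the `Display` impl above renders each
// variant as a CLI-style string (kept in sync with the Python `server`), which the launcher
// can then pass along verbatim. A minimal check of that mapping:
#[cfg(test)]
mod quantization_display_sketch {
    use super::Quantization;

    #[test]
    fn renders_cli_compatible_names() {
        assert_eq!(Quantization::BitsandbytesNF4.to_string(), "bitsandbytes-nf4");
        assert_eq!(Quantization::Awq.to_string(), "awq");
        assert_eq!(Quantization::Eetq.to_string(), "eetq");
    }
}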
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Dtype {
Float16,
#[clap(name = "bfloat16")]
BFloat16,
}
impl std::fmt::Display for Dtype {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kept in sync with `server`.
match self {
Dtype::Float16 => {
write!(f, "float16")
}
Dtype::BFloat16 => {
write!(f, "bfloat16")
}
}
}
}
#[derive(Clone, Copy, Debug, ValueEnum)]
enum RopeScaling {
Linear,
Dynamic,
}
impl std::fmt::Display for RopeScaling {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kept in sync with `server`.
match self {
RopeScaling::Linear => {
write!(f, "linear")
}
RopeScaling::Dynamic => {
write!(f, "dynamic")
}
}
}
}
/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
/// The name of the model to load.
/// Can be a MODEL_ID as listed on <https://hf.co/models> like
/// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`.
/// Or it can be a local directory containing the necessary files
/// as saved by `save_pretrained(...)` methods of transformers
#[clap(default_value = "bigscience/bloom-560m", long, env)]
model_id: String,
/// The actual revision of the model if you're referring to a model
/// on the hub. You can use a specific commit id or a branch like `refs/pr/2`.
#[clap(long, env)]
revision: Option<String>,
/// The number of tokenizer workers used for payload validation and truncation inside the
/// router.
#[clap(default_value = "2", long, env)]
validation_workers: usize,
/// Whether to shard the model across multiple GPUs
/// By default text-generation-inference will use all available GPUs to run
/// the model. Setting it to `false` deactivates `num_shard`.
#[clap(long, env)]
sharded: Option<bool>,
/// The number of shards to use if you don't want to use all GPUs on a given machine.
/// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2`
/// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to
    /// launch 2 copies with 2 shards each on a given machine with 4 GPUs for instance.
#[clap(long, env)]
num_shard: Option<usize>,
/// Whether you want the model to be quantized.
#[clap(long, env, value_enum)]
quantize: Option<Quantization>,
/// The number of input_ids to speculate on
/// If using a medusa model, the heads will be picked up automatically
    /// Otherwise, it will use n-gram speculation, which is relatively free
/// in terms of compute, but the speedup heavily depends on the task.
#[clap(long, env)]
speculate: Option<usize>,
/// The dtype to be forced upon the model. This option cannot be used with `--quantize`.
#[clap(long, env, value_enum)]
dtype: Option<Dtype>,
/// Whether you want to execute hub modelling code. Explicitly passing a `revision` is
/// encouraged when loading a model with custom code to ensure no malicious code has been
/// contributed in a newer revision.
#[clap(long, env, value_enum)]
trust_remote_code: bool,
/// The maximum amount of concurrent requests for this particular deployment.
    /// Having a low limit will refuse client requests instead of having them
/// wait for too long and is usually good to handle backpressure correctly.
#[clap(default_value = "128", long, env)]
max_concurrent_requests: usize,
/// This is the maximum allowed value for clients to set `best_of`.
    /// Best of makes `n` generations at the same time, and returns the best
/// in terms of overall log probability over the entire generated sequence
#[clap(default_value = "2", long, env)]
max_best_of: usize,
/// This is the maximum allowed value for clients to set `stop_sequences`.
/// Stop sequences are used to allow the model to stop on more than just
/// the EOS token, and enable more complex "prompting" where users can preprompt
/// the model in a specific way and define their "own" stop token aligned with
/// their prompt.
#[clap(default_value = "4", long, env)]
max_stop_sequences: usize,
/// This is the maximum allowed value for clients to set `top_n_tokens`.
    /// `top_n_tokens` is used to return information about the `n` most likely
    /// tokens at each generation step, instead of just the sampled token. This
    /// information can be used for downstream tasks like classification or
/// ranking.
#[clap(default_value = "5", long, env)]
max_top_n_tokens: u32,
/// This is the maximum allowed input length (expressed in number of tokens)
/// for users. The larger this value, the longer prompt users can send which
/// can impact the overall memory required to handle the load.
/// Please note that some models have a finite range of sequence they can handle.
#[clap(default_value = "1024", long, env)]
max_input_length: usize,
/// This is the most important value to set as it defines the "memory budget"
    /// of running client requests.
/// Clients will send input sequences and ask to generate `max_new_tokens`
    /// on top. With a value of `1512`, users can send either a prompt of
/// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for
/// `1511` max_new_tokens.
    /// The larger this value, the more memory each request will take in your RAM
/// and the less effective batching can be.
#[clap(default_value = "2048", long, env)]
max_total_tokens: usize,
/// This represents the ratio of waiting queries vs running queries where
/// you want to start considering pausing the running queries to include the waiting
/// ones into the same batch.
    /// `waiting_served_ratio=1.2` means that when 12 queries are waiting and there are
    /// only 10 queries left in the current batch, we check if we can fit those 12
    /// waiting queries into the batching strategy, and if yes, batching happens,
    /// delaying the 10 running queries by a `prefill` run.
///
/// This setting is only applied if there is room in the batch
/// as defined by `max_batch_total_tokens`.
#[clap(default_value = "1.2", long, env)]
waiting_served_ratio: f32,
/// Limits the number of tokens for the prefill operation.
    /// Since this operation takes the most memory and is compute bound, it is useful
/// to limit the number of requests that can be sent.
#[clap(default_value = "4096", long, env)]
max_batch_prefill_tokens: u32,
/// **IMPORTANT** This is one critical control to allow maximum usage
/// of the available hardware.
///
/// This represents the total amount of potential tokens within a batch.
    /// When using padding (not recommended) this would be equivalent to
/// `batch_size` * `max_total_tokens`.
///
/// However in the non-padded (flash attention) version this can be much finer.
///
/// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100`
/// or a single query of `1000` tokens.
///
/// Overall this number should be the largest possible amount that fits the
/// remaining memory (after the model is loaded). Since the actual memory overhead
/// depends on other parameters like if you're using quantization, flash attention
/// or the model implementation, text-generation-inference cannot infer this number
/// automatically.
#[clap(long, env)]
max_batch_total_tokens: Option<u32>,
/// This setting defines how many tokens can be passed before forcing the waiting
/// queries to be put on the batch (if the size of the batch allows for it).
/// New queries require 1 `prefill` forward, which is different from `decode`
/// and therefore you need to pause the running batch in order to run `prefill`
/// to create the correct values for the waiting queries to be able to join the batch.
///
/// With a value too small, queries will always "steal" the compute to run `prefill`
/// and running queries will be delayed by a lot.
///
/// With a value too big, waiting queries could wait for a very long time
/// before being allowed a slot in the running batch. If your server is busy
/// that means that requests that could run in ~2s on an empty server could
/// end up running in ~20s because the query had to wait for 18s.
///
/// This number is expressed in number of tokens to make it a bit more
/// "model" agnostic, but what should really matter is the overall latency
/// for end users.
#[clap(default_value = "20", long, env)]
max_waiting_tokens: usize,
/// The IP address to listen on
#[clap(default_value = "0.0.0.0", long, env)]
hostname: String,
/// The port to listen on.
#[clap(default_value = "3000", long, short, env)]
port: u16,
/// The name of the socket for gRPC communication between the webserver
/// and the shards.
#[clap(default_value = "/tmp/text-generation-server", long, env)]
shard_uds_path: String,
/// The address the master shard will listen on. (setting used by torch distributed)
#[clap(default_value = "localhost", long, env)]
master_addr: String,
    /// The port the master shard will listen on. (setting used by torch distributed)
#[clap(default_value = "29500", long, env)]
master_port: usize,
/// The location of the huggingface hub cache.
/// Used to override the location if you want to provide a mounted disk for instance
#[clap(long, env)]
huggingface_hub_cache: Option<String>,
    /// The location of the model weights cache override.
    /// Used to override where converted weights are stored, for instance when running inside
    /// a HuggingFace Inference Endpoint with a pre-mounted weights disk
#[clap(long, env)]
weights_cache_override: Option<String>,
/// For some models (like bloom), text-generation-inference implemented custom
/// cuda kernels to speed up inference. Those kernels were only tested on A100.
/// Use this flag to disable them if you're running on different hardware and
/// encounter issues.
#[clap(long, env)]
disable_custom_kernels: bool,
    /// Limit the available CUDA memory.
/// The allowed value equals the total visible memory multiplied by cuda-memory-fraction.
#[clap(default_value = "1.0", long, env)]
cuda_memory_fraction: f32,
/// Rope scaling will only be used for RoPE models
    /// and allow rescaling the position rotary to accommodate
    /// larger prompts.
///
/// Goes together with `rope_factor`.
///
/// `--rope-factor 2.0` gives linear scaling with a factor of 2.0
/// `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0
    /// `--rope-scaling linear` gives linear scaling with a factor of 1.0 (effectively
    /// no change)
///
/// `--rope-scaling linear --rope-factor` fully describes the scaling you want
#[clap(long, env)]
rope_scaling: Option<RopeScaling>,
/// Rope scaling will only be used for RoPE models
/// See `rope_scaling`
#[clap(long, env)]
rope_factor: Option<f32>,
/// Outputs the logs in JSON format (useful for telemetry)
#[clap(long, env)]
json_output: bool,
#[clap(long, env)]
otlp_endpoint: Option<String>,
#[clap(long, env)]
cors_allow_origin: Vec<String>,
#[clap(long, env)]
watermark_gamma: Option<f32>,
#[clap(long, env)]
watermark_delta: Option<f32>,
/// Enable ngrok tunneling
#[clap(long, env)]
ngrok: bool,
/// ngrok authentication token
#[clap(long, env)]
ngrok_authtoken: Option<String>,
/// ngrok edge
#[clap(long, env)]
ngrok_edge: Option<String>,
/// Display a lot of information about your runtime environment
#[clap(long, short, action)]
env: bool,
}
#[derive(Debug)]
enum ShardStatus {
Ready,
Failed(usize),
}
#[allow(clippy::too_many_arguments)]
fn shard_manager(
model_id: String,
revision: Option<String>,
quantize: Option<Quantization>,
speculate: Option<usize>,
dtype: Option<Dtype>,
trust_remote_code: bool,
uds_path: String,
rank: usize,
world_size: usize,
master_addr: String,
master_port: usize,
huggingface_hub_cache: Option<String>,
weights_cache_override: Option<String>,
disable_custom_kernels: bool,
watermark_gamma: Option<f32>,
watermark_delta: Option<f32>,
cuda_memory_fraction: f32,
rope_scaling: Option<RopeScaling>,
rope_factor: Option<f32>,
otlp_endpoint: Option<String>,
status_sender: mpsc::Sender<ShardStatus>,
shutdown: Arc<AtomicBool>,
_shutdown_sender: mpsc::Sender<()>,
) {
// Enter shard-manager tracing span
let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered();
// Get UDS path
let uds_string = format!("{uds_path}-{rank}");
let uds = Path::new(&uds_string);
// Clean previous runs
if uds.exists() {
fs::remove_file(uds).unwrap();
}
// Process args
let mut shard_args = vec![
"serve".to_string(),
model_id,
"--uds-path".to_string(),
uds_path,
"--logger-level".to_string(),
"INFO".to_string(),
"--json-output".to_string(),
];
// Activate trust remote code
if trust_remote_code {
shard_args.push("--trust-remote-code".to_string());
}
// Activate tensor parallelism
if world_size > 1 {
shard_args.push("--sharded".to_string());
}
if let Some(quantize) = quantize {
shard_args.push("--quantize".to_string());
shard_args.push(quantize.to_string())
}
if let Some(speculate) = speculate {
shard_args.push("--speculate".to_string());
shard_args.push(speculate.to_string())
}
if let Some(dtype) = dtype {
shard_args.push("--dtype".to_string());
shard_args.push(dtype.to_string())
}
// Model optional revision
if let Some(revision) = revision {
shard_args.push("--revision".to_string());
shard_args.push(revision)
}
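    // Resolve the RoPE scaling configuration: a scaling mode given alone defaults to a
    // factor of 1.0, while a factor given alone implies linear scaling (see the CLI docs above).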
let rope = match (rope_scaling, rope_factor) {
(None, None) => None,
(Some(scaling), None) => Some((scaling, 1.0)),
(Some(scaling), Some(factor)) => Some((scaling, factor)),
(None, Some(factor)) => Some((RopeScaling::Linear, factor)),
};
// OpenTelemetry
if let Some(otlp_endpoint) = otlp_endpoint {
shard_args.push("--otlp-endpoint".to_string());
shard_args.push(otlp_endpoint);
}
// Copy current process env
let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();
// Torch Distributed Env vars
envs.push(("RANK".into(), rank.to_string().into()));
envs.push(("WORLD_SIZE".into(), world_size.to_string().into()));
envs.push(("MASTER_ADDR".into(), master_addr.into()));
envs.push(("MASTER_PORT".into(), master_port.to_string().into()));
envs.push(("NCCL_ASYNC_ERROR_HANDLING".into(), "1".into()));
// CUDA memory fraction
envs.push((
"CUDA_MEMORY_FRACTION".into(),
cuda_memory_fraction.to_string().into(),
));
// Safetensors load fast
envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into()));
// Enable hf transfer for insane download speeds
let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
envs.push((
"HF_HUB_ENABLE_HF_TRANSFER".into(),
enable_hf_transfer.into(),
));
// Parse Inference API token
if let Ok(api_token) = env::var("HF_API_TOKEN") {
envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
};
// Detect rope scaling
    // Sending as env instead of CLI args to avoid bloating everything;
    // these can only be used by RoPE models, so passing this information around
    // for all models would complicate the code unnecessarily
if let Some((scaling, factor)) = rope {
envs.push(("ROPE_SCALING".into(), scaling.to_string().into()));
envs.push(("ROPE_FACTOR".into(), factor.to_string().into()));
}
// If huggingface_hub_cache is some, pass it to the shard
// Useful when running inside a docker container
if let Some(huggingface_hub_cache) = huggingface_hub_cache {
envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
};
// If weights_cache_override is some, pass it to the shard
// Useful when running inside a HuggingFace Inference Endpoint
if let Some(weights_cache_override) = weights_cache_override {
envs.push((
"WEIGHTS_CACHE_OVERRIDE".into(),
weights_cache_override.into(),
));
};
// If disable_custom_kernels is true, pass it to the shard as an env var
if disable_custom_kernels {
envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into()))
}
// Watermark Gamma
if let Some(watermark_gamma) = watermark_gamma {
envs.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into()))
}
// Watermark Delta
if let Some(watermark_delta) = watermark_delta {
envs.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into()))
}
// Start process
tracing::info!("Starting shard");
let mut p = match Command::new("text-generation-server")
.args(shard_args)
.envs(envs)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.process_group(0)
.spawn()
{
Ok(p) => p,
Err(err) => {
if err.kind() == io::ErrorKind::NotFound {
tracing::error!("text-generation-server not found in PATH");
tracing::error!("Please install it with `make install-server`")
            } else {
                tracing::error!("{}", err);
            }
status_sender.send(ShardStatus::Failed(rank)).unwrap();
return;
}
};
// Redirect STDOUT to the console
let shard_stdout_reader = BufReader::new(p.stdout.take().unwrap());
let shard_stderr_reader = BufReader::new(p.stderr.take().unwrap());
//stdout tracing thread
thread::spawn(move || {
log_lines(shard_stdout_reader.lines());
});
let mut ready = false;
let start_time = Instant::now();
let mut wait_time = Instant::now();
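    // Poll the shard process every 100ms: surface its stderr if it exits, honour shutdown
    // requests, and report readiness once the unix socket appears (logging a reminder
    // roughly every 10s while still waiting).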
loop {
// Process exited
if let Some(exit_status) = p.try_wait().unwrap() {
// We read stderr in another thread as it seems that lines() can block in some cases
let (err_sender, err_receiver) = mpsc::channel();
thread::spawn(move || {
for line in shard_stderr_reader.lines().flatten() {
err_sender.send(line).unwrap_or(());
}
});
let mut err = String::new();
while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) {
err = err + "\n" + &line;
}
tracing::error!("Shard complete standard error output:\n{err}");
if let Some(signal) = exit_status.signal() {
tracing::error!("Shard process was signaled to shutdown with signal {signal}");
}
status_sender.send(ShardStatus::Failed(rank)).unwrap();
return;
}
// We received a shutdown signal
if shutdown.load(Ordering::SeqCst) {
p.kill().unwrap();
let _ = p.wait();
tracing::info!("Shard terminated");
return;
}
// Shard is ready
if uds.exists() && !ready {
tracing::info!("Shard ready in {:?}", start_time.elapsed());
status_sender.send(ShardStatus::Ready).unwrap();
ready = true;
} else if !ready && wait_time.elapsed() > Duration::from_secs(10) {
tracing::info!("Waiting for shard to be ready...");
wait_time = Instant::now();
}
sleep(Duration::from_millis(100));
}
}
fn shutdown_shards(shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>) {
tracing::info!("Shutting down shards");
// Update shutdown value to true
// This will be picked up by the shard manager
shutdown.store(true, Ordering::SeqCst);
// Wait for shards to shutdown
// This will block till all shutdown_sender are dropped
let _ = shutdown_receiver.recv();
}
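/// Count the visible CUDA devices from the environment.
/// Illustrative example (assumed values): with `CUDA_VISIBLE_DEVICES="0,1,2,3"` this
/// returns `Some(4)`; if neither `CUDA_VISIBLE_DEVICES` nor `NVIDIA_VISIBLE_DEVICES`
/// is set it returns `None`.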
fn num_cuda_devices() -> Option<usize> {
let devices = match env::var("CUDA_VISIBLE_DEVICES") {
Ok(devices) => devices,
Err(_) => env::var("NVIDIA_VISIBLE_DEVICES").ok()?,
};
let n_devices = devices.split(',').count();
Some(n_devices)
}
#[derive(Deserialize)]
#[serde(rename_all = "UPPERCASE")]
enum PythonLogLevelEnum {
Trace,
Debug,
Info,
Success,
Warning,
Error,
Critical,
}
#[derive(Deserialize)]
struct PythonLogLevel {
name: PythonLogLevelEnum,
}
#[derive(Deserialize)]
struct PythonLogRecord {
level: PythonLogLevel,
}
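/// One JSON log line emitted by the Python server when launched with `--json-output`.
/// Illustrative (assumed) shape of such a line:
/// `{"text": "Server started", "record": {"level": {"name": "INFO"}}}`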
#[derive(Deserialize)]
struct PythonLogMessage {
text: String,
record: PythonLogRecord,
}
impl PythonLogMessage {
fn trace(&self) {
match self.record.level.name {
PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text),
PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text),
PythonLogLevelEnum::Info => tracing::info!("{}", self.text),
PythonLogLevelEnum::Success => tracing::info!("{}", self.text),
PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text),
PythonLogLevelEnum::Error => tracing::error!("{}", self.text),
PythonLogLevelEnum::Critical => tracing::error!("{}", self.text),
}
}
}
impl TryFrom<&String> for PythonLogMessage {
type Error = serde_json::Error;
fn try_from(value: &String) -> Result<Self, Self::Error> {
serde_json::from_str::<Self>(value)
}
}
fn log_lines<S: Sized + BufRead>(lines: Lines<S>) {
for line in lines.flatten() {
match PythonLogMessage::try_from(&line) {
Ok(log) => log.trace(),
Err(_) => tracing::debug!("{line}"),
}
}
}
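/// Illustrative examples (assumed): `find_num_shards(None, None)` falls back to the number
/// of visible CUDA devices (or 1 if none can be detected), while
/// `find_num_shards(Some(true), None)` requires at least 2 visible devices and returns an
/// error if fewer are found.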
fn find_num_shards(
sharded: Option<bool>,
num_shard: Option<usize>,
) -> Result<usize, LauncherError> {
// get the number of shards given `sharded` and `num_shard`
let num_shard = match (sharded, num_shard) {
(Some(true), None) => {
// try to default to the number of available GPUs
tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES");
let n_devices = num_cuda_devices()
.expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES are not set");
if n_devices <= 1 {
return Err(LauncherError::NotEnoughCUDADevices(format!(
"`sharded` is true but only found {n_devices} CUDA devices"
)));
}
n_devices
}
(Some(true), Some(num_shard)) => {
// we can't have only one shard while sharded
if num_shard <= 1 {
return Err(LauncherError::ArgumentValidation(
"`sharded` is true but `num_shard` <= 1".to_string(),
));
}
num_shard
}
(Some(false), Some(num_shard)) => num_shard,
(Some(false), None) => 1,
(None, None) => num_cuda_devices().unwrap_or(1),
(None, Some(num_shard)) => num_shard,
};
if num_shard < 1 {
return Err(LauncherError::ArgumentValidation(
"`num_shard` cannot be < 1".to_string(),
));
}
Ok(num_shard)
}
#[derive(Debug)]
enum LauncherError {
ArgumentValidation(String),
NotEnoughCUDADevices(String),
DownloadError,
ShardCannotStart,
ShardDisconnected,
ShardFailed,
WebserverFailed,
WebserverCannotStart,
}
fn download_convert_model(args: &Args, running: Arc<AtomicBool>) -> Result<(), LauncherError> {
// Enter download tracing span
let _span = tracing::span!(tracing::Level::INFO, "download").entered();
let mut download_args = vec![
"download-weights".to_string(),
args.model_id.to_string(),
"--extension".to_string(),
".safetensors".to_string(),
"--logger-level".to_string(),
"INFO".to_string(),
"--json-output".to_string(),
];
// Model optional revision
if let Some(revision) = &args.revision {
download_args.push("--revision".to_string());
download_args.push(revision.to_string())
}
// Trust remote code for automatic peft fusion
if args.trust_remote_code {
download_args.push("--trust-remote-code".to_string());
}
// Copy current process env
let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();
// If huggingface_hub_cache is set, pass it to the download process
// Useful when running inside a docker container
if let Some(ref huggingface_hub_cache) = args.huggingface_hub_cache {
envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into()));
};
// Enable hf transfer for insane download speeds
let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string());
envs.push((
"HF_HUB_ENABLE_HF_TRANSFER".into(),
enable_hf_transfer.into(),
));
// Parse Inference API token
if let Ok(api_token) = env::var("HF_API_TOKEN") {
envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
};
// If args.weights_cache_override is some, pass it to the download process
// Useful when running inside a HuggingFace Inference Endpoint
if let Some(weights_cache_override) = &args.weights_cache_override {
envs.push((
"WEIGHTS_CACHE_OVERRIDE".into(),
weights_cache_override.into(),
));
};
// Start process
tracing::info!("Starting download process.");
let mut download_process = match Command::new("text-generation-server")
.args(download_args)
.envs(envs)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.process_group(0)
.spawn()
{
Ok(p) => p,
Err(err) => {
if err.kind() == io::ErrorKind::NotFound {
tracing::error!("text-generation-server not found in PATH");
tracing::error!("Please install it with `make install-server`")
} else {
tracing::error!("{}", err);
}
return Err(LauncherError::DownloadError);
}
};
// Redirect STDOUT to the console
let download_stdout = download_process.stdout.take().unwrap();
let stdout = BufReader::new(download_stdout);
thread::spawn(move || {
log_lines(stdout.lines());
});
loop {
if let Some(status) = download_process.try_wait().unwrap() {
if status.success() {
tracing::info!("Successfully downloaded weights.");
break;
}
let mut err = String::new();
download_process
.stderr
.take()
.unwrap()
.read_to_string(&mut err)
.unwrap();
if let Some(signal) = status.signal() {
tracing::error!(
"Download process was signaled to shutdown with signal {signal}: {err}"
);
} else {
tracing::error!("Download encountered an error: {err}");
}
return Err(LauncherError::DownloadError);
}
if !running.load(Ordering::SeqCst) {
terminate("download", download_process, Duration::from_secs(10)).unwrap();
return Ok(());
}
sleep(Duration::from_millis(100));
}
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn spawn_shards(
num_shard: usize,
args: &Args,
shutdown: Arc<AtomicBool>,
shutdown_receiver: &mpsc::Receiver<()>,
shutdown_sender: mpsc::Sender<()>,
status_receiver: &mpsc::Receiver<ShardStatus>,
status_sender: mpsc::Sender<ShardStatus>,
running: Arc<AtomicBool>,
) -> Result<(), LauncherError> {
// Start shard processes
for rank in 0..num_shard {
let model_id = args.model_id.clone();
let revision = args.revision.clone();
let uds_path = args.shard_uds_path.clone();
let master_addr = args.master_addr.clone();
let huggingface_hub_cache = args.huggingface_hub_cache.clone();
let weights_cache_override = args.weights_cache_override.clone();
let status_sender = status_sender.clone();
let shutdown = shutdown.clone();
let shutdown_sender = shutdown_sender.clone();
let otlp_endpoint = args.otlp_endpoint.clone();
let quantize = args.quantize;
let speculate = args.speculate;
let dtype = args.dtype;
let trust_remote_code = args.trust_remote_code;
let master_port = args.master_port;
let disable_custom_kernels = args.disable_custom_kernels;
let watermark_gamma = args.watermark_gamma;
let watermark_delta = args.watermark_delta;
let cuda_memory_fraction = args.cuda_memory_fraction;
let rope_scaling = args.rope_scaling;
let rope_factor = args.rope_factor;
thread::spawn(move || {
shard_manager(
model_id,
revision,
quantize,
speculate,
dtype,
trust_remote_code,
uds_path,
rank,
num_shard,
master_addr,
master_port,
huggingface_hub_cache,
weights_cache_override,
disable_custom_kernels,
watermark_gamma,
watermark_delta,
cuda_memory_fraction,
rope_scaling,
rope_factor,
otlp_endpoint,
status_sender,
shutdown,
shutdown_sender,
)
});
}
drop(shutdown_sender);
    // Wait for shards to start
let mut shard_ready = 0;
while running.load(Ordering::SeqCst) {
match status_receiver.try_recv() {
Ok(ShardStatus::Ready) => {
shard_ready += 1;
if shard_ready == num_shard {
break;
}
}
Err(TryRecvError::Empty) => {
sleep(Duration::from_millis(100));
}
Ok(ShardStatus::Failed(rank)) => {
tracing::error!("Shard {rank} failed to start");
shutdown_shards(shutdown, shutdown_receiver);
return Err(LauncherError::ShardCannotStart);
}
Err(TryRecvError::Disconnected) => {
tracing::error!("Shard status channel disconnected");
shutdown_shards(shutdown, shutdown_receiver);
return Err(LauncherError::ShardDisconnected);
}
}
}
Ok(())
}
fn spawn_webserver(
args: Args,
shutdown: Arc<AtomicBool>,
shutdown_receiver: &mpsc::Receiver<()>,
) -> Result<Child, LauncherError> {
    // All shards have started
// Start webserver
tracing::info!("Starting Webserver");
let mut router_args = vec![
"--max-concurrent-requests".to_string(),
args.max_concurrent_requests.to_string(),
"--max-best-of".to_string(),
args.max_best_of.to_string(),
"--max-stop-sequences".to_string(),
args.max_stop_sequences.to_string(),
"--max-top-n-tokens".to_string(),
args.max_top_n_tokens.to_string(),
"--max-input-length".to_string(),
args.max_input_length.to_string(),
"--max-total-tokens".to_string(),
args.max_total_tokens.to_string(),
"--max-batch-prefill-tokens".to_string(),
args.max_batch_prefill_tokens.to_string(),
"--waiting-served-ratio".to_string(),
args.waiting_served_ratio.to_string(),
"--max-waiting-tokens".to_string(),
args.max_waiting_tokens.to_string(),
"--validation-workers".to_string(),
args.validation_workers.to_string(),
"--hostname".to_string(),
args.hostname.to_string(),
"--port".to_string(),
args.port.to_string(),
"--master-shard-uds-path".to_string(),
format!("{}-0", args.shard_uds_path),
"--tokenizer-name".to_string(),
args.model_id,
];
// Model optional max batch total tokens
if let Some(max_batch_total_tokens) = args.max_batch_total_tokens {
router_args.push("--max-batch-total-tokens".to_string());
router_args.push(max_batch_total_tokens.to_string());
}
// Model optional revision
if let Some(ref revision) = args.revision {
router_args.push("--revision".to_string());
router_args.push(revision.to_string())
}
if args.json_output {
router_args.push("--json-output".to_string());
}
// OpenTelemetry
if let Some(otlp_endpoint) = args.otlp_endpoint {
router_args.push("--otlp-endpoint".to_string());
router_args.push(otlp_endpoint);
}
// CORS origins
for origin in args.cors_allow_origin.into_iter() {
router_args.push("--cors-allow-origin".to_string());
router_args.push(origin);
}
// Ngrok
if args.ngrok {
router_args.push("--ngrok".to_string());
router_args.push("--ngrok-authtoken".to_string());
router_args.push(args.ngrok_authtoken.unwrap());
router_args.push("--ngrok-edge".to_string());
router_args.push(args.ngrok_edge.unwrap());
}
// Copy current process env
let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect();
// Parse Inference API token
if let Ok(api_token) = env::var("HF_API_TOKEN") {
envs.push(("HUGGING_FACE_HUB_TOKEN".into(), api_token.into()))
};
let mut webserver = match Command::new("text-generation-router")
.args(router_args)
.envs(envs)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.process_group(0)
.spawn()
{
Ok(p) => p,
Err(err) => {
tracing::error!("Failed to start webserver: {}", err);
if err.kind() == io::ErrorKind::NotFound {
tracing::error!("text-generation-router not found in PATH");
tracing::error!("Please install it with `make install-router`")
} else {
tracing::error!("{}", err);
}
shutdown_shards(shutdown, shutdown_receiver);
return Err(LauncherError::WebserverCannotStart);
}
};
// Redirect STDOUT and STDERR to the console
let webserver_stdout = webserver.stdout.take().unwrap();
let webserver_stderr = webserver.stderr.take().unwrap();
thread::spawn(move || {
let stdout = BufReader::new(webserver_stdout);
let stderr = BufReader::new(webserver_stderr);
for line in stdout.lines() {
println!("{}", line.unwrap());
}
for line in stderr.lines() {
println!("{}", line.unwrap());
}
});
Ok(webserver)
}
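/// Gracefully terminate a child process: send SIGTERM, poll for up to `timeout`,
/// then force-kill it if it has not exited in time.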
fn terminate(process_name: &str, mut process: Child, timeout: Duration) -> io::Result<ExitStatus> {
tracing::info!("Terminating {process_name}");
let terminate_time = Instant::now();
signal::kill(Pid::from_raw(process.id() as i32), Signal::SIGTERM).unwrap();
tracing::info!("Waiting for {process_name} to gracefully shutdown");
while terminate_time.elapsed() < timeout {
if let Some(status) = process.try_wait()? {
tracing::info!("{process_name} terminated");
return Ok(status);
}
sleep(Duration::from_millis(100));
}
tracing::info!("Killing {process_name}");
process.kill()?;
let exit_status = process.wait()?;
tracing::info!("{process_name} killed");
Ok(exit_status)
}
fn main() -> Result<(), LauncherError> {
// Pattern match configuration
let args: Args = Args::parse();
// Filter events with LOG_LEVEL
let env_filter =
EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));
if args.json_output {
tracing_subscriber::fmt()
.with_env_filter(env_filter)
.json()
.init();
} else {
tracing_subscriber::fmt()
.with_env_filter(env_filter)
.compact()
.init();
}
if args.env {
let env_runtime = env_runtime::Env::new();
tracing::info!("{}", env_runtime);
}
tracing::info!("{:?}", args);
// Validate args
if args.max_input_length >= args.max_total_tokens {
return Err(LauncherError::ArgumentValidation(
"`max_input_length` must be < `max_total_tokens`".to_string(),
));
}
if args.max_input_length as u32 > args.max_batch_prefill_tokens {
return Err(LauncherError::ArgumentValidation(format!(
"`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {} and {}",
args.max_batch_prefill_tokens, args.max_input_length
)));
}
if args.validation_workers == 0 {
return Err(LauncherError::ArgumentValidation(
"`validation_workers` must be > 0".to_string(),
));
}
if args.trust_remote_code {
        tracing::warn!(
            "`trust_remote_code` is set. Trusting that model `{}` does not contain malicious code.",
            args.model_id
        );
}
let num_shard = find_num_shards(args.sharded, args.num_shard)?;
if num_shard > 1 {
tracing::info!("Sharding model on {num_shard} processes");
}
if let Some(ref max_batch_total_tokens) = args.max_batch_total_tokens {
if args.max_batch_prefill_tokens > *max_batch_total_tokens {
return Err(LauncherError::ArgumentValidation(format!(
"`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}",
args.max_batch_prefill_tokens, max_batch_total_tokens
)));
}
if args.max_total_tokens as u32 > *max_batch_total_tokens {
return Err(LauncherError::ArgumentValidation(format!(
"`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}",
args.max_total_tokens, max_batch_total_tokens
)));
}
}
if args.ngrok {
if args.ngrok_authtoken.is_none() {
return Err(LauncherError::ArgumentValidation(
"`ngrok-authtoken` must be set when using ngrok tunneling".to_string(),
));
}
if args.ngrok_edge.is_none() {
return Err(LauncherError::ArgumentValidation(
"`ngrok-edge` must be set when using ngrok tunneling".to_string(),
));
}
}
// Signal handler
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
ctrlc::set_handler(move || {
r.store(false, Ordering::SeqCst);
})
.expect("Error setting Ctrl-C handler");
// Download and convert model weights
download_convert_model(&args, running.clone())?;
if !running.load(Ordering::SeqCst) {
// Launcher was asked to stop
return Ok(());
}
// Shared shutdown bool
let shutdown = Arc::new(AtomicBool::new(false));
// Shared shutdown channel
// When shutting down, the main thread will wait for all senders to be dropped
let (shutdown_sender, shutdown_receiver) = mpsc::channel();
// Shared channel to track shard status
let (status_sender, status_receiver) = mpsc::channel();
spawn_shards(
num_shard,
&args,
shutdown.clone(),
&shutdown_receiver,
shutdown_sender,
&status_receiver,
status_sender,
running.clone(),
)?;
// We might have received a termination signal
if !running.load(Ordering::SeqCst) {
shutdown_shards(shutdown, &shutdown_receiver);
return Ok(());
}
let mut webserver =
spawn_webserver(args, shutdown.clone(), &shutdown_receiver).map_err(|err| {
shutdown_shards(shutdown.clone(), &shutdown_receiver);
err
})?;
// Default exit code
let mut exit_code = Ok(());
while running.load(Ordering::SeqCst) {
if let Ok(ShardStatus::Failed(rank)) = status_receiver.try_recv() {
tracing::error!("Shard {rank} crashed");
exit_code = Err(LauncherError::ShardFailed);
break;
};
match webserver.try_wait().unwrap() {
Some(_) => {
tracing::error!("Webserver Crashed");
shutdown_shards(shutdown, &shutdown_receiver);
return Err(LauncherError::WebserverFailed);
}
None => {
sleep(Duration::from_millis(100));
}
};
}
// Graceful termination
terminate("webserver", webserver, Duration::from_secs(90)).unwrap();
shutdown_shards(shutdown, &shutdown_receiver);
exit_code
}
| 0 |
hf_public_repos/text-generation-inference/launcher | hf_public_repos/text-generation-inference/launcher/src/env_runtime.rs | use std::fmt;
use std::process::Command;
pub(crate) struct Env {
cargo_target: &'static str,
cargo_version: &'static str,
git_sha: &'static str,
docker_label: &'static str,
nvidia_env: String,
}
impl Env {
pub fn new() -> Self {
let nvidia_env = nvidia_smi();
Self {
nvidia_env: nvidia_env.unwrap_or("N/A".to_string()),
cargo_target: env!("VERGEN_CARGO_TARGET_TRIPLE"),
cargo_version: env!("VERGEN_RUSTC_SEMVER"),
git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"),
docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"),
}
}
}
impl fmt::Display for Env {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Runtime environment:")?;
writeln!(f, "Target: {}", self.cargo_target)?;
writeln!(f, "Cargo version: {}", self.cargo_version)?;
writeln!(f, "Commit sha: {}", self.git_sha)?;
writeln!(f, "Docker label: {}", self.docker_label)?;
write!(f, "nvidia-smi:\n{}", self.nvidia_env)?;
Ok(())
}
}
fn nvidia_smi() -> Option<String> {
let output = Command::new("nvidia-smi").output().ok()?;
let nvidia_smi = String::from_utf8(output.stdout).ok()?;
let output = nvidia_smi.replace('\n', "\n ");
Some(output.trim().to_string())
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/benchmark/README.md | <div align="center">
# Text Generation Inference benchmarking tool
![benchmark](../assets/benchmark.png)
</div>
A lightweight benchmarking tool inspired by [oha](https://github.com/hatoo/oha)
and powered by [tui](https://github.com/tui-rs-revival/ratatui).
## Install
```shell
make install-benchmark
```
## Run
First, start `text-generation-inference`:
```shell
text-generation-launcher --model-id bigscience/bloom-560m
```
Then run the benchmarking tool:
```shell
text-generation-benchmark --tokenizer-name bigscience/bloom-560m
``` | 0 |
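The benchmark exposes more options (see the `Args` struct in `src/main.rs`); for example, a sweep over several batch sizes with a custom prompt length, decode length and number of runs could look like this (illustrative values, not a tuned recommendation):
```shell
text-generation-benchmark \
    --tokenizer-name bigscience/bloom-560m \
    --batch-size 1 --batch-size 2 --batch-size 4 \
    --sequence-length 512 \
    --decode-length 64 \
    --runs 20
```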
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/benchmark/Cargo.toml | [package]
name = "text-generation-benchmark"
description = "Text Generation Benchmarking tool"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[lib]
path = "src/lib.rs"
[[bin]]
name = "text-generation-benchmark"
path = "src/main.rs"
[dependencies]
average = "0.14"
clap = { version = "4.4.5", features = ["derive", "env"] }
crossterm = "0.27"
float-ord = "0.3.2"
serde = {version = "1.0.188", features = ["derive"]}
serde_json = "1.0"
tabled = "0.14.0"
text-generation-client = { path = "../router/client" }
thiserror = "1.0.48"
tokenizers = { version = "0.14.0", features = ["http"] }
tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync", "macros"] }
tui = {package = "ratatui", version = "0.23", default-features = false, features = ["crossterm"]}
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
hf-hub = "0.3.1"
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/main.rs | /// Text Generation Inference benchmarking tool
///
/// Inspired by the great Oha app: https://github.com/hatoo/oha
/// and: https://github.com/orhun/rust-tui-template
use clap::Parser;
use std::path::Path;
use text_generation_client::ShardedClient;
use tokenizers::{FromPretrainedParameters, Tokenizer};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
/// The name of the tokenizer (as in model_id on the huggingface hub, or local path).
#[clap(short, long, env)]
tokenizer_name: String,
/// The revision to use for the tokenizer if on the hub.
#[clap(default_value = "main", long, env)]
revision: String,
    /// The various batch sizes to benchmark. The idea is to apply enough
    /// batching to start seeing increased latency; this usually means you're
    /// moving from memory bound (usual at BS=1) to compute bound, and that is
    /// a sweet spot for the maximum batch size for the model under test.
#[clap(short, long)]
batch_size: Option<Vec<u32>>,
    /// This is the length, in tokens, of the initial prompt sent to the
    /// text-generation-server. A longer prompt will slow down the benchmark. Usually the
    /// latency grows somewhat linearly with this for the prefill step.
///
/// Most importantly, the prefill step is usually not the one dominating
/// your runtime, so it's ok to keep it short.
#[clap(default_value = "10", short, long, env)]
sequence_length: u32,
    /// This is how many tokens will be generated by the server and averaged out
    /// to give the `decode` latency. This is the *critical* number you want to optimize for,
    /// as LLMs spend most of their time doing decoding.
///
/// Decode latency is usually quite stable.
#[clap(default_value = "8", short, long, env)]
decode_length: u32,
    /// How many runs to average over
#[clap(default_value = "10", short, long, env)]
runs: usize,
/// Number of warmup cycles
#[clap(default_value = "1", short, long, env)]
warmups: usize,
/// The location of the grpc socket. This benchmark tool bypasses the router
/// completely and directly talks to the gRPC processes
#[clap(default_value = "/tmp/text-generation-server-0", short, long, env)]
master_shard_uds_path: String,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
temperature: Option<f32>,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
top_k: Option<u32>,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
top_p: Option<f32>,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
typical_p: Option<f32>,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
repetition_penalty: Option<f32>,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
watermark: bool,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
do_sample: bool,
/// Generation parameter in case you want to specifically test/debug particular
/// decoding strategies, for full doc refer to the `text-generation-server`
#[clap(long, env)]
top_n_tokens: Option<u32>,
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
init_logging();
// Get args
let args = Args::parse();
// Pattern match configuration
let Args {
tokenizer_name,
revision,
batch_size,
sequence_length,
decode_length,
runs,
warmups,
temperature,
top_k,
top_p,
typical_p,
repetition_penalty,
watermark,
do_sample,
master_shard_uds_path,
top_n_tokens,
} = args;
let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]);
// Tokenizer instance
// This will only be used to validate payloads
tracing::info!("Loading tokenizer");
let local_path = Path::new(&tokenizer_name);
let tokenizer =
if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists()
{
// Load local tokenizer
tracing::info!("Found local tokenizer");
Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap()
} else {
tracing::info!("Downloading tokenizer");
// Parse Huggingface hub token
let auth_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok();
// Download and instantiate tokenizer
// We need to download it outside of the Tokio runtime
let params = FromPretrainedParameters {
revision,
auth_token,
..Default::default()
};
Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap()
};
tracing::info!("Tokenizer loaded");
// Launch Tokio runtime
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
// Instantiate sharded client from the master unix socket
tracing::info!("Connect to model server");
let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
.await
.expect("Could not connect to server");
// Clear the cache; useful if the webserver rebooted
sharded_client
.clear_cache(None)
.await
.expect("Unable to clear cache");
tracing::info!("Connected");
// Run app
text_generation_benchmark::run(
tokenizer_name,
tokenizer,
batch_size,
sequence_length,
decode_length,
top_n_tokens,
runs,
warmups,
temperature,
top_k,
top_p,
typical_p,
repetition_penalty,
watermark,
do_sample,
sharded_client,
)
.await
.unwrap();
});
Ok(())
}
/// Init logging using LOG_LEVEL
fn init_logging() {
// STDOUT/STDERR layer
let fmt_layer = tracing_subscriber::fmt::layer()
.with_file(true)
.with_line_number(true);
// Filter events with LOG_LEVEL
let env_filter =
EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));
tracing_subscriber::registry()
.with(env_filter)
.with(fmt_layer)
.init();
}
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/table.rs | use crate::app::Data;
use tabled::settings::Merge;
use tabled::{builder::Builder, settings::Style, Table};
#[allow(clippy::too_many_arguments)]
pub(crate) fn parameters_table(
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
temperature: Option<f32>,
top_k: Option<u32>,
top_p: Option<f32>,
typical_p: Option<f32>,
repetition_penalty: Option<f32>,
watermark: bool,
do_sample: bool,
) -> Table {
let mut builder = Builder::default();
builder.set_header(["Parameter", "Value"]);
builder.push_record(["Model", &tokenizer_name]);
builder.push_record(["Sequence Length", &sequence_length.to_string()]);
builder.push_record(["Decode Length", &decode_length.to_string()]);
builder.push_record(["Top N Tokens", &format!("{top_n_tokens:?}")]);
builder.push_record(["N Runs", &n_runs.to_string()]);
builder.push_record(["Warmups", &warmups.to_string()]);
builder.push_record(["Temperature", &format!("{temperature:?}")]);
builder.push_record(["Top K", &format!("{top_k:?}")]);
builder.push_record(["Top P", &format!("{top_p:?}")]);
builder.push_record(["Typical P", &format!("{typical_p:?}")]);
builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]);
builder.push_record(["Watermark", &watermark.to_string()]);
builder.push_record(["Do Sample", &do_sample.to_string()]);
let mut table = builder.build();
table.with(Style::markdown());
table
}
pub(crate) fn latency_table(data: &Data) -> Table {
let mut builder = Builder::default();
builder.set_header([
"Step",
"Batch Size",
"Average",
"Lowest",
"Highest",
"p50",
"p90",
"p99",
]);
add_latencies(
&mut builder,
"Prefill",
&data.batch_size,
&data.prefill_latencies,
);
add_latencies(
&mut builder,
"Decode (token)",
&data.batch_size,
&data.decode_token_latencies,
);
add_latencies(
&mut builder,
"Decode (total)",
&data.batch_size,
&data.decode_latencies,
);
let mut table = builder.build();
table.with(Style::markdown()).with(Merge::vertical());
table
}
pub(crate) fn throughput_table(data: &Data) -> Table {
let mut builder = Builder::default();
builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]);
    add_throughputs(
&mut builder,
"Prefill",
&data.batch_size,
&data.prefill_throughputs,
);
    add_throughputs(
&mut builder,
"Decode",
&data.batch_size,
&data.decode_throughputs,
);
let mut table = builder.build();
table.with(Style::markdown()).with(Merge::vertical());
table
}
fn add_latencies(
builder: &mut Builder,
step: &'static str,
batch_size: &[u32],
batch_latencies: &[Vec<f64>],
) {
for (i, b) in batch_size.iter().enumerate() {
let latencies = &batch_latencies[i];
let (avg, min, max) = avg_min_max(latencies);
let row = [
step,
&b.to_string(),
&format_value(avg, "ms"),
&format_value(min, "ms"),
&format_value(max, "ms"),
&format_value(px(latencies, 50), "ms"),
&format_value(px(latencies, 90), "ms"),
&format_value(px(latencies, 99), "ms"),
];
builder.push_record(row);
}
}
fn add_throughputs(
builder: &mut Builder,
step: &'static str,
batch_size: &[u32],
batch_throughputs: &[Vec<f64>],
) {
for (i, b) in batch_size.iter().enumerate() {
let throughputs = &batch_throughputs[i];
let (avg, min, max) = avg_min_max(throughputs);
let row = [
step,
&b.to_string(),
&format_value(avg, "tokens/secs"),
&format_value(min, "tokens/secs"),
&format_value(max, "tokens/secs"),
];
builder.push_record(row);
}
}
fn avg_min_max(data: &Vec<f64>) -> (f64, f64, f64) {
let average = data.iter().sum::<f64>() / data.len() as f64;
let min = data
.iter()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
let max = data
.iter()
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
(average, *min, *max)
}
fn px(data: &Vec<f64>, p: u32) -> f64 {
let i = (f64::from(p) / 100.0 * data.len() as f64) as usize;
*data.get(i).unwrap_or(&std::f64::NAN)
}
fn format_value(value: f64, unit: &'static str) -> String {
format!("{:.2} {unit}", value)
}
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/lib.rs | mod app;
mod event;
mod generation;
mod table;
mod utils;
use crate::app::App;
use crate::event::Event;
use crossterm::ExecutableCommand;
use std::io;
use text_generation_client::{NextTokenChooserParameters, ShardedClient};
use tokenizers::Tokenizer;
use tokio::sync::{broadcast, mpsc};
use tui::backend::CrosstermBackend;
use tui::Terminal;
/// Run benchmarking app
#[allow(clippy::too_many_arguments)]
pub async fn run(
tokenizer_name: String,
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
temperature: Option<f32>,
top_k: Option<u32>,
top_p: Option<f32>,
typical_p: Option<f32>,
repetition_penalty: Option<f32>,
watermark: bool,
do_sample: bool,
client: ShardedClient,
) -> Result<(), std::io::Error> {
let parameters = NextTokenChooserParameters {
temperature: temperature.unwrap_or(1.0),
top_k: top_k.unwrap_or(0),
top_p: top_p.unwrap_or(1.0),
typical_p: typical_p.unwrap_or(1.0),
do_sample,
seed: 0,
repetition_penalty: repetition_penalty.unwrap_or(1.0),
watermark,
};
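    // Note: the `unwrap_or` defaults above (temperature 1.0, top_k 0, top_p 1.0,
    // typical_p 1.0, repetition_penalty 1.0) are the conventional "disabled" values, so
    // unless the corresponding CLI flags are set the benchmark measures generation with
    // an unmodified next-token distribution (greedy unless `--do-sample` is passed).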
// Initialize terminal properties
crossterm::terminal::enable_raw_mode()?;
io::stdout().execute(crossterm::terminal::EnterAlternateScreen)?;
io::stdout().execute(crossterm::cursor::Hide)?;
// Initialize terminal
let mut terminal = {
let backend = CrosstermBackend::new(io::stdout());
Terminal::new(backend)?
};
// Create message channel between generation_task and app
let (run_sender, run_receiver) = mpsc::channel(8);
// Crossterm event channel
let (event_sender, mut event_receiver) = mpsc::channel(8);
// Shutdown channel to terminate tasks
let (shutdown_sender, _) = broadcast::channel(1);
// Channel to check if tasks terminated
let (shutdown_guard_sender, mut shutdown_guard_receiver) = mpsc::channel(1);
// Create generation task
tokio::spawn(generation::generation_task(
tokenizer,
batch_size.clone(),
sequence_length,
decode_length,
top_n_tokens,
n_runs,
warmups,
parameters,
client,
run_sender,
shutdown_sender.subscribe(),
shutdown_guard_sender.clone(),
));
// Create event task
tokio::spawn(event::terminal_event_task(
250,
event_sender,
shutdown_sender.subscribe(),
shutdown_guard_sender.clone(),
));
// Drop our end of shutdown sender
drop(shutdown_guard_sender);
// Create App
let mut app = App::new(
run_receiver,
tokenizer_name.clone(),
sequence_length,
decode_length,
n_runs,
batch_size,
);
while app.running {
// Draw frame
terminal.draw(|frame| app.render(frame))?;
// Await a new event from event handling task
match event_receiver.recv().await {
None => break,
// Update app state
Some(event) => match event {
Event::Tick => app.tick(),
Event::Key(key_event) => app.handle_key_event(key_event),
_ => {}
},
}
}
// Ask tasks to shutdown
let _ = shutdown_sender.send(());
// Wait for tasks to shutdown
let _ = shutdown_guard_receiver.recv().await;
// Revert terminal to original view
io::stdout().execute(crossterm::terminal::LeaveAlternateScreen)?;
crossterm::terminal::disable_raw_mode()?;
io::stdout().execute(crossterm::cursor::Show)?;
let parameters_table = table::parameters_table(
tokenizer_name,
sequence_length,
decode_length,
top_n_tokens,
n_runs,
warmups,
temperature,
top_k,
top_p,
typical_p,
repetition_penalty,
watermark,
do_sample,
);
println!("\n{parameters_table}\n");
let latency_table = table::latency_table(&app.data);
println!("\n{latency_table}\n");
let throughput_table = table::throughput_table(&app.data);
println!("\n{throughput_table}\n");
Ok(())
}
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/event.rs | /// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs
use crossterm::event;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, mpsc};
/// Events
#[derive(Debug)]
pub(crate) enum Event {
/// Terminal tick.
Tick,
/// Key press.
Key(event::KeyEvent),
/// Terminal resize.
Resize(u16, u16),
}
pub(crate) async fn terminal_event_task(
fps: u32,
event_sender: mpsc::Sender<Event>,
mut shutdown_receiver: broadcast::Receiver<()>,
_shutdown_guard_sender: mpsc::Sender<()>,
) {
// End task if a message is received on shutdown_receiver
// _shutdown_guard_sender will be dropped once the task is finished
tokio::select! {
_ = event_loop(fps, event_sender) => {
},
_ = shutdown_receiver.recv() => {}
}
}
/// Main event loop
async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) {
// Frame budget
let per_frame = Duration::from_secs(1) / fps;
// When was last frame executed
let mut last_frame = Instant::now();
loop {
// Sleep to avoid blocking the thread for too long
if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) {
tokio::time::sleep(sleep).await;
}
// Get crossterm event and send a new one over the channel
if event::poll(Duration::from_secs(0)).expect("no events available") {
match event::read().expect("unable to read event") {
event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()),
event::Event::Resize(w, h) => {
event_sender.send(Event::Resize(w, h)).await.unwrap_or(())
}
_ => (),
}
}
// Frame budget exceeded
if last_frame.elapsed() >= per_frame {
// Send tick
event_sender.send(Event::Tick).await.unwrap_or(());
            // Reset last_frame time
last_frame = Instant::now();
}
}
}
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/utils.rs | /// MIT License
//
// Copyright (c) 2020 hatoo
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
use std::collections::BTreeMap;
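/// Worked example (illustrative, not from the original source): `histogram(&[0.0, 1.0, 2.0, 2.0], 2)`
/// uses `step = (max - min) / (bins - 1) = 2.0` and returns `vec![(0.0, 1), (2.0, 3)]`.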
pub(crate) fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> {
assert!(bins >= 2);
let mut bucket: Vec<usize> = vec![0; bins];
let min = values.iter().collect::<average::Min>().min();
let max = values.iter().collect::<average::Max>().max();
let step = (max - min) / (bins - 1) as f64;
for &v in values {
let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1);
bucket[i] += 1;
}
bucket
.into_iter()
.enumerate()
.map(|(i, v)| (min + step * i as f64, v))
.collect()
}
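/// Worked example (illustrative): the callers sort the data first, so with
/// `percentiles(&[1.0, 2.0, 3.0, 4.0], &[50, 90])` the index formula below picks
/// `{"p50": 3.0, "p90": 4.0}`.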
pub(crate) fn percentiles(values: &[f64], percents: &[i32]) -> BTreeMap<String, f64> {
    percents
.iter()
.map(|&p| {
let i = (f64::from(p) / 100.0 * values.len() as f64) as usize;
(format!("p{p}"), *values.get(i).unwrap_or(&std::f64::NAN))
})
.collect()
}
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/app.rs | /// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs
use crate::generation::{Decode, Message, Prefill};
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use text_generation_client::ClientError;
use tokio::sync::mpsc;
use tui::backend::Backend;
use tui::layout::{Alignment, Constraint, Direction, Layout};
use tui::style::{Color, Modifier, Style};
use tui::text::{Line, Span};
use tui::widgets::{
Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs,
};
use tui::{symbols, Frame};
/// TUI powered App
pub(crate) struct App {
pub(crate) running: bool,
pub(crate) data: Data,
completed_runs: Vec<usize>,
completed_batch: usize,
current_batch: usize,
current_tab: usize,
touched_tab: bool,
zoom: bool,
is_error: bool,
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
n_run: usize,
receiver: mpsc::Receiver<Result<Message, ClientError>>,
}
impl App {
pub(crate) fn new(
receiver: mpsc::Receiver<Result<Message, ClientError>>,
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
n_run: usize,
batch_size: Vec<u32>,
) -> Self {
let current_tab = 0;
let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect();
let completed_batch = 0;
let current_batch = 0;
let is_error = false;
let data = Data::new(n_run, batch_size);
Self {
running: true,
data,
completed_runs,
completed_batch,
current_batch,
current_tab,
touched_tab: false,
zoom: false,
is_error,
tokenizer_name,
sequence_length,
decode_length,
n_run,
receiver,
}
}
/// Handle crossterm key events
pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) {
match key_event {
// Increase and wrap tab
KeyEvent {
code: KeyCode::Right,
..
}
| KeyEvent {
code: KeyCode::Tab, ..
} => {
self.touched_tab = true;
self.current_tab = (self.current_tab + 1) % self.data.batch_size.len();
}
// Decrease and wrap tab
KeyEvent {
code: KeyCode::Left,
..
} => {
self.touched_tab = true;
if self.current_tab > 0 {
self.current_tab -= 1;
} else {
self.current_tab = self.data.batch_size.len() - 1;
}
}
// Zoom on throughput/latency fig
KeyEvent {
code: KeyCode::Char('+'),
..
} => {
self.zoom = true;
}
// Unzoom on throughput/latency fig
KeyEvent {
code: KeyCode::Char('-'),
..
} => {
self.zoom = false;
}
// Quit
KeyEvent {
code: KeyCode::Char('q'),
..
}
| KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
..
} => {
self.running = false;
}
_ => (),
}
}
/// Get all pending messages from generation task
pub(crate) fn tick(&mut self) {
while let Ok(message) = self.receiver.try_recv() {
match message {
Ok(message) => match message {
Message::Prefill(step) => self.data.push_prefill(step, self.current_batch),
Message::Decode(step) => self.data.push_decode(step, self.current_batch),
Message::EndRun => {
self.completed_runs[self.current_batch] += 1;
}
Message::EndBatch => {
self.data.end_batch(self.current_batch);
self.completed_batch += 1;
if self.current_batch < self.data.batch_size.len() - 1 {
// Only go to next tab if the user never touched the tab keys
if !self.touched_tab {
self.current_tab += 1;
}
self.current_batch += 1;
}
}
Message::Warmup => {}
},
Err(_) => self.is_error = true,
}
}
}
/// Render frame
pub fn render<B: Backend>(&mut self, f: &mut Frame<'_, B>) {
let batch_progress =
(self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0);
let run_progress =
(self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0);
// Vertical layout
let row5 = Layout::default()
.direction(Direction::Vertical)
.constraints(
[
Constraint::Length(1),
Constraint::Length(3),
Constraint::Length(3),
Constraint::Length(13),
Constraint::Min(10),
]
.as_ref(),
)
.split(f.size());
// Top row horizontal layout
let top = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row5[2]);
// Mid row horizontal layout
let mid = Layout::default()
.direction(Direction::Horizontal)
.constraints(
[
Constraint::Percentage(25),
Constraint::Percentage(25),
Constraint::Percentage(25),
Constraint::Percentage(25),
]
.as_ref(),
)
.split(row5[3]);
// Left mid row vertical layout
let prefill_text = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(8), Constraint::Length(5)].as_ref())
.split(mid[0]);
// Right mid row vertical layout
let decode_text = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Length(8), Constraint::Length(5)].as_ref())
.split(mid[2]);
let decode_text_latency = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(decode_text[0]);
// Bottom row horizontal layout
let bottom = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row5[4]);
// Title
let title = Block::default()
.borders(Borders::NONE)
.title(format!(
"Model: {} | Sequence Length: {} | Decode Length: {}",
self.tokenizer_name, self.sequence_length, self.decode_length
))
.style(
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::White),
);
f.render_widget(title, row5[0]);
// Helper
let helper = Block::default()
.borders(Borders::NONE)
.title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom")
.title_alignment(Alignment::Right)
.style(Style::default().fg(Color::White));
f.render_widget(helper, row5[0]);
// Batch tabs
let titles = self
.data
.batch_size
.iter()
.map(|b| {
Line::from(vec![Span::styled(
format!("Batch: {b}"),
Style::default().fg(Color::White),
)])
})
.collect();
let tabs = Tabs::new(titles)
.block(Block::default().borders(Borders::ALL).title("Tabs"))
.select(self.current_tab)
.style(Style::default().fg(Color::LightCyan))
.highlight_style(
Style::default()
.add_modifier(Modifier::BOLD)
.bg(Color::Black),
);
f.render_widget(tabs, row5[1]);
// Total progress bar
let color = if self.is_error {
Color::Red
} else {
Color::LightGreen
};
let batch_gauge = progress_gauge(
"Total Progress",
format!("{} / {}", self.completed_batch, self.data.batch_size.len()),
batch_progress,
color,
);
f.render_widget(batch_gauge, top[0]);
// Batch progress Bar
let color = if self.is_error {
Color::Red
} else {
Color::LightBlue
};
let run_gauge = progress_gauge(
"Batch Progress",
format!(
"{} / {}",
self.completed_runs[self.current_batch], self.n_run
),
run_progress,
color,
);
f.render_widget(run_gauge, top[1]);
// Prefill text infos
let prefill_latency_block = latency_paragraph(
&mut self.data.prefill_latencies[self.current_tab],
"Prefill",
);
let prefill_throughput_block =
throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill");
f.render_widget(prefill_latency_block, prefill_text[0]);
f.render_widget(prefill_throughput_block, prefill_text[1]);
// Prefill latency histogram
let histo_width = 7;
let bins = if mid[1].width < 2 {
0
} else {
(mid[1].width as usize - 2) / (histo_width + 1)
}
.max(2);
let histo_data =
latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins);
let histo_data_str: Vec<(&str, u64)> =
histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect();
let prefill_histogram =
latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16);
f.render_widget(prefill_histogram, mid[1]);
// Decode text info
let decode_latency_block = latency_paragraph(
&mut self.data.decode_latencies[self.current_tab],
"Decode Total",
);
let decode_token_latency_block = latency_paragraph(
&mut self.data.decode_token_latencies[self.current_tab],
"Decode Token",
);
let decode_throughput_block =
throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode");
f.render_widget(decode_latency_block, decode_text_latency[0]);
f.render_widget(decode_token_latency_block, decode_text_latency[1]);
f.render_widget(decode_throughput_block, decode_text[1]);
// Decode latency histogram
let histo_data =
latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins);
let histo_data_str: Vec<(&str, u64)> =
histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect();
let decode_histogram =
latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16);
f.render_widget(decode_histogram, mid[3]);
// Prefill latency/throughput chart
let prefill_latency_throughput_chart = latency_throughput_chart(
&self.data.prefill_batch_latency_throughput,
&self.data.batch_size,
self.zoom,
"Prefill",
);
f.render_widget(prefill_latency_throughput_chart, bottom[0]);
// Decode latency/throughput chart
let decode_latency_throughput_chart = latency_throughput_chart(
&self.data.decode_batch_latency_throughput,
&self.data.batch_size,
self.zoom,
"Decode",
);
f.render_widget(decode_latency_throughput_chart, bottom[1]);
}
}
/// App internal data struct
pub(crate) struct Data {
pub(crate) batch_size: Vec<u32>,
pub(crate) prefill_latencies: Vec<Vec<f64>>,
pub(crate) prefill_throughputs: Vec<Vec<f64>>,
pub(crate) decode_latencies: Vec<Vec<f64>>,
pub(crate) decode_token_latencies: Vec<Vec<f64>>,
pub(crate) decode_throughputs: Vec<Vec<f64>>,
pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>,
pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>,
}
impl Data {
fn new(n_run: usize, batch_size: Vec<u32>) -> Self {
let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len())
.map(|_| Vec::with_capacity(n_run))
.collect();
let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone();
let decode_latencies: Vec<Vec<f64>> = prefill_latencies.clone();
let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone();
let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone();
let prefill_batch_latency_throughput: Vec<(f64, f64)> =
Vec::with_capacity(batch_size.len());
let decode_batch_latency_throughput: Vec<(f64, f64)> =
prefill_batch_latency_throughput.clone();
Self {
batch_size,
prefill_latencies,
prefill_throughputs,
decode_latencies,
decode_token_latencies,
decode_throughputs,
prefill_batch_latency_throughput,
decode_batch_latency_throughput,
}
}
fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) {
let latency = prefill.latency.as_micros() as f64 / 1000.0;
self.prefill_latencies[batch_idx].push(latency);
self.prefill_throughputs[batch_idx].push(prefill.throughput);
}
fn push_decode(&mut self, decode: Decode, batch_idx: usize) {
let latency = decode.latency.as_micros() as f64 / 1000.0;
let token_latency = decode.token_latency.as_micros() as f64 / 1000.0;
self.decode_latencies[batch_idx].push(latency);
self.decode_token_latencies[batch_idx].push(token_latency);
self.decode_throughputs[batch_idx].push(decode.throughput);
}
fn end_batch(&mut self, batch_idx: usize) {
self.prefill_batch_latency_throughput.push((
self.prefill_latencies[batch_idx].iter().sum::<f64>()
/ self.prefill_latencies[batch_idx].len() as f64,
self.prefill_throughputs[batch_idx].iter().sum::<f64>()
/ self.prefill_throughputs[batch_idx].len() as f64,
));
self.decode_batch_latency_throughput.push((
self.decode_latencies[batch_idx].iter().sum::<f64>()
/ self.decode_latencies[batch_idx].len() as f64,
self.decode_throughputs[batch_idx].iter().sum::<f64>()
/ self.decode_throughputs[batch_idx].len() as f64,
));
}
}
/// Progress bar
fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge {
Gauge::default()
.block(Block::default().title(title).borders(Borders::ALL))
.gauge_style(Style::default().fg(color))
.label(Span::raw(label))
.ratio(progress)
}
/// Throughput paragraph
fn throughput_paragraph<'a>(throughput: &Vec<f64>, name: &'static str) -> Paragraph<'a> {
// Throughput average/high/low texts
let throughput_texts = statis_spans(throughput, "tokens/secs");
// Throughput block
Paragraph::new(throughput_texts).block(
Block::default()
.title(Span::raw(format!("{name} Throughput")))
.borders(Borders::ALL),
)
}
/// Latency paragraph
fn latency_paragraph<'a>(latency: &mut Vec<f64>, name: &'static str) -> Paragraph<'a> {
// Latency average/high/low texts
let mut latency_texts = statis_spans(latency, "ms");
// Sort latency for percentiles
float_ord::sort(latency);
let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]);
// Latency p50/p90/p99 texts
let colors = vec![Color::LightGreen, Color::LightYellow, Color::LightRed];
for (i, (name, value)) in latency_percentiles.iter().enumerate() {
let span = Line::from(vec![Span::styled(
format!("{name}: {value:.2} ms"),
Style::default().fg(colors[i]),
)]);
latency_texts.push(span);
}
Paragraph::new(latency_texts).block(
Block::default()
.title(Span::raw(format!("{name} Latency")))
.borders(Borders::ALL),
)
}
/// Average/High/Low spans
fn statis_spans<'a>(data: &Vec<f64>, unit: &'static str) -> Vec<Line<'a>> {
vec![
Line::from(vec![Span::styled(
format!(
"Average: {:.2} {unit}",
data.iter().sum::<f64>() / data.len() as f64
),
Style::default().fg(Color::LightBlue),
)]),
Line::from(vec![Span::styled(
format!(
"Lowest: {:.2} {unit}",
data.iter()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN)
),
Style::default().fg(Color::Reset),
)]),
Line::from(vec![Span::styled(
format!(
"Highest: {:.2} {unit}",
data.iter()
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN)
),
Style::default().fg(Color::Reset),
)]),
]
}
/// Latency histogram data
fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> {
let histo_data: Vec<(String, u64)> = {
let histo = crate::utils::histogram(latency, bins);
histo
.into_iter()
.map(|(label, v)| (format!("{label:.2}"), v as u64))
.collect()
};
histo_data
}
/// Latency Histogram
fn latency_histogram<'a>(
histo_data_str: &'a Vec<(&'a str, u64)>,
name: &'static str,
) -> BarChart<'a> {
BarChart::default()
.block(
Block::default()
.title(format!("{name} latency histogram"))
.style(Style::default().fg(Color::LightYellow).bg(Color::Reset))
.borders(Borders::ALL),
)
.data(histo_data_str.as_slice())
}
/// Latency/Throughput chart
fn latency_throughput_chart<'a>(
latency_throughput: &'a Vec<(f64, f64)>,
batch_sizes: &'a [u32],
zoom: bool,
name: &'static str,
) -> Chart<'a> {
let latency_iter = latency_throughput.iter().map(|(l, _)| l);
let throughput_iter = latency_throughput.iter().map(|(_, t)| t);
// Get extreme values
let min_latency: f64 = *latency_iter
.clone()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
let max_latency: f64 = *latency_iter
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
let min_throughput: f64 = *throughput_iter
.clone()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
let max_throughput: f64 = *throughput_iter
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
    // Chart x axis min/max values
let min_x = if zoom {
((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0
} else {
0.0
};
let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0;
let step_x = (max_x - min_x) / 4.0;
    // Chart y axis min/max values
let min_y = if zoom {
((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0
} else {
0.0
};
let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0;
let step_y = (max_y - min_y) / 4.0;
    // X axis labels
let mut x_labels = vec![Span::styled(
format!("{min_x:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
)];
for i in 0..3 {
x_labels.push(Span::styled(
format!("{:.2}", min_x + ((i + 1) as f64 * step_x)),
Style::default().fg(Color::Gray).bg(Color::Reset),
));
}
x_labels.push(Span::styled(
format!("{max_x:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
));
    // Y axis labels
let mut y_labels = vec![Span::styled(
format!("{min_y:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
)];
for i in 0..3 {
y_labels.push(Span::styled(
format!("{:.2}", min_y + ((i + 1) as f64 * step_y)),
Style::default().fg(Color::Gray).bg(Color::Reset),
));
}
y_labels.push(Span::styled(
format!("{max_y:.2}"),
Style::default()
.add_modifier(Modifier::BOLD)
.fg(Color::Gray)
.bg(Color::Reset),
));
// Chart dataset
let colors = color_vec();
let datasets: Vec<Dataset> = (0..latency_throughput.len())
.map(|i| {
let color_idx = i % colors.len();
Dataset::default()
.name(batch_sizes[i].to_string())
.marker(symbols::Marker::Block)
.style(Style::default().fg(colors[color_idx]))
.graph_type(GraphType::Scatter)
.data(&latency_throughput[i..(i + 1)])
})
.collect();
// Chart
Chart::new(datasets)
.style(Style::default().fg(Color::Cyan).bg(Color::Reset))
.block(
Block::default()
.title(Span::styled(
format!("{name} throughput over latency"),
Style::default().fg(Color::Gray).bg(Color::Reset),
))
.borders(Borders::ALL),
)
.x_axis(
Axis::default()
.title("ms")
.style(Style::default().fg(Color::Gray).bg(Color::Reset))
.labels(x_labels)
.bounds([min_x, max_x]),
)
.y_axis(
Axis::default()
.title("tokens/secs")
.style(Style::default().fg(Color::Gray).bg(Color::Reset))
.labels(y_labels)
.bounds([min_y, max_y]),
)
}
// Colors for latency/throughput chart
fn color_vec() -> Vec<Color> {
vec![
Color::Red,
Color::Green,
Color::Yellow,
Color::Blue,
Color::Magenta,
Color::Cyan,
Color::Gray,
Color::DarkGray,
Color::LightRed,
Color::LightGreen,
Color::LightYellow,
Color::LightBlue,
Color::LightMagenta,
Color::LightCyan,
]
}
| 0 |
hf_public_repos/text-generation-inference/benchmark | hf_public_repos/text-generation-inference/benchmark/src/generation.rs | use std::time::{Duration, Instant};
use text_generation_client::{
Batch, CachedBatch, ClientError, NextTokenChooserParameters, Request, ShardedClient,
StoppingCriteriaParameters,
};
use tokenizers::{Tokenizer, TruncationDirection};
use tokio::sync::{broadcast, mpsc};
const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
#[derive(Debug, Clone)]
pub(crate) struct Prefill {
pub(crate) latency: Duration,
pub(crate) throughput: f64,
}
#[derive(Debug, Clone)]
pub(crate) struct Decode {
pub(crate) latency: Duration,
pub(crate) token_latency: Duration,
pub(crate) throughput: f64,
}
#[derive(Debug)]
pub(crate) enum Message {
Warmup,
Prefill(Prefill),
Decode(Decode),
EndRun,
EndBatch,
}
/// Benchmarking task
#[allow(clippy::too_many_arguments)]
pub(crate) async fn generation_task(
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
parameters: NextTokenChooserParameters,
client: ShardedClient,
run_sender: mpsc::Sender<Result<Message, ClientError>>,
mut shutdown_receiver: broadcast::Receiver<()>,
_shutdown_guard_sender: mpsc::Sender<()>,
) {
// End task if a message is received on shutdown_receiver
// _shutdown_guard_sender will be dropped once the task is finished
tokio::select! {
res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone()) => {
if let Err(err) = res {
run_sender.send(Err(err)).await.unwrap_or(());
}
},
_ = shutdown_receiver.recv() => {}
}
}
/// Benchmark prefill/decode
#[allow(clippy::too_many_arguments)]
async fn generate_runs(
tokenizer: Tokenizer,
batch_size: Vec<u32>,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
parameters: NextTokenChooserParameters,
mut client: ShardedClient,
run_sender: mpsc::Sender<Result<Message, ClientError>>,
) -> Result<(), ClientError> {
// Create a dummy sequence
let sequence = create_sequence(sequence_length, tokenizer);
for b in batch_size {
// Warmups on batch size
for _ in 0..warmups {
let (_, decode_batch) = prefill(
sequence.clone(),
sequence_length,
b,
decode_length,
parameters.clone(),
top_n_tokens,
&mut client,
)
.await?;
let _ = decode(decode_batch, &mut client).await?;
// Send warmup message
run_sender.send(Ok(Message::Warmup)).await.unwrap_or(());
}
for _ in 0..n_runs {
let (prefill, decode_batch) = prefill(
sequence.clone(),
sequence_length,
b,
decode_length,
parameters.clone(),
top_n_tokens,
&mut client,
)
.await?;
// Send prefill message
run_sender
.send(Ok(Message::Prefill(prefill)))
.await
.unwrap_or(());
let decode = decode(decode_batch, &mut client).await?;
// Send decode message
run_sender
.send(Ok(Message::Decode(decode)))
.await
.unwrap_or(());
// Send run ended message
run_sender.send(Ok(Message::EndRun)).await.unwrap_or(());
}
// Batch ended
run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(());
}
Ok(())
}
// Run a prefill step
async fn prefill(
sequence: String,
sequence_length: u32,
batch_size: u32,
decode_length: u32,
parameters: NextTokenChooserParameters,
top_n_tokens: Option<u32>,
client: &mut ShardedClient,
) -> Result<(Prefill, CachedBatch), ClientError> {
// Create requests
let requests = (0..batch_size)
.map(|id| Request {
id: id.into(),
prefill_logprobs: false,
inputs: sequence.clone(),
truncate: sequence_length,
parameters: Some(parameters.clone()),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: decode_length,
stop_sequences: vec![],
                ignore_eos_token: true, // Will not stop even if an EOS token is generated
}),
top_n_tokens: top_n_tokens.unwrap_or(0),
})
.collect();
let batch = Batch {
id: 0,
requests,
size: batch_size,
max_tokens: batch_size * (sequence_length + decode_length),
};
// Run prefill
let start_time = Instant::now();
let (_, decode_batch, _) = client.prefill(batch.clone()).await?;
// Get latency
let latency = start_time.elapsed();
// Compute throughput from latency and batch size
let throughput = batch_size as f64 / latency.as_secs_f64();
// Decode batch cannot be empty
let decode_batch = decode_batch.expect("decode_batch is None. This is a bug.");
let step = Prefill {
latency,
throughput,
};
Ok((step, decode_batch))
}
/// Run a full decode
async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> {
let mut decode_length = 0;
let batch_size = batch.size;
let start_time = Instant::now();
// Full decode over decode length
let mut next_batch = Some(batch);
while let Some(batch) = next_batch {
let result = client.decode(vec![batch]).await?;
next_batch = result.1;
decode_length += 1;
}
// Get latency
let latency = start_time.elapsed();
let token_latency = latency / decode_length;
// Compute throughput from latency, batch size and decode length
let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64();
let step = Decode {
latency,
token_latency,
throughput,
};
Ok(step)
}
/// Create a dummy sequence of the correct length
fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String {
let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len();
// Repeat lorem ipsum to cover sequence length
let string_sequence =
LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len());
// Encode sequence
let mut encoding = tokenizer.encode(string_sequence, true).unwrap();
// Truncate to sequence_length
encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left);
// Decode
tokenizer.decode(encoding.get_ids(), false).unwrap()
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/proto/generate.proto | syntax = "proto3";
package generate.v2;
service TextGenerationService {
/// Model Info
rpc Info (InfoRequest) returns (InfoResponse) {}
/// Service discovery
rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {}
/// Empties batch cache
rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
/// Remove requests from a cached batch
rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse);
/// Warmup the model and compute max cache size
rpc Warmup (WarmupRequest) returns (WarmupResponse);
/// Prefill batch and decode first token
rpc Prefill (PrefillRequest) returns (PrefillResponse);
/// Decode token for a list of prefilled batches
rpc Decode (DecodeRequest) returns (DecodeResponse);
/// Health check
rpc Health (HealthRequest) returns (HealthResponse);
}
message HealthRequest {}
message HealthResponse {}
/// Empty request
message InfoRequest {}
message InfoResponse {
bool requires_padding = 1;
string dtype = 2;
string device_type = 3;
optional uint32 window_size = 4;
uint32 speculate = 5;
}
/// Empty request
message ServiceDiscoveryRequest {}
message ServiceDiscoveryResponse {
/// Other shards urls
repeated string urls = 1;
}
message ClearCacheRequest {
/// Optional batch id
optional uint64 id = 1;
}
/// Empty response
message ClearCacheResponse {}
message NextTokenChooserParameters {
    /// exponential scaling of the output probability distribution
float temperature = 1;
/// restricting to the k highest probability elements
uint32 top_k = 2;
    /// restricting to the smallest set of most probable tokens whose cumulative probability is >= top_p (nucleus sampling)
float top_p = 3;
    /// restricting to the smallest set of locally typical tokens whose cumulative probability is >= typical_p
float typical_p = 4;
/// apply sampling on the logits
bool do_sample = 5;
/// random seed for sampling
uint64 seed = 6;
/// repetition penalty
float repetition_penalty = 7;
/// token watermarking using "A Watermark for Large Language Models"
bool watermark = 8;
}
message StoppingCriteriaParameters {
/// Maximum number of generated tokens
uint32 max_new_tokens = 1;
/// Optional stopping sequences
repeated string stop_sequences = 2;
/// Ignore end of sequence token
/// used for benchmarking
bool ignore_eos_token = 3;
}
message Request {
/// Request ID
uint64 id = 1;
/// The generation context
string inputs = 2;
/// Context truncation
uint32 truncate = 3;
/// Next Token Chooser Parameters
NextTokenChooserParameters parameters = 4;
/// Stopping Criteria Parameters
StoppingCriteriaParameters stopping_parameters = 5;
/// Return prefill logprobs
bool prefill_logprobs = 6;
/// Return most likely n tokens
uint32 top_n_tokens = 7;
}
message Batch {
/// Batch ID
uint64 id = 1;
/// Individual requests
repeated Request requests = 2;
/// Batch size (==len(requests))
uint32 size = 3;
/// Maximum number of tokens this batch will grow to
uint32 max_tokens = 4;
}
message CachedBatch {
/// Batch ID
uint64 id = 1;
/// Individual requests ids
repeated uint64 request_ids = 2;
/// Batch size (==len(requests))
uint32 size = 3;
/// Maximum number of tokens this batch will grow to
uint32 max_tokens = 4;
}
enum FinishReason {
FINISH_REASON_LENGTH = 0;
FINISH_REASON_EOS_TOKEN = 1;
FINISH_REASON_STOP_SEQUENCE = 2;
}
message GeneratedText {
/// Output
string text = 1;
/// Number of generated tokens
uint32 generated_tokens = 2;
/// Finish reason
FinishReason finish_reason = 3;
/// Seed
optional uint64 seed = 4;
}
message Tokens {
/// Token IDs
repeated uint32 ids = 1;
/// Logprobs
repeated float logprobs = 2;
/// tokens
repeated string texts = 3;
/// special
repeated bool is_special = 4;
}
message Generation {
/// Request ID
uint64 request_id = 1;
/// Prefill tokens (optional)
Tokens prefill_tokens = 2;
Tokens tokens = 3;
/// Complete generated text
optional GeneratedText generated_text = 4;
/// Top tokens
repeated Tokens top_tokens = 5;
}
message FilterBatchRequest {
/// Batch ID
uint64 batch_id = 1;
/// Requests to keep
repeated uint64 request_ids = 2;
}
message FilterBatchResponse {
/// Filtered Batch (cached)
CachedBatch batch = 1;
}
message PrefillRequest {
/// Batch
Batch batch = 1;
}
message PrefillResponse {
/// Generation
repeated Generation generations = 1;
/// Next batch (cached)
optional CachedBatch batch = 2;
/// Forward elapsed time in nanoseconds
uint64 forward_ns = 3;
/// Decode elapsed time in nanoseconds
uint64 decode_ns = 4;
/// Total elapsed time in nanoseconds
uint64 total_ns = 5;
}
message DecodeRequest {
/// Cached batches
repeated CachedBatch batches = 1;
}
message DecodeResponse {
/// Decodes
repeated Generation generations = 1;
/// Next batch (cached)
optional CachedBatch batch = 2;
/// Forward elapsed time in nanoseconds
uint64 forward_ns = 3;
/// Decode elapsed time in nanoseconds
uint64 decode_ns = 4;
/// Total elapsed time in nanoseconds
uint64 total_ns = 5;
/// Concatenate elapsed time in nanoseconds
optional uint64 concat_ns = 6;
}
message WarmupRequest {
/// Batch to warmup on
Batch batch = 1;
uint32 max_input_length = 2;
uint32 max_prefill_tokens = 3;
uint32 max_total_tokens = 4;
}
/// Empty response
message WarmupResponse {
/// Maximum number of tokens supported by the model
optional uint32 max_supported_total_tokens = 1;
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/router/build.rs | use std::error::Error;
use vergen::EmitBuilder;
fn main() -> Result<(), Box<dyn Error>> {
// Try to get the git sha from the local git repository
if EmitBuilder::builder()
.fail_on_error()
.git_sha(false)
.emit()
.is_err()
{
// Unable to get the git sha
if let Ok(sha) = std::env::var("GIT_SHA") {
// Set it from an env var
println!("cargo:rustc-env=VERGEN_GIT_SHA={sha}");
}
}
// Set docker label if present
if let Ok(label) = std::env::var("DOCKER_LABEL") {
// Set it from an env var
println!("cargo:rustc-env=DOCKER_LABEL={label}");
}
Ok(())
}
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/router/README.md | # Router
Also named `webserver` throughout the docs.
This router handles most of the logic around "batches": it decides when to pass
new `prefill` requests, when to pause `decode` requests, which ones, etc.
It uses gRPC to communicate with the shards, which can therefore be kept
much simpler and focus on having the most efficient forward passes possible.
## Continuous batching
One important feature of `text-generation-inference` is enabled
by this `router`.
Continuous batching is the act of regularly running queries in the same
`forward` step of the LLM (a "batch") and also removing them when they are
finished.
In order for continuous batching to be useful, you need to have more compute available
with respect to the memory requirements of your model. This is essentially true for
LLMs, and the larger the model, the truer it gets (since you have to pool multiple
GPUs to load the model, you effectively have a lot of compute power on your hands).
Static batching is the act of doing several queries at the same time, but usually
this is controlled by the client, and therefore the amount of batching is decided
beforehand.
For text generation, and LLMs, which are memory bound, we can try to be much more
efficient with the available compute by having clients send us single queries and
letting the router mix & match queries into or out of batches so that the compute is
used as efficiently as possible. This is possible because, for LLMs, the total compute
for running the model is much bigger than the mix & match of the batches themselves.
### Simple continuous batching
text-generation works by feeding a prompt to a model, and iteratively calling
`forward` on the model to produce new text, 1 token at a time.
The first idea is simple: when a query arrives, we start working on it directly.
When new queries arrive, we simply wait for the current `forward` to be finished,
then batch the currently running prompt with the new query, and call `forward`.
Whenever either query is finished (either the model produces an EOS (end of sequence) token
or the query reaches its allowed limit), we simply drop it from the batch, free
all of its allocated memory, and continue with the rest until nothing is left.
This simple idea generalizes very well and we could potentially stack many requests
in the same batch.
One thing to note is that queries can potentially be run with different parameters,
meaning different ways to choose the next token (sampling, not sampling, temperature, top_k, etc.). This is not problematic for the proposed approach; we just need to do the sampling
independently on each member of the batch.
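To make this concrete, here is a deliberately tiny, self-contained Rust sketch of such a loop (the `Query` type, the fake `forward` function and all the numbers are invented for illustration only; the real router works on gRPC batches and runs fully asynchronously):
```rust
use std::collections::VecDeque;
struct Query {
    prompt: String,
    generated: usize,
    max_new_tokens: usize,
}
impl Query {
    fn is_finished(&self) -> bool {
        // The real router also stops on an EOS token; we only check the length budget here.
        self.generated >= self.max_new_tokens
    }
}
/// One (fake) forward pass: every query in the batch produces one new token.
fn forward(batch: &mut [Query]) {
    for query in batch.iter_mut() {
        query.generated += 1;
    }
}
fn main() {
    // Queries that "arrived" while the previous forward pass was running.
    let mut waiting: VecDeque<Query> = VecDeque::from(vec![
        Query { prompt: "Hello".into(), generated: 0, max_new_tokens: 3 },
        Query { prompt: "How are you?".into(), generated: 0, max_new_tokens: 5 },
    ]);
    let mut batch: Vec<Query> = Vec::new();
    while !waiting.is_empty() || !batch.is_empty() {
        // Join newly arrived queries to the running batch...
        while let Some(query) = waiting.pop_front() {
            batch.push(query);
        }
        // ...run a single decoding step for everyone...
        forward(&mut batch);
        // ...and drop finished queries, freeing their slot in the batch.
        batch.retain(|query| {
            if query.is_finished() {
                println!("finished `{}` after {} tokens", query.prompt, query.generated);
                false
            } else {
                true
            }
        });
    }
}
```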
### Prefill, decode and past key values
In order to make LLMs and text-generation efficient, there's actually a very powerful
trick that can be used, which is the "caching" of some attention matrices. [More on that
in the first part of this blog](https://huggingface.co/blog/accelerated-inference#getting-to-the-first-10x-speedup)
What this means is that the first "pass" of a prompt is different from the subsequent
"forward" passes: for the first one we have to compute the entire attention matrix, whereas the follow-ups only require computing the attention for the new token.
The first pass is called `prefill` throughout this codebase, whereas the follow-ups are called `decode`.
Since `prefill` is much more expensive than `decode`, we don't want to do it all the time,
but a currently running query is probably doing `decode`. If we want to do continuous
batching as explained previously, we need to run `prefill` at some point in order to create
the attention matrix required to join the `decode` group.
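To get a feel for the cost asymmetry, here is a toy cost model in Rust (unit-less "work", made-up numbers, nothing to do with the real attention kernels): `prefill` pays a roughly quadratic price once to fill the cache, while each `decode` step only pays for the new token against the cached positions.
```rust
/// Toy "KV cache": we only remember how many positions have already been processed.
struct KvCache {
    cached_positions: usize,
}
/// Prefill: every prompt token attends to every other one, so ~n^2 "work" for a prompt of length n.
fn prefill(prompt_len: usize, cache: &mut KvCache) -> usize {
    cache.cached_positions = prompt_len;
    prompt_len * prompt_len
}
/// Decode: only the new token attends to the cached positions, so ~n "work" per step.
fn decode(cache: &mut KvCache) -> usize {
    cache.cached_positions += 1;
    cache.cached_positions
}
fn main() {
    let mut cache = KvCache { cached_positions: 0 };
    let prefill_work = prefill(512, &mut cache);
    let decode_work: usize = (0..32).map(|_| decode(&mut cache)).sum();
    println!("prefill: ~{prefill_work} work units, 32 decode steps: ~{decode_work} work units");
}
```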
`text-generation-inference` uses a bunch of different strategies and parameters in
order to enable you to find the sweet spot between exploiting the hardware and perceived latency.
With no continuous batching at all, latency is going to be super good, but throughput (meaning
the total number of requests allowed in a given timeframe) is going to be super bad (since it's essentially 1).
With static batching, you can probably reach the maximum throughput (by using the maximum total batch size applicable to your hardware), but the latency is super bad since in order to have maximum throughput you need to wait for requests to come in before processing.
With continuous batching you can find a sweet spot. In general latency is the most critical
parameter users care about. But a 2x latency slowdown for 10x more users on the same
hardware is an acceptable tradeoff.
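As a rough back-of-the-envelope illustration of that tradeoff (all numbers below are made up):
```rust
fn tokens_per_second(batch_size: u32, step_latency_ms: f64) -> f64 {
    batch_size as f64 * 1000.0 / step_latency_ms
}
fn main() {
    // Hypothetical numbers, only meant to illustrate the tradeoff.
    let single = tokens_per_second(1, 20.0); // one user, 20 ms per decode step
    let batched = tokens_per_second(10, 40.0); // ten users, 40 ms per decode step
    println!("single user: {single:.0} tokens/s, batch of 10: {batched:.0} tokens/s");
    // -> 50 vs 250 tokens/s: 2x the per-request latency buys 5x the total throughput.
}
```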
## Token streaming
This is a very important aspect of client UX. As mentioned above, latency is the
most critical perceived quality of an LLM API.
With token streaming, the server can start answering after the first `prefill` pass
directly, without waiting for all the generation to be done. For extremely long queries
this means clients can start to see something happening orders of magnitude before
the work is done. Seeing something in progress allows them to cut the generation short if it's
not what they wanted, but it also simply "feels" better.
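A minimal sketch of the streaming idea, using the `async-stream` crate the router already depends on (the `next_token` function and its hard-coded tokens are stand-ins for the real inference path, not the actual server code): each token is handed to the client as soon as it is decoded instead of being buffered until the end.
```rust
use async_stream::stream;
use futures::{executor::block_on, pin_mut, Stream, StreamExt};
// Stand-in for one decode step of the real model.
fn next_token(step: usize) -> Option<String> {
    ["Hello", " world", "!"].get(step).map(|s| s.to_string())
}
fn token_stream() -> impl Stream<Item = String> {
    stream! {
        let mut step = 0;
        while let Some(token) = next_token(step) {
            // Yield right away: the client sees the token before the generation is done.
            yield token;
            step += 1;
        }
    }
}
fn main() {
    block_on(async {
        let stream = token_stream();
        pin_mut!(stream);
        while let Some(token) = stream.next().await {
            print!("{token}");
        }
        println!();
    });
}
```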
| 0 |
hf_public_repos/text-generation-inference | hf_public_repos/text-generation-inference/router/Cargo.toml | [package]
name = "text-generation-router"
description = "Text Generation Webserver"
build = "build.rs"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[lib]
path = "src/lib.rs"
[[bin]]
name = "text-generation-router"
path = "src/main.rs"
[dependencies]
async-stream = "0.3.5"
axum = { version = "0.6.20", features = ["json"] }
axum-tracing-opentelemetry = "0.14.1"
text-generation-client = { path = "client" }
clap = { version = "4.4.5", features = ["derive", "env"] }
futures = "0.3.28"
hf-hub = { version = "0.3.0", features = ["tokio"] }
metrics = "0.21.1"
metrics-exporter-prometheus = { version = "0.12.1", features = [] }
nohash-hasher = "0.2.0"
opentelemetry = { version = "0.20.0", features = ["rt-tokio"] }
opentelemetry-otlp = "0.13.0"
rand = "0.8.5"
reqwest = { version = "0.11.20", features = [] }
serde = "1.0.188"
serde_json = "1.0.107"
thiserror = "1.0.48"
tokenizers = { version = "0.14.0", features = ["http"] }
tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] }
tokio-stream = "0.1.14"
tower-http = { version = "0.4.4", features = ["cors"] }
tracing = "0.1.37"
tracing-opentelemetry = "0.21.0"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
utoipa = { version = "3.5.0", features = ["axum_extras"] }
utoipa-swagger-ui = { version = "3.1.5", features = ["axum"] }
ngrok = { version = "0.13.1", features = ["axum"], optional = true }
init-tracing-opentelemetry = { version = "0.14.1", features = ["opentelemetry-otlp"] }
minijinja = "1.0.10"
futures-util = "0.3.30"
[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "git", "gitcl"] }
[features]
default = ["ngrok"]
ngrok = ["dep:ngrok"]
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/client/build.rs | use std::fs;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("cargo:rerun-if-changed=../../proto/generate.proto");
fs::create_dir("src/pb").unwrap_or(());
let mut config = prost_build::Config::new();
config.protoc_arg("--experimental_allow_proto3_optional");
tonic_build::configure()
.build_client(true)
.build_server(false)
.out_dir("src/pb")
.include_file("mod.rs")
.compile_with_config(config, &["../../proto/generate.proto"], &["../../proto"])
.unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));
Ok(())
}
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/client/Cargo.toml | [package]
name = "text-generation-client"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
futures = "^0.3"
grpc-metadata = { path = "../grpc-metadata" }
prost = "^0.12"
thiserror = "^1.0"
tokio = { version = "^1.32", features = ["sync"] }
tonic = "^0.10"
tower = "^0.4"
tracing = "^0.1"
[build-dependencies]
tonic-build = "0.10.1"
prost-build = "0.12.1"
| 0 |
hf_public_repos/text-generation-inference/router/client | hf_public_repos/text-generation-inference/router/client/src/lib.rs | //! Text Generation gRPC client library
mod client;
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod sharded_client;
pub use client::Client;
pub use pb::generate::v2::HealthResponse;
pub use pb::generate::v2::InfoResponse as ShardInfo;
pub use pb::generate::v2::{
Batch, CachedBatch, FinishReason, GeneratedText, Generation, NextTokenChooserParameters,
Request, StoppingCriteriaParameters, Tokens,
};
pub use sharded_client::ShardedClient;
use thiserror::Error;
use tonic::transport;
use tonic::Status;
#[derive(Error, Debug, Clone)]
pub enum ClientError {
#[error("Could not connect to Text Generation server: {0}")]
Connection(String),
#[error("Server error: {0}")]
Generation(String),
#[error("Sharded results are empty")]
EmptyResults,
}
impl From<Status> for ClientError {
fn from(err: Status) -> Self {
let err = Self::Generation(err.message().to_string());
tracing::error!("{err}");
err
}
}
impl From<transport::Error> for ClientError {
fn from(err: transport::Error) -> Self {
let err = Self::Connection(err.to_string());
tracing::error!("{err}");
err
}
}
pub type Result<T> = std::result::Result<T, ClientError>;
| 0 |
hf_public_repos/text-generation-inference/router/client | hf_public_repos/text-generation-inference/router/client/src/sharded_client.rs | use crate::client::{DecodeTimings, PrefillTimings};
/// Multi shard Client
use crate::{Batch, CachedBatch, Client, Generation, HealthResponse, ShardInfo};
use crate::{ClientError, Result};
use futures::future::join_all;
use tonic::transport::Uri;
use tracing::instrument;
#[derive(Debug, Clone)]
/// Text Generation Inference gRPC multi client
pub struct ShardedClient {
clients: Vec<Client>,
}
impl ShardedClient {
fn new(clients: Vec<Client>) -> Self {
Self { clients }
}
    /// Create a new ShardedClient from a master client. The master client will communicate with
    /// the other shards and return all uris/unix sockets with the `service_discovery` gRPC method.
async fn from_master_client(mut master_client: Client) -> Result<Self> {
// Get all uris/unix sockets from the master client
let uris = master_client.service_discovery().await?;
let futures = uris.into_iter().map(Client::connect_uds);
let clients: Result<Vec<Client>> = join_all(futures).await.into_iter().collect();
Ok(Self::new(clients?))
}
/// Returns a client connected to the given uri
pub async fn connect(uri: Uri) -> Result<Self> {
let master_client = Client::connect(uri).await?;
Self::from_master_client(master_client).await
}
/// Returns a client connected to the given unix socket
pub async fn connect_uds(path: String) -> Result<Self> {
let master_client = Client::connect_uds(path).await?;
Self::from_master_client(master_client).await
}
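    // A minimal usage sketch (hypothetical socket path and limits, assuming a
    // text-generation-server shard is already listening on that socket):
    //
    //     let mut client = ShardedClient::connect_uds("/tmp/text-generation-server-0".to_string()).await?;
    //     let shard_info = client.info().await?;
    //     let max_supported_tokens = client.warmup(1024, 4096, 2048).await?;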
/// Get the model info
#[instrument(skip(self))]
pub async fn info(&mut self) -> Result<ShardInfo> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.info())
.collect();
join_all(futures).await.pop().unwrap()
}
/// GRPC health check
#[instrument(skip(self))]
pub async fn health(&mut self) -> Result<HealthResponse> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.health())
.collect();
join_all(futures).await.pop().unwrap()
}
/// Clear the past generations cache
#[instrument(skip(self))]
pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.clear_cache(batch_id))
.collect();
join_all(futures).await.into_iter().collect()
}
/// Filter a cached batch
#[instrument(skip(self))]
pub async fn filter_batch(
&mut self,
batch_id: u64,
request_ids: Vec<u64>,
) -> Result<Option<CachedBatch>> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.filter_batch(batch_id, request_ids.clone())))
.collect();
// all shards return the same message
join_all(futures).await.pop().unwrap()
}
/// Warmup on a max size batch
///
/// Returns the maximum amount of tokens supported by the hardware
#[instrument(skip(self))]
pub async fn warmup(
&mut self,
max_input_length: u32,
max_prefill_tokens: u32,
max_total_tokens: u32,
) -> Result<Option<u32>> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| {
Box::pin(client.warmup(max_input_length, max_prefill_tokens, max_total_tokens))
})
.collect();
// Take the minimum value
let results = join_all(futures)
.await
.into_iter()
.collect::<Result<Vec<Option<u32>>>>()?;
Ok(results.into_iter().flatten().min())
}
/// Generate one token for each request in the given batch
///
/// Returns Generation for each request in batch
/// and the next cached batch
    #[instrument(skip_all, fields(id = &batch.id, size = &batch.size))]
pub async fn prefill(
&mut self,
batch: Batch,
) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.prefill(batch.clone())))
.collect();
let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)>> =
join_all(futures).await.into_iter().collect();
let mut results = results?;
let (mut generations, next_batch, mut timings) =
results.pop().ok_or(ClientError::EmptyResults)?;
// Merge generations from different model shards
for (mut shard_generations, _, shard_timings) in results.into_iter() {
generations.append(&mut shard_generations);
// Return the timings of the slowest shard
if shard_timings.total > timings.total {
timings = shard_timings;
}
}
Ok((generations, next_batch, timings))
}
/// Generate one token for each request in the given cached batches
///
/// Returns Generation for each request in batches
/// and the next cached batch
    #[instrument(skip_all, fields(size = batches.iter().map(|batch| batch.size).sum::<u32>()))]
pub async fn decode(
&mut self,
batches: Vec<CachedBatch>,
) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.decode(batches.clone())))
.collect();
let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)>> =
join_all(futures).await.into_iter().collect();
let mut results = results?;
let (mut generations, next_batch, mut timings) =
results.pop().ok_or(ClientError::EmptyResults)?;
// Merge generations from different model shards
for (mut shard_generations, _, shard_timings) in results.into_iter() {
generations.append(&mut shard_generations);
// Return the timings of the slowest shard
if shard_timings.total > timings.total {
timings = shard_timings;
}
}
Ok((generations, next_batch, timings))
}
}
| 0 |
hf_public_repos/text-generation-inference/router/client | hf_public_repos/text-generation-inference/router/client/src/client.rs | /// Single shard Client
use crate::pb::generate::v2::text_generation_service_client::TextGenerationServiceClient;
use crate::pb::generate::v2::*;
use crate::Result;
use grpc_metadata::InjectTelemetryContext;
use std::cmp::min;
use std::time::Duration;
use tonic::transport::{Channel, Uri};
use tracing::instrument;
/// Text Generation Inference gRPC client
#[derive(Debug, Clone)]
pub struct Client {
stub: TextGenerationServiceClient<Channel>,
}
impl Client {
/// Returns a client connected to the given url
pub async fn connect(uri: Uri) -> Result<Self> {
let channel = Channel::builder(uri).connect().await?;
Ok(Self {
stub: TextGenerationServiceClient::new(channel),
})
}
/// Returns a client connected to the given unix socket
pub async fn connect_uds(path: String) -> Result<Self> {
let channel = Channel::from_shared("http://[::]:50051".to_string())
.unwrap()
.connect_with_connector(tower::service_fn(move |_: Uri| {
tokio::net::UnixStream::connect(path.clone())
}))
.await?;
Ok(Self {
stub: TextGenerationServiceClient::new(channel),
})
}
/// Returns a list of uris or unix sockets of all shards
#[instrument(skip(self))]
pub async fn service_discovery(&mut self) -> Result<Vec<String>> {
let request = tonic::Request::new(ServiceDiscoveryRequest {}).inject_context();
let response = self.stub.service_discovery(request).await?;
let urls = response
.into_inner()
.urls
.into_iter()
// Remove unix socket prefix
.map(|url| match url.strip_prefix("unix://") {
None => url,
Some(stripped_url) => stripped_url.to_string(),
})
.collect();
Ok(urls)
}
/// Get model info
#[instrument(skip(self))]
pub async fn info(&mut self) -> Result<InfoResponse> {
let request = tonic::Request::new(InfoRequest {}).inject_context();
let response = self.stub.info(request).await?.into_inner();
Ok(response)
}
/// Get model health
#[instrument(skip(self))]
pub async fn health(&mut self) -> Result<HealthResponse> {
let request = tonic::Request::new(HealthRequest {}).inject_context();
let response = self.stub.health(request).await?.into_inner();
Ok(response)
}
/// Clear the past generations cache
#[instrument(skip(self))]
pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> {
let request = tonic::Request::new(ClearCacheRequest { id: batch_id }).inject_context();
self.stub.clear_cache(request).await?;
Ok(())
}
/// Filter a cached batch
#[instrument(skip(self))]
pub async fn filter_batch(
&mut self,
batch_id: u64,
request_ids: Vec<u64>,
) -> Result<Option<CachedBatch>> {
let request = tonic::Request::new(FilterBatchRequest {
batch_id,
request_ids,
})
.inject_context();
let filtered_batch = self.stub.filter_batch(request).await?.into_inner();
Ok(filtered_batch.batch)
}
/// Warmup on a max size batch
///
/// Returns the maximum amount of tokens supported by the hardware
#[instrument(skip_all)]
pub async fn warmup(
&mut self,
max_input_length: u32,
max_prefill_tokens: u32,
max_total_tokens: u32,
) -> Result<Option<u32>> {
let mut n_tokens = 0;
let mut requests = Vec::new();
// Create requests
while n_tokens < max_prefill_tokens {
let truncate = min(max_input_length, max_prefill_tokens - n_tokens);
requests.push(Request {
id: 0,
// We truncate the input on the server side to be sure that it has the correct size
inputs: "_test ".to_string().repeat(max_input_length as usize),
truncate,
// Set sampling parameters to also take these ops into account in the max memory
parameters: Some(NextTokenChooserParameters {
temperature: 0.9,
top_k: 10,
top_p: 0.9,
typical_p: 0.9,
do_sample: false,
seed: 0,
repetition_penalty: 1.2,
watermark: true,
}),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: max_total_tokens - truncate,
stop_sequences: vec![],
ignore_eos_token: true,
}),
prefill_logprobs: true,
top_n_tokens: 20,
});
n_tokens += max_input_length;
}
let batch = Batch {
id: 0,
size: requests.len() as u32,
requests,
max_tokens: 0,
};
let request = tonic::Request::new(WarmupRequest {
batch: Some(batch),
max_input_length,
max_prefill_tokens,
max_total_tokens,
})
.inject_context();
let response = self.stub.warmup(request).await?.into_inner();
Ok(response.max_supported_total_tokens)
}
/// Generate one token for each request in the given batch
///
/// Returns Generation for each request in batch
/// and the next cached batch
#[instrument(skip_all, fields(id = &batch.id, size = &batch.size))]
pub async fn prefill(
&mut self,
batch: Batch,
) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> {
let request = tonic::Request::new(PrefillRequest { batch: Some(batch) }).inject_context();
let response = self.stub.prefill(request).await?.into_inner();
Ok((
response.generations,
response.batch,
PrefillTimings::new(response.forward_ns, response.decode_ns, response.total_ns),
))
}
/// Generate one token for each request in the given cached batches
///
/// Returns Generation for each request in batches
/// and the next cached batch
#[instrument(skip_all, fields(size = batches.iter().map(|batch|{batch.size}).sum::<u32>()))]
pub async fn decode(
&mut self,
batches: Vec<CachedBatch>,
) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> {
let request = tonic::Request::new(DecodeRequest { batches }).inject_context();
let response = self.stub.decode(request).await?.into_inner();
Ok((
response.generations,
response.batch,
DecodeTimings::new(
response.concat_ns,
response.forward_ns,
response.decode_ns,
response.total_ns,
),
))
}
}
pub struct PrefillTimings {
pub forward: Duration,
pub decode: Duration,
pub total: Duration,
}
impl PrefillTimings {
fn new(forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
Self {
forward: Duration::from_nanos(forward_ns),
decode: Duration::from_nanos(decode_ns),
total: Duration::from_nanos(total_ns),
}
}
}
pub struct DecodeTimings {
pub concat: Option<Duration>,
pub forward: Duration,
pub decode: Duration,
pub total: Duration,
}
impl DecodeTimings {
fn new(concat_ns: Option<u64>, forward_ns: u64, decode_ns: u64, total_ns: u64) -> Self {
Self {
            concat: concat_ns.map(Duration::from_nanos),
forward: Duration::from_nanos(forward_ns),
decode: Duration::from_nanos(decode_ns),
total: Duration::from_nanos(total_ns),
}
}
}
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/grpc-metadata/Cargo.toml | [package]
name = "grpc-metadata"
version = "0.1.0"
edition = "2021"
[dependencies]
opentelemetry = "^0.20"
tonic = "^0.10"
tracing = "^0.1"
tracing-opentelemetry = "^0.21"
| 0 |
hf_public_repos/text-generation-inference/router/grpc-metadata | hf_public_repos/text-generation-inference/router/grpc-metadata/src/lib.rs | //! A crate to extract and inject a OpenTelemetry context from and to a gRPC request.
//! Inspired by: https://github.com/open-telemetry/opentelemetry-rust gRPC examples
use opentelemetry::global;
use opentelemetry::propagation::{Extractor, Injector};
use tracing_opentelemetry::OpenTelemetrySpanExt;
/// Extract context metadata from a gRPC request's metadata
struct MetadataExtractor<'a>(pub &'a tonic::metadata::MetadataMap);
impl<'a> Extractor for MetadataExtractor<'a> {
/// Get a value for a key from the MetadataMap. If the value can't be converted to &str, returns None
fn get(&self, key: &str) -> Option<&str> {
self.0.get(key).and_then(|metadata| metadata.to_str().ok())
}
/// Collect all the keys from the MetadataMap.
fn keys(&self) -> Vec<&str> {
self.0
.keys()
.map(|key| match key {
tonic::metadata::KeyRef::Ascii(v) => v.as_str(),
tonic::metadata::KeyRef::Binary(v) => v.as_str(),
})
.collect::<Vec<_>>()
}
}
/// Inject context in the metadata of a gRPC request.
struct MetadataInjector<'a>(pub &'a mut tonic::metadata::MetadataMap);
impl<'a> Injector for MetadataInjector<'a> {
/// Set a key and value in the MetadataMap. Does nothing if the key or value are not valid inputs
fn set(&mut self, key: &str, value: String) {
if let Ok(key) = tonic::metadata::MetadataKey::from_bytes(key.as_bytes()) {
if let Ok(val) = value.parse() {
self.0.insert(key, val);
}
}
}
}
/// Get a context from the global context and inject the span into a gRPC request's metadata.
fn inject(metadata: &mut tonic::metadata::MetadataMap) {
global::get_text_map_propagator(|propagator| {
propagator.inject_context(
&tracing::Span::current().context(),
&mut MetadataInjector(metadata),
)
})
}
pub trait InjectTelemetryContext {
fn inject_context(self) -> Self;
}
impl<T> InjectTelemetryContext for tonic::Request<T> {
fn inject_context(mut self) -> Self {
inject(self.metadata_mut());
self
}
}
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/src/validation.rs | /// Payload validation logic
use crate::validation::ValidationError::{BestOfSampling, BestOfSeed, EmptyInput};
use crate::{GenerateParameters, GenerateRequest};
use rand::{thread_rng, Rng};
use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters};
use thiserror::Error;
use tokenizers::tokenizer::Tokenizer;
use tokenizers::TruncationDirection;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tracing::{instrument, Span};
/// Validation
#[derive(Debug, Clone)]
pub struct Validation {
/// Validation parameters
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_length: usize,
max_total_tokens: usize,
/// Channel to communicate with the background tokenization task
sender: Option<mpsc::UnboundedSender<TokenizerRequest>>,
}
impl Validation {
pub(crate) fn new(
workers: usize,
tokenizer: Option<Tokenizer>,
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_length: usize,
max_total_tokens: usize,
) -> Self {
// If we have a fast tokenizer
let sender = if let Some(tokenizer) = tokenizer {
// Create round robin channel
let (validation_sender, validation_round_robin_receiver) = mpsc::unbounded_channel();
let mut senders = Vec::with_capacity(workers);
// Create workers
for _ in 0..workers {
let tokenizer_clone = tokenizer.clone();
let (tokenizer_sender, tokenizer_receiver) = mpsc::unbounded_channel();
senders.push(tokenizer_sender);
// Spawn worker
tokio::task::spawn_blocking(move || {
tokenizer_worker(tokenizer_clone, tokenizer_receiver)
});
}
// Create tokenization round robin task
tokio::spawn(round_robin_task(validation_round_robin_receiver, senders));
Some(validation_sender)
} else {
None
};
Self {
max_best_of,
sender,
max_stop_sequences,
max_top_n_tokens,
max_input_length,
max_total_tokens,
}
}
#[instrument(skip(self, inputs))]
async fn validate_input(
&self,
inputs: String,
truncate: Option<usize>,
max_new_tokens: Option<u32>,
) -> Result<(String, usize, u32), ValidationError> {
// If we have a fast tokenizer
if let Some(sender) = &self.sender {
// Create response channel
let (response_sender, response_receiver) = oneshot::channel();
// Send request to the background validation task
// Unwrap is safe here
sender
.send(((inputs, truncate), response_sender, Span::current()))
.unwrap();
// Await on response channel
// Unwrap is safe here
let (inputs, input_length) = response_receiver.await.unwrap()?;
// Get total tokens
let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
max_new_tokens
} else {
self.max_total_tokens.saturating_sub(input_length) as u32
};
let total_tokens = input_length + max_new_tokens as usize;
// Validate MaxTotalTokens
if total_tokens > self.max_total_tokens {
return Err(ValidationError::MaxTotalTokens(
self.max_total_tokens,
input_length,
max_new_tokens,
));
}
// Validate InputLength
if input_length > self.max_input_length {
return Err(ValidationError::InputLength(
self.max_input_length,
input_length,
));
}
metrics::histogram!("tgi_request_input_length", input_length as f64);
Ok((inputs, input_length, max_new_tokens))
}
// Return inputs without validation
else {
// In this case, we don't know the real length in tokens of the inputs
// However, the inputs will be truncated by the python servers
// We make sure that truncate + max_new_tokens <= self.max_total_tokens
let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
max_new_tokens
} else if let Some(truncate) = truncate {
self.max_total_tokens.saturating_sub(truncate) as u32
} else {
return Err(ValidationError::UnsetMaxNewTokens);
};
let input_length = truncate.unwrap_or(self.max_input_length);
// Validate MaxNewTokens
if (input_length as u32 + max_new_tokens) > self.max_total_tokens as u32 {
return Err(ValidationError::MaxNewTokens(
self.max_total_tokens - self.max_input_length,
max_new_tokens,
));
}
Ok((inputs, input_length, max_new_tokens))
}
}
/// Validate a payload and get the number of tokens in the input
#[instrument(skip_all)]
pub(crate) async fn validate(
&self,
request: GenerateRequest,
) -> Result<ValidGenerateRequest, ValidationError> {
let GenerateParameters {
best_of,
temperature,
repetition_penalty,
top_k,
top_p,
typical_p,
do_sample,
max_new_tokens,
stop: stop_sequences,
truncate,
seed,
watermark,
decoder_input_details,
top_n_tokens,
..
} = request.parameters;
// sampling must be true when best_of > 1
let best_of = best_of.unwrap_or(1);
let sampling = do_sample
|| temperature.is_some()
|| top_k.is_some()
|| top_p.is_some()
|| typical_p.is_some();
if best_of > 1 && !sampling {
return Err(BestOfSampling);
}
let temperature = temperature.unwrap_or(1.0);
if temperature <= 0.0 {
return Err(ValidationError::Temperature);
}
let repetition_penalty = repetition_penalty.unwrap_or(1.0);
if repetition_penalty <= 0.0 {
return Err(ValidationError::RepetitionPenalty);
}
// Different because the proto default value is not a valid value
// for the user
let top_p = top_p
.map(|value| {
if value <= 0.0 || value >= 1.0 {
return Err(ValidationError::TopP);
}
Ok(value)
})
.unwrap_or(Ok(1.0))?;
let typical_p = typical_p
.map(|value| {
if value <= 0.0 || value >= 1.0 {
return Err(ValidationError::TypicalP);
}
Ok(value)
})
.unwrap_or(Ok(1.0))?;
let top_k: u32 = top_k
.map(|value| {
if value <= 0 {
return Err(ValidationError::TopK);
}
Ok(value as u32)
})
.unwrap_or(Ok(0))?;
if max_new_tokens == Some(0) {
return Err(ValidationError::NegativeMaxNewTokens);
}
if stop_sequences.len() > self.max_stop_sequences {
return Err(ValidationError::StopSequence(
self.max_stop_sequences,
stop_sequences.len(),
));
}
// If seed is None, assign a random one
let seed = match seed {
None => thread_rng().gen(),
Some(seed) => {
if best_of > 1 {
return Err(BestOfSeed);
}
seed
}
};
let top_n_tokens = top_n_tokens
.map(|value| {
if value > self.max_top_n_tokens {
return Err(ValidationError::TopNTokens(self.max_top_n_tokens, value));
}
Ok(value)
})
.unwrap_or(Ok(0))?;
// Check if inputs is empty
if request.inputs.is_empty() {
return Err(EmptyInput);
}
// Check if truncate is strictly positive and less than max_input_length
let truncate = truncate
.map(|value| {
if value == 0 || value > self.max_input_length {
return Err(ValidationError::Truncate(self.max_input_length, value));
}
Ok(Some(value))
})
.unwrap_or(Ok(None))?;
// Validate inputs
let (inputs, input_length, max_new_tokens) = self
.validate_input(request.inputs, truncate, max_new_tokens)
.await?;
let parameters = NextTokenChooserParameters {
temperature,
repetition_penalty,
top_k,
top_p,
typical_p,
do_sample,
seed,
watermark,
};
let stopping_parameters = StoppingCriteriaParameters {
max_new_tokens,
stop_sequences,
ignore_eos_token: false,
};
metrics::histogram!("tgi_request_max_new_tokens", max_new_tokens as f64);
Ok(ValidGenerateRequest {
inputs,
decoder_input_details,
input_length: input_length as u32,
truncate: truncate.unwrap_or(self.max_input_length) as u32,
parameters,
stopping_parameters,
top_n_tokens,
})
}
/// Validate the best_of parameter
#[instrument(skip_all)]
pub(crate) fn validate_best_of(&self, best_of: usize) -> Result<usize, ValidationError> {
if self.max_best_of == 1 && best_of != 1 {
return Err(ValidationError::BestOfDisabled);
}
if best_of > self.max_best_of {
return Err(ValidationError::BestOf(self.max_best_of, best_of));
}
Ok(best_of)
}
}
/// Round robin tokenization task
async fn round_robin_task(
mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>,
senders: Vec<mpsc::UnboundedSender<TokenizerRequest>>,
) {
loop {
for sender in &senders {
match receiver.recv().await {
None => return,
Some(request) => sender.send(request).unwrap(),
};
}
}
}
/// Start tokenization workers
fn tokenizer_worker(tokenizer: Tokenizer, mut receiver: mpsc::UnboundedReceiver<TokenizerRequest>) {
// Loop over requests
while let Some(((inputs, truncate), response_tx, parent_span)) = receiver.blocking_recv() {
parent_span.in_scope(|| {
response_tx
.send(prepare_input(inputs, truncate, &tokenizer))
.unwrap_or(())
})
}
}
/// Get input length and optionally truncate it
fn prepare_input(
inputs: String,
truncate: Option<usize>,
tokenizer: &Tokenizer,
) -> Result<(String, usize), ValidationError> {
// Get the number of tokens in the input
let mut encoding = tokenizer
.encode(inputs.clone(), true)
.map_err(|err| ValidationError::Tokenizer(err.to_string()))?;
// Optionally truncate
let (inputs, input_length) = match truncate {
// Truncate is some and < encoding length
Some(truncate) if truncate < encoding.len() => {
// truncate encoding and decode new inputs
encoding.truncate(truncate, 0, TruncationDirection::Left);
let inputs = tokenizer
.decode(encoding.get_ids(), false)
.map_err(|err| ValidationError::Tokenizer(err.to_string()))?;
(inputs, encoding.len())
}
// Nothing to do
_ => (inputs, encoding.len()),
};
Ok((inputs, input_length))
}
type TokenizerRequest = (
(String, Option<usize>),
oneshot::Sender<Result<(String, usize), ValidationError>>,
Span,
);
#[derive(Debug, Clone)]
pub(crate) struct ValidGenerateRequest {
pub inputs: String,
pub input_length: u32,
pub truncate: u32,
pub decoder_input_details: bool,
pub parameters: NextTokenChooserParameters,
pub stopping_parameters: StoppingCriteriaParameters,
pub top_n_tokens: u32,
}
#[derive(Error, Debug)]
pub enum ValidationError {
#[error("`best_of` must be > 0 and <= {0}. Given: {1}")]
BestOf(usize, usize),
#[error("`best_of` != 1 is not allowed for this endpoint")]
BestOfDisabled,
#[error("you must use sampling when `best_of` is > 1")]
BestOfSampling,
#[error("`seed` must not be set when `best_of` > 1")]
BestOfSeed,
#[error("`best_of` != 1 is not supported when streaming tokens")]
BestOfStream,
#[error("`top_n_tokens` must be >= 0 and <= {0}. Given: {1}")]
TopNTokens(u32, u32),
#[error("`top_n_tokens` != 0 is not allowed for this endpoint")]
TopNTokensDisabled,
#[error("`decoder_input_details` == true is not supported when streaming tokens")]
PrefillDetailsStream,
#[error("`temperature` must be strictly positive")]
Temperature,
#[error("`repetition_penalty` must be strictly positive")]
RepetitionPenalty,
#[error("`top_p` must be > 0.0 and < 1.0")]
TopP,
#[error("`top_k` must be strictly positive")]
TopK,
#[error("`truncate` must be strictly positive and less than {0}. Given: {1}")]
Truncate(usize, usize),
#[error("`typical_p` must be > 0.0 and < 1.0")]
TypicalP,
#[error("one of `max_new_tokens` or `truncate` must be set if a fast tokenizer is not in use")]
UnsetMaxNewTokens,
#[error("`max_new_tokens` must be strictly positive")]
NegativeMaxNewTokens,
#[error("`max_new_tokens` must be <= {0}. Given: {1}")]
MaxNewTokens(usize, u32),
#[error("`inputs` tokens + `max_new_tokens` must be <= {0}. Given: {1} `inputs` tokens and {2} `max_new_tokens`")]
MaxTotalTokens(usize, usize, u32),
#[error("`inputs` must have less than {0} tokens. Given: {1}")]
InputLength(usize, usize),
#[error("`inputs` cannot be empty")]
EmptyInput,
#[error("`stop` supports up to {0} stop sequences. Given: {1}")]
StopSequence(usize, usize),
#[error("tokenizer error {0}")]
Tokenizer(String),
}
#[cfg(test)]
mod tests {
use super::*;
use crate::default_parameters;
use crate::tests::get_tokenizer;
#[tokio::test]
async fn test_validation_max_new_tokens() {
let tokenizer = None;
let max_best_of = 2;
let max_stop_sequence = 3;
let max_top_n_tokens = 4;
let max_input_length = 5;
let max_total_tokens = 6;
let workers = 1;
let validation = Validation::new(
workers,
tokenizer,
max_best_of,
max_stop_sequence,
max_top_n_tokens,
max_input_length,
max_total_tokens,
);
let max_new_tokens = 10;
match validation
.validate_input("Hello".to_string(), None, Some(max_new_tokens))
.await
{
Err(ValidationError::MaxNewTokens(1, 10)) => (),
_ => panic!("Unexpected not max new tokens"),
}
}
#[tokio::test]
async fn test_validation_input_length() {
let tokenizer = Some(get_tokenizer().await);
let max_best_of = 2;
let max_stop_sequence = 3;
let max_top_n_tokens = 4;
let max_input_length = 5;
let max_total_tokens = 6;
let workers = 1;
let validation = Validation::new(
workers,
tokenizer,
max_best_of,
max_stop_sequence,
max_top_n_tokens,
max_input_length,
max_total_tokens,
);
let max_new_tokens = 10;
match validation
.validate_input("Hello".to_string(), None, Some(max_new_tokens))
.await
{
Err(ValidationError::MaxTotalTokens(6, 1, 10)) => (),
_ => panic!("Unexpected not max new tokens"),
}
}
#[tokio::test]
async fn test_validation_best_of_sampling() {
let tokenizer = Some(get_tokenizer().await);
let max_best_of = 2;
let max_stop_sequence = 3;
let max_top_n_tokens = 4;
let max_input_length = 5;
let max_total_tokens = 6;
let workers = 1;
let validation = Validation::new(
workers,
tokenizer,
max_best_of,
max_stop_sequence,
max_top_n_tokens,
max_input_length,
max_total_tokens,
);
match validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
best_of: Some(2),
do_sample: false,
..default_parameters()
},
})
.await
{
Err(ValidationError::BestOfSampling) => (),
_ => panic!("Unexpected not best of sampling"),
}
}
#[tokio::test]
async fn test_validation_top_p() {
let tokenizer = Some(get_tokenizer().await);
let max_best_of = 2;
let max_stop_sequence = 3;
let max_top_n_tokens = 4;
let max_input_length = 5;
let max_total_tokens = 106;
let workers = 1;
let validation = Validation::new(
workers,
tokenizer,
max_best_of,
max_stop_sequence,
max_top_n_tokens,
max_input_length,
max_total_tokens,
);
match validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_p: Some(1.0),
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
{
Err(ValidationError::TopP) => (),
_ => panic!("Unexpected top_p"),
}
match validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_p: Some(0.99),
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
{
Ok(_) => (),
_ => panic!("Unexpected top_p error"),
}
let valid_request = validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_p: None,
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
.unwrap();
// top_p == 1.0 is invalid for users to ask for but it's the default resolved value.
assert_eq!(valid_request.parameters.top_p, 1.0);
}
#[tokio::test]
async fn test_validation_top_n_tokens() {
let tokenizer = Some(get_tokenizer().await);
let max_best_of = 2;
let max_stop_sequences = 3;
let max_top_n_tokens = 4;
let max_input_length = 5;
let max_total_tokens = 106;
let workers = 1;
let validation = Validation::new(
workers,
tokenizer,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_length,
max_total_tokens,
);
match validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_n_tokens: Some(5),
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
{
Err(ValidationError::TopNTokens(4, 5)) => (),
_ => panic!("Unexpected top_n_tokens"),
}
validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_n_tokens: Some(4),
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
.unwrap();
validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_n_tokens: Some(0),
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
.unwrap();
let valid_request = validation
.validate(GenerateRequest {
inputs: "Hello".to_string(),
parameters: GenerateParameters {
top_n_tokens: None,
max_new_tokens: Some(5),
..default_parameters()
},
})
.await
.unwrap();
assert_eq!(valid_request.top_n_tokens, 0);
}
}
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/src/main.rs | use axum::http::HeaderValue;
use clap::Parser;
use hf_hub::api::tokio::{Api, ApiBuilder, ApiRepo};
use hf_hub::{Repo, RepoType};
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::Resource;
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use std::fs::File;
use std::io::BufReader;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use text_generation_client::{ClientError, ShardedClient};
use text_generation_router::{server, HubModelInfo, HubTokenizerConfig};
use thiserror::Error;
use tokenizers::Tokenizer;
use tower_http::cors::AllowOrigin;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};
/// App Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
#[clap(default_value = "128", long, env)]
max_concurrent_requests: usize,
#[clap(default_value = "2", long, env)]
max_best_of: usize,
#[clap(default_value = "4", long, env)]
max_stop_sequences: usize,
#[clap(default_value = "5", long, env)]
max_top_n_tokens: u32,
#[clap(default_value = "1024", long, env)]
max_input_length: usize,
#[clap(default_value = "2048", long, env)]
max_total_tokens: usize,
#[clap(default_value = "1.2", long, env)]
waiting_served_ratio: f32,
#[clap(default_value = "4096", long, env)]
max_batch_prefill_tokens: u32,
#[clap(long, env)]
max_batch_total_tokens: Option<u32>,
#[clap(default_value = "20", long, env)]
max_waiting_tokens: usize,
#[clap(default_value = "0.0.0.0", long, env)]
hostname: String,
#[clap(default_value = "3000", long, short, env)]
port: u16,
#[clap(default_value = "/tmp/text-generation-server-0", long, env)]
master_shard_uds_path: String,
#[clap(default_value = "bigscience/bloom", long, env)]
tokenizer_name: String,
#[clap(long, env)]
tokenizer_config_path: Option<String>,
#[clap(long, env)]
revision: Option<String>,
#[clap(default_value = "2", long, env)]
validation_workers: usize,
#[clap(long, env)]
json_output: bool,
#[clap(long, env)]
otlp_endpoint: Option<String>,
#[clap(long, env)]
cors_allow_origin: Option<Vec<String>>,
#[clap(long, env)]
ngrok: bool,
#[clap(long, env)]
ngrok_authtoken: Option<String>,
#[clap(long, env)]
ngrok_edge: Option<String>,
}
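// Illustrative invocation of the router (a sketch, not taken from this file): the
// long flags follow clap's kebab-case mapping of the fields above, and each option
// can also be set through the matching upper-case environment variable. The binary
// name `text-generation-router` is assumed here.
//
//     text-generation-router \
//         --tokenizer-name bigscience/bloom \
//         --port 3000 \
//         --max-input-length 1024 \
//         --max-total-tokens 2048 \
//         --max-batch-prefill-tokens 4096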
#[tokio::main]
async fn main() -> Result<(), RouterError> {
// Get args
let args = Args::parse();
// Pattern match configuration
let Args {
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_length,
max_total_tokens,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
hostname,
port,
master_shard_uds_path,
tokenizer_name,
tokenizer_config_path,
revision,
validation_workers,
json_output,
otlp_endpoint,
cors_allow_origin,
ngrok,
ngrok_authtoken,
ngrok_edge,
} = args;
// Launch Tokio runtime
init_logging(otlp_endpoint, json_output);
// Validate args
if max_input_length >= max_total_tokens {
return Err(RouterError::ArgumentValidation(
"`max_input_length` must be < `max_total_tokens`".to_string(),
));
}
if max_input_length as u32 > max_batch_prefill_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be >= `max_input_length`. Given: {max_batch_prefill_tokens} and {max_input_length}")));
}
if validation_workers == 0 {
return Err(RouterError::ArgumentValidation(
"`validation_workers` must be > 0".to_string(),
));
}
if let Some(ref max_batch_total_tokens) = max_batch_total_tokens {
if max_batch_prefill_tokens > *max_batch_total_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_batch_prefill_tokens` must be <= `max_batch_total_tokens`. Given: {max_batch_prefill_tokens} and {max_batch_total_tokens}")));
}
if max_total_tokens as u32 > *max_batch_total_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_batch_total_tokens}")));
}
}
// CORS allowed origins
// Map inside the Option and parse each origin String into a HeaderValue,
// then convert the list into an AllowOrigin
let cors_allow_origin: Option<AllowOrigin> = cors_allow_origin.map(|cors_allow_origin| {
AllowOrigin::list(
cors_allow_origin
.iter()
.map(|origin| origin.parse::<HeaderValue>().unwrap()),
)
});
// Parse Huggingface hub token
let authorization_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok();
// Tokenizer instance
// This will only be used to validate payloads
let local_path = Path::new(&tokenizer_name);
let local_model = local_path.exists() && local_path.is_dir();
// Load tokenizer config
// This will be used to format the chat template
let local_tokenizer_config_path =
tokenizer_config_path.unwrap_or("tokenizer_config.json".to_string());
let local_tokenizer_config = Path::new(&local_tokenizer_config_path).exists();
// Shared API builder initialization
let api_builder = || {
let mut builder = ApiBuilder::new()
.with_progress(false)
.with_token(authorization_token);
if let Ok(cache_dir) = std::env::var("HUGGINGFACE_HUB_CACHE") {
builder = builder.with_cache_dir(cache_dir.into());
}
builder
};
// Decide if we need to use the API based on the revision and local path
let use_api = revision.is_some() || !local_path.exists() || !local_path.is_dir();
// Initialize API if needed
let api = if use_api {
tracing::info!("Using the Hugging Face API");
match api_builder().build() {
Ok(api) => Some(api),
Err(_) => {
tracing::warn!("Unable to build the Hugging Face API");
None
}
}
} else {
None
};
// Load tokenizer and model info
let (tokenizer, model_info) = if local_model {
let tokenizer = Tokenizer::from_file(local_path.join("tokenizer.json")).ok();
let model_info = HubModelInfo {
model_id: tokenizer_name.to_string(),
sha: None,
pipeline_tag: None,
};
(tokenizer, model_info)
} else if let Some(api) = api.clone() {
let api_repo = api.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.clone().unwrap_or_else(|| "main".to_string()),
));
let tokenizer = match api_repo.get("tokenizer.json").await {
Ok(tokenizer_filename) => Tokenizer::from_file(tokenizer_filename).ok(),
Err(_) => get_base_tokenizer(&api, &api_repo).await,
};
let model_info = get_model_info(&api_repo).await.unwrap_or_else(|| {
tracing::warn!("Could not retrieve model info from the Hugging Face hub.");
HubModelInfo {
model_id: tokenizer_name.to_string(),
sha: None,
pipeline_tag: None,
}
});
(tokenizer, model_info)
} else {
// No API and no local model
return Err(RouterError::ArgumentValidation(
"No local model found and no revision specified".to_string(),
));
};
// Load tokenizer config if found locally, or check if we can get it from the API if needed
let tokenizer_config = if local_tokenizer_config {
tracing::info!("Using local tokenizer config");
HubTokenizerConfig::from_file(&local_tokenizer_config_path)
} else if let Some(api) = api {
tracing::info!("Using the Hugging Face API to retrieve tokenizer config");
get_tokenizer_config(&api.repo(Repo::with_revision(
tokenizer_name.to_string(),
RepoType::Model,
revision.unwrap_or_else(|| "main".to_string()),
)))
.await
.unwrap_or_else(|| {
tracing::warn!("Could not retrieve tokenizer config from the Hugging Face hub.");
HubTokenizerConfig::default()
})
} else {
tracing::warn!("Could not find tokenizer config locally and no revision specified");
HubTokenizerConfig::default()
};
if tokenizer.is_none() {
tracing::warn!("Could not find a fast tokenizer implementation for {tokenizer_name}");
tracing::warn!("Rust input length validation and truncation is disabled");
}
// if pipeline-tag == text-generation we default to return_full_text = true
let compat_return_full_text = match &model_info.pipeline_tag {
None => {
tracing::warn!("no pipeline tag found for model {tokenizer_name}");
false
}
Some(pipeline_tag) => pipeline_tag.as_str() == "text-generation",
};
// Instantiate sharded client from the master unix socket
let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
.await
.map_err(RouterError::Connection)?;
// Clear the cache; useful if the webserver rebooted
sharded_client
.clear_cache(None)
.await
.map_err(RouterError::Cache)?;
// Get info from the shard
let shard_info = sharded_client.info().await.map_err(RouterError::Info)?;
// Warmup model
tracing::info!("Warming up model");
let max_supported_batch_total_tokens = match sharded_client
.warmup(
max_input_length as u32,
max_batch_prefill_tokens,
max_total_tokens as u32,
)
.await
.map_err(RouterError::Warmup)?
{
// Older models do not support automatic max-batch-total-tokens
None => {
let max_batch_total_tokens = max_batch_total_tokens
.unwrap_or(16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)));
tracing::warn!("Model does not support automatic max batch total tokens");
max_batch_total_tokens
}
// Flash attention models return their max supported total tokens
Some(max_supported_batch_total_tokens) => {
// Warn if the user set their own max-batch-total-tokens, as we will ignore it
if max_batch_total_tokens.is_some() {
tracing::warn!(
"`--max-batch-total-tokens` is deprecated for Flash \
Attention models."
);
tracing::warn!(
"Inferred max batch total tokens: {max_supported_batch_total_tokens}"
);
}
if max_total_tokens as u32 > max_supported_batch_total_tokens {
return Err(RouterError::ArgumentValidation(format!("`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {max_total_tokens} and {max_supported_batch_total_tokens}")));
}
max_supported_batch_total_tokens
}
};
tracing::info!("Setting max batch total tokens to {max_supported_batch_total_tokens}");
tracing::info!("Connected");
let addr = match hostname.parse() {
Ok(ip) => SocketAddr::new(ip, port),
Err(_) => {
tracing::warn!("Invalid hostname, defaulting to 0.0.0.0");
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port)
}
};
// Run server
server::run(
model_info,
shard_info,
compat_return_full_text,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_length,
max_total_tokens,
waiting_served_ratio,
max_batch_prefill_tokens,
max_supported_batch_total_tokens,
max_waiting_tokens,
sharded_client,
tokenizer,
validation_workers,
addr,
cors_allow_origin,
ngrok,
ngrok_authtoken,
ngrok_edge,
tokenizer_config,
)
.await?;
Ok(())
}
/// Init logging:
/// - otlp_endpoint is an optional URL to an Open Telemetry collector
/// - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (defaults to INFO)
/// - json_output switches the stdout/stderr layer to JSON formatting (defaults to text)
fn init_logging(otlp_endpoint: Option<String>, json_output: bool) {
let mut layers = Vec::new();
// STDOUT/STDERR layer
let fmt_layer = tracing_subscriber::fmt::layer()
.with_file(true)
.with_line_number(true);
let fmt_layer = match json_output {
true => fmt_layer.json().flatten_event(true).boxed(),
false => fmt_layer.boxed(),
};
layers.push(fmt_layer);
// OpenTelemetry tracing layer
if let Some(otlp_endpoint) = otlp_endpoint {
global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(otlp_endpoint),
)
.with_trace_config(
trace::config()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
"text-generation-inference.router",
)]))
.with_sampler(Sampler::AlwaysOn),
)
.install_batch(opentelemetry::runtime::Tokio);
if let Ok(tracer) = tracer {
layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
init_tracing_opentelemetry::init_propagator().unwrap();
};
}
// Filter events with LOG_LEVEL
let env_filter =
EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));
tracing_subscriber::registry()
.with(env_filter)
.with(layers)
.init();
}
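// Example environment for the logging setup above (a sketch; the filter value and
// the endpoint are illustrative assumptions):
//
//     LOG_LEVEL=info,text_generation_router=debug \
//         ./text-generation-router --json-output --otlp-endpoint http://localhost:4317
//
// With `--otlp-endpoint` set, spans are exported to the collector in addition to the
// stdout/stderr layer.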
/// Get model info from the Hugging Face Hub
pub async fn get_model_info(api: &ApiRepo) -> Option<HubModelInfo> {
let response = api.info_request().send().await.ok()?;
if response.status().is_success() {
let hub_model_info: HubModelInfo =
serde_json::from_str(&response.text().await.ok()?).ok()?;
if let Some(sha) = &hub_model_info.sha {
tracing::info!(
"Serving revision {sha} of model {}",
hub_model_info.model_id
);
}
Some(hub_model_info)
} else {
None
}
}
/// Get the tokenizer of the base model referenced by `base_model_name_or_path` in `config.json`
pub async fn get_base_tokenizer(api: &Api, api_repo: &ApiRepo) -> Option<Tokenizer> {
let config_filename = api_repo.get("config.json").await.ok()?;
// Open the file in read-only mode with buffer.
let file = File::open(config_filename).ok()?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as a generic `serde_json::Value`.
let config: serde_json::Value = serde_json::from_reader(reader).ok()?;
if let Some(serde_json::Value::String(base_model_id)) = config.get("base_model_name_or_path") {
let api_base_repo = api.repo(Repo::with_revision(
base_model_id.to_string(),
RepoType::Model,
"main".to_string(),
));
let tokenizer_filename = api_base_repo.get("tokenizer.json").await.ok()?;
Tokenizer::from_file(tokenizer_filename).ok()
} else {
None
}
}
/// Get tokenizer_config from the Hugging Face Hub
pub async fn get_tokenizer_config(api_repo: &ApiRepo) -> Option<HubTokenizerConfig> {
let tokenizer_config_filename = api_repo.get("tokenizer_config.json").await.ok()?;
// Open the file in read-only mode with buffer.
let file = File::open(tokenizer_config_filename).ok()?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of `HubTokenizerConfig`.
let tokenizer_config: HubTokenizerConfig = serde_json::from_reader(reader).ok()?;
Some(tokenizer_config)
}
#[derive(Debug, Error)]
enum RouterError {
#[error("Argument validation error: {0}")]
ArgumentValidation(String),
#[error("Unable to connect to the Python model shards: {0}")]
Connection(ClientError),
#[error("Unable to clear the Python model shards cache: {0}")]
Cache(ClientError),
#[error("Unable to get the Python model shards info: {0}")]
Info(ClientError),
#[error("Unable to warmup the Python model shards: {0}")]
Warmup(ClientError),
#[error("Tokio runtime failed to start: {0}")]
Tokio(#[from] std::io::Error),
#[error("Axum webserver failed: {0}")]
Axum(#[from] axum::BoxError),
}
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/src/lib.rs | mod health;
/// Text Generation Inference Webserver
mod infer;
mod queue;
pub mod server;
mod validation;
use infer::{Infer, InferError, InferStreamResponse};
use queue::{Entry, Queue};
use serde::{Deserialize, Serialize};
use tokio::sync::OwnedSemaphorePermit;
use tokio_stream::wrappers::UnboundedReceiverStream;
use utoipa::ToSchema;
use validation::Validation;
/// Type alias for generation responses
pub(crate) type GenerateStreamResponse = (
OwnedSemaphorePermit,
u32, // input_length
UnboundedReceiverStream<Result<InferStreamResponse, InferError>>,
);
/// Hub type
#[derive(Clone, Debug, Deserialize)]
pub struct HubModelInfo {
#[serde(rename(deserialize = "id"))]
pub model_id: String,
pub sha: Option<String>,
pub pipeline_tag: Option<String>,
}
#[derive(Clone, Deserialize, Default)]
pub struct HubTokenizerConfig {
#[serde(default)]
pub chat_template: Option<String>,
}
impl HubTokenizerConfig {
pub fn from_file(filename: &str) -> Self {
let content = std::fs::read_to_string(filename).unwrap();
serde_json::from_str(&content).unwrap_or_default()
}
}
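// A minimal `tokenizer_config.json` that `HubTokenizerConfig::from_file` can parse
// might look like the following (the template string is only an illustration):
//
//     { "chat_template": "{% for message in messages %}{{ message.content }}{% endfor %}" }
//
// Unknown keys are ignored by serde, and a missing `chat_template` falls back to
// `None` thanks to `#[serde(default)]`.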
#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct Info {
/// Model info
#[schema(example = "bigscience/blomm-560m")]
pub model_id: String,
#[schema(nullable = true, example = "e985a63cdc139290c5f700ff1929f0b5942cced2")]
pub model_sha: Option<String>,
#[schema(example = "torch.float16")]
pub model_dtype: String,
#[schema(example = "cuda")]
pub model_device_type: String,
#[schema(nullable = true, example = "text-generation")]
pub model_pipeline_tag: Option<String>,
/// Router Parameters
#[schema(example = "128")]
pub max_concurrent_requests: usize,
#[schema(example = "2")]
pub max_best_of: usize,
#[schema(example = "4")]
pub max_stop_sequences: usize,
#[schema(example = "1024")]
pub max_input_length: usize,
#[schema(example = "2048")]
pub max_total_tokens: usize,
#[schema(example = "1.2")]
pub waiting_served_ratio: f32,
#[schema(example = "32000")]
pub max_batch_total_tokens: u32,
#[schema(example = "20")]
pub max_waiting_tokens: usize,
#[schema(example = "2")]
pub validation_workers: usize,
/// Router Info
#[schema(example = "0.5.0")]
pub version: &'static str,
#[schema(nullable = true, example = "null")]
pub sha: Option<&'static str>,
#[schema(nullable = true, example = "null")]
pub docker_label: Option<&'static str>,
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateParameters {
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 1)]
pub best_of: Option<usize>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
nullable = true,
default = "null",
example = 0.5
)]
pub temperature: Option<f32>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
nullable = true,
default = "null",
example = 1.03
)]
pub repetition_penalty: Option<f32>,
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 10)]
pub top_k: Option<i32>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
maximum = 1.0,
nullable = true,
default = "null",
example = 0.95
)]
pub top_p: Option<f32>,
#[serde(default)]
#[schema(
exclusive_minimum = 0.0,
maximum = 1.0,
nullable = true,
default = "null",
example = 0.95
)]
pub typical_p: Option<f32>,
#[serde(default)]
#[schema(default = "false", example = true)]
pub do_sample: bool,
#[serde(default = "default_max_new_tokens")]
#[schema(nullable = true, default = "100", example = "20")]
pub max_new_tokens: Option<u32>,
#[serde(default)]
#[schema(nullable = true, default = "null", example = false)]
pub return_full_text: Option<bool>,
#[serde(default)]
#[schema(inline, max_items = 4, example = json!(["photographer"]))]
pub stop: Vec<String>,
#[serde(default)]
#[schema(nullable = true, default = "null", example = "null")]
pub truncate: Option<usize>,
#[serde(default)]
#[schema(default = "false", example = true)]
pub watermark: bool,
#[serde(default)]
#[schema(default = "true")]
pub details: bool,
#[serde(default)]
#[schema(default = "true")]
pub decoder_input_details: bool,
#[serde(default)]
#[schema(
exclusive_minimum = 0,
nullable = true,
default = "null",
example = "null"
)]
pub seed: Option<u64>,
#[serde(default)]
#[schema(exclusive_minimum = 0, nullable = true, default = "null", example = 5)]
pub top_n_tokens: Option<u32>,
}
fn default_max_new_tokens() -> Option<u32> {
Some(100)
}
fn default_parameters() -> GenerateParameters {
GenerateParameters {
best_of: None,
temperature: None,
repetition_penalty: None,
top_k: None,
top_p: None,
typical_p: None,
do_sample: true,
max_new_tokens: default_max_new_tokens(),
return_full_text: None,
stop: Vec::new(),
truncate: None,
watermark: false,
details: false,
decoder_input_details: false,
seed: None,
top_n_tokens: None,
}
}
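// Illustrative JSON body that deserializes into a `GenerateRequest` using these
// parameter names (the values mirror the schema examples above and are not
// defaults enforced here):
//
//     {
//       "inputs": "My name is Olivier and I",
//       "parameters": {
//         "max_new_tokens": 20,
//         "temperature": 0.5,
//         "top_p": 0.95,
//         "do_sample": true
//       }
//     }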
#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct ChatCompletion {
pub id: String,
pub object: String,
pub created: u64,
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<ChatCompletionComplete>,
pub usage: Usage,
}
#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct ChatCompletionComplete {
pub index: u32,
pub message: Message,
pub logprobs: Option<Vec<f32>>,
pub finish_reason: String,
}
#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct Usage {
pub prompt_tokens: u32,
pub completion_tokens: u32,
pub total_tokens: u32,
}
impl ChatCompletion {
pub(crate) fn new(
model: String,
system_fingerprint: String,
output: String,
created: u64,
details: Details,
return_logprobs: bool,
) -> Self {
Self {
id: String::new(),
object: "text_completion".into(),
created,
model,
system_fingerprint,
choices: vec![ChatCompletionComplete {
index: 0,
message: Message {
role: "assistant".into(),
content: output,
},
logprobs: return_logprobs
.then(|| details.tokens.iter().map(|t| t.logprob).collect()),
finish_reason: details.finish_reason.to_string(),
}],
usage: Usage {
prompt_tokens: details.prefill.len() as u32,
completion_tokens: details.generated_tokens,
total_tokens: details.prefill.len() as u32 + details.generated_tokens,
},
}
}
}
#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct ChatCompletionChunk {
pub id: String,
pub object: String,
pub created: u64,
pub model: String,
pub system_fingerprint: String,
pub choices: Vec<ChatCompletionChoice>,
}
#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct ChatCompletionChoice {
pub index: u32,
pub delta: ChatCompletionDelta,
pub logprobs: Option<f32>,
pub finish_reason: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub(crate) struct ChatCompletionDelta {
pub role: String,
pub content: String,
}
impl ChatCompletionChunk {
pub(crate) fn new(
model: String,
system_fingerprint: String,
delta: String,
created: u64,
index: u32,
logprobs: Option<f32>,
finish_reason: Option<String>,
) -> Self {
Self {
id: String::new(),
object: "text_completion".to_string(),
created,
model,
system_fingerprint,
choices: vec![ChatCompletionChoice {
index,
delta: ChatCompletionDelta {
role: "assistant".to_string(),
content: delta,
},
logprobs,
finish_reason,
}],
}
}
}
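// Sketch of one serialized chunk produced by `ChatCompletionChunk::new` above
// (the timestamp, model name and delta text are placeholders):
//
//     {
//       "id": "",
//       "object": "text_completion",
//       "created": 1700000000,
//       "model": "bigscience/bloom-560m",
//       "system_fingerprint": "",
//       "choices": [
//         {
//           "index": 0,
//           "delta": { "role": "assistant", "content": " am" },
//           "logprobs": null,
//           "finish_reason": null
//         }
//       ]
//     }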
fn default_request_messages() -> Vec<Message> {
vec![Message {
role: "user".to_string(),
content: "My name is David and I".to_string(),
}]
}
#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct ChatRequest {
/// UNUSED
/// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
#[schema(example = "bigscience/bloom-560m")]
pub model: String, /* NOTE: UNUSED */
/// A list of messages comprising the conversation so far.
#[serde(default = "default_request_messages")]
pub messages: Vec<Message>,
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,
/// decreasing the model's likelihood to repeat the same line verbatim.
#[serde(default)]
pub frequency_penalty: Option<f32>,
/// UNUSED
/// Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
/// (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
/// the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
/// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
/// result in a ban or exclusive selection of the relevant token.
#[serde(default)]
pub logit_bias: Option<Vec<f32>>,
/// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each
/// output token returned in the content of message.
#[serde(default)]
pub logprobs: Option<bool>,
/// UNUSED
/// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with
/// an associated log probability. logprobs must be set to true if this parameter is used.
#[serde(default)]
pub top_logprobs: Option<u32>,
/// The maximum number of tokens that can be generated in the chat completion.
#[serde(default)]
pub max_tokens: Option<u32>,
/// UNUSED
/// How many chat completion choices to generate for each input message. Note that you will be charged based on the
/// number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
#[serde(default)]
pub n: Option<u32>,
/// UNUSED
/// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
/// increasing the model's likelihood to talk about new topics
#[serde(default)]
pub presence_penalty: Option<f32>,
#[serde(default = "bool::default")]
pub stream: bool,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
}
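// Illustrative request body that deserializes into a `ChatRequest` (the values,
// and the idea that it is posted to an OpenAI-compatible chat route, are
// assumptions for the example):
//
//     {
//       "model": "tgi",
//       "messages": [ { "role": "user", "content": "My name is David and I" } ],
//       "max_tokens": 32,
//       "stream": false,
//       "seed": 42
//     }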
#[derive(Clone, Deserialize, ToSchema, Serialize)]
pub(crate) struct Message {
#[schema(example = "user")]
pub role: String,
#[schema(example = "My name is David and I")]
pub content: String,
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct GenerateRequest {
#[schema(example = "My name is Olivier and I")]
pub inputs: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
}
#[derive(Clone, Debug, Deserialize, ToSchema)]
pub(crate) struct CompatGenerateRequest {
#[schema(example = "My name is Olivier and I")]
pub inputs: String,
#[serde(default = "default_parameters")]
pub parameters: GenerateParameters,
#[serde(default)]
#[schema(default = "false")]
pub stream: bool,
}
impl From<CompatGenerateRequest> for GenerateRequest {
fn from(req: CompatGenerateRequest) -> Self {
Self {
inputs: req.inputs,
parameters: req.parameters,
}
}
}
#[derive(Debug, Serialize, ToSchema)]
pub struct PrefillToken {
#[schema(example = 0)]
id: u32,
#[schema(example = "test")]
text: String,
#[schema(nullable = true, example = - 0.34)]
logprob: f32,
}
#[derive(Debug, Serialize, ToSchema)]
pub struct Token {
#[schema(example = 0)]
id: u32,
#[schema(example = "test")]
text: String,
#[schema(nullable = true, example = - 0.34)]
logprob: f32,
#[schema(example = "false")]
special: bool,
}
#[derive(Serialize, ToSchema)]
#[serde(rename_all(serialize = "snake_case"))]
pub(crate) enum FinishReason {
#[schema(rename = "length")]
Length,
#[serde(rename = "eos_token")]
#[schema(rename = "eos_token")]
EndOfSequenceToken,
#[schema(rename = "stop_sequence")]
StopSequence,
}
impl std::fmt::Display for FinishReason {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FinishReason::Length => write!(f, "length"),
FinishReason::EndOfSequenceToken => write!(f, "eos_token"),
FinishReason::StopSequence => write!(f, "stop_sequence"),
}
}
}
#[derive(Serialize, ToSchema)]
pub(crate) struct BestOfSequence {
#[schema(example = "test")]
pub generated_text: String,
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
pub prefill: Vec<PrefillToken>,
pub tokens: Vec<Token>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Vec<Token>>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct Details {
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
pub prefill: Vec<PrefillToken>,
pub tokens: Vec<Token>,
#[serde(skip_serializing_if = "Option::is_none")]
pub best_of_sequences: Option<Vec<BestOfSequence>>,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Vec<Token>>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct GenerateResponse {
#[schema(example = "test")]
pub generated_text: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub details: Option<Details>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct StreamDetails {
#[schema(example = "length")]
pub finish_reason: FinishReason,
#[schema(example = 1)]
pub generated_tokens: u32,
#[schema(nullable = true, example = 42)]
pub seed: Option<u64>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct StreamResponse {
pub index: u32,
pub token: Token,
#[serde(skip_serializing_if = "Vec::is_empty")]
pub top_tokens: Vec<Token>,
#[schema(nullable = true, default = "null", example = "test")]
pub generated_text: Option<String>,
#[schema(nullable = true, default = "null")]
pub details: Option<StreamDetails>,
}
#[derive(Serialize, ToSchema)]
pub(crate) struct ErrorResponse {
pub error: String,
pub error_type: String,
}
#[cfg(test)]
mod tests {
use std::io::Write;
use tokenizers::Tokenizer;
pub(crate) async fn get_tokenizer() -> Tokenizer {
let filename = std::path::Path::new("tokenizer.json");
if !filename.exists() {
let content = reqwest::get("https://huggingface.co/gpt2/raw/main/tokenizer.json")
.await
.unwrap()
.bytes()
.await
.unwrap();
let tmp_filename = "tokenizer.json.temp";
let mut file = std::fs::File::create(tmp_filename).unwrap();
file.write_all(&content).unwrap();
// Re-check in case another process wrote the file in the meantime.
if !filename.exists() {
std::fs::rename(tmp_filename, filename).unwrap()
}
}
Tokenizer::from_file("tokenizer.json").unwrap()
}
}
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/src/health.rs | use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use text_generation_client::{
Batch, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters,
};
// Note: Request ids and batch ids cannot collide.
const LIVENESS_ID: u64 = u64::MAX;
const BATCH_ID: u64 = u64::MAX;
#[derive(Clone, Debug)]
pub(crate) struct Health {
client: ShardedClient,
generation_health: Arc<AtomicBool>,
}
impl Health {
pub(crate) fn new(client: ShardedClient, generation_health: Arc<AtomicBool>) -> Self {
Self {
client,
generation_health,
}
}
pub(crate) async fn check(&mut self) -> bool {
if self.generation_health.load(Ordering::SeqCst) {
// Generation is healthy, we only check that the shards are answering gRPC calls
self.client.health().await.is_ok()
} else {
// Generation is unhealthy, or no generation request has been sent yet
// Dummy batch of 1 token and 1 generated token
let liveness_request = Request {
id: LIVENESS_ID,
inputs: "liveness".to_string(),
truncate: 10,
prefill_logprobs: false,
parameters: Some(NextTokenChooserParameters {
temperature: 1.0,
top_k: 0,
top_p: 1.0,
typical_p: 1.0,
do_sample: false,
seed: 0,
repetition_penalty: 1.0,
watermark: false,
}),
stopping_parameters: Some(StoppingCriteriaParameters {
max_new_tokens: 1,
stop_sequences: vec![],
ignore_eos_token: false,
}),
top_n_tokens: 0,
};
let batch = Batch {
id: BATCH_ID,
requests: vec![liveness_request],
size: 1,
max_tokens: 2,
};
// Skips the queue
let value = self.client.prefill(batch).await.is_ok();
// Update generation health
self.generation_health.store(value, Ordering::SeqCst);
value
}
}
}
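// Usage sketch (an assumption about the caller, typically the webserver's health
// route): build a `Health` from a cloned client and map the boolean returned by
// `check()` to an HTTP status, e.g. 200 when true and 503 otherwise.
//
//     let mut health = Health::new(client.clone(), Arc::new(AtomicBool::new(false)));
//     let healthy: bool = health.check().await;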
| 0 |
hf_public_repos/text-generation-inference/router | hf_public_repos/text-generation-inference/router/src/queue.rs | use crate::infer::InferError;
use crate::infer::InferStreamResponse;
use crate::validation::ValidGenerateRequest;
use nohash_hasher::{BuildNoHashHasher, IntMap};
use std::cmp::min;
use std::collections::VecDeque;
use text_generation_client::{Batch, Request};
use tokio::sync::{mpsc, oneshot};
use tokio::time::Instant;
use tracing::{info_span, instrument, Span};
/// Queue entry
#[derive(Debug)]
pub(crate) struct Entry {
/// Request
pub request: ValidGenerateRequest,
/// Response sender to communicate between the Infer struct and the batching_task
pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>,
/// Span that will live as long as entry
pub span: Span,
/// Temporary span used as a guard when logging inference, wait times...
pub temp_span: Option<Span>,
/// Instant when this entry was queued
pub queue_time: Instant,
/// Instant when this entry was added to a batch
pub batch_time: Option<Instant>,
}
/// Request Queue
#[derive(Debug, Clone)]
pub(crate) struct Queue {
/// Channel to communicate with the background queue task
queue_sender: mpsc::UnboundedSender<QueueCommand>,
}
impl Queue {
pub(crate) fn new(
requires_padding: bool,
block_size: u32,
window_size: Option<u32>,
speculate: u32,
) -> Self {
// Create channel
let (queue_sender, queue_receiver) = mpsc::unbounded_channel();
// Launch background queue task
tokio::spawn(queue_task(
requires_padding,
block_size,
window_size,
speculate,
queue_receiver,
));
Self { queue_sender }
}
/// Append an entry to the queue
#[instrument(skip_all)]
pub(crate) fn append(&self, entry: Entry) {
// Send append command to the background task managing the state
// Unwrap is safe here
self.queue_sender
.send(QueueCommand::Append(Box::new(entry), Span::current()))
.unwrap();
}
// Get the next batch
#[instrument(skip(self))]
pub(crate) async fn next_batch(
&self,
min_size: Option<usize>,
prefill_token_budget: u32,
token_budget: u32,
) -> Option<NextBatch> {
// Create response channel
let (response_sender, response_receiver) = oneshot::channel();
// Send next batch command to the background task managing the state
// Unwrap is safe here
self.queue_sender
.send(QueueCommand::NextBatch {
min_size,
prefill_token_budget,
token_budget,
response_sender,
span: Span::current(),
})
.unwrap();
// Await on response channel
// Unwrap is safe here
response_receiver.await.unwrap()
}
}
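// Usage sketch (an assumption about the caller, typically the batching task):
//
//     let queue = Queue::new(false, 1, None, 0);
//     queue.append(entry);
//     if let Some((entries, batch, _span)) = queue.next_batch(None, prefill_budget, token_budget).await {
//         // send `batch` to the shards and keep `entries` around to route streamed tokens back
//     }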
// Background task responsible for the queue state
async fn queue_task(
requires_padding: bool,
block_size: u32,
window_size: Option<u32>,
speculate: u32,
mut receiver: mpsc::UnboundedReceiver<QueueCommand>,
) {
let mut state = State::new(requires_padding, block_size, window_size, speculate);
while let Some(cmd) = receiver.recv().await {
match cmd {
QueueCommand::Append(entry, span) => {
span.in_scope(|| state.append(*entry));
metrics::increment_gauge!("tgi_queue_size", 1.0);
}
QueueCommand::NextBatch {
min_size,
prefill_token_budget,
token_budget,
response_sender,
span,
} => span.in_scope(|| {
let next_batch = state.next_batch(min_size, prefill_token_budget, token_budget);
response_sender.send(next_batch).unwrap();
metrics::gauge!("tgi_queue_size", state.entries.len() as f64);
}),
}
}
}
/// Queue State
#[derive(Debug)]
struct State {
/// Queue entries organized in a VecDeque
entries: VecDeque<(u64, Entry)>,
/// Id of the next entry
next_id: u64,
/// Id of the next batch
next_batch_id: u64,
/// Whether the model is using padding
requires_padding: bool,
/// Paged Attention block size
block_size: u32,
/// Sliding window
window_size: Option<u32>,
/// Speculation amount
speculate: u32,
}
impl State {
fn new(
requires_padding: bool,
block_size: u32,
window_size: Option<u32>,
speculate: u32,
) -> Self {
Self {
entries: VecDeque::with_capacity(128),
next_id: 0,
next_batch_id: 0,
requires_padding,
block_size,
window_size,
speculate,
}
}
/// Append an entry to the queue
fn append(&mut self, mut entry: Entry) {
// Create a span that will live as long as the entry is in the queue waiting to be batched
let queue_span = info_span!(parent: &entry.span, "queued");
entry.temp_span = Some(queue_span);
// Push entry in the queue
self.entries.push_back((self.next_id, entry));
self.next_id += 1;
}
// Get the next batch
fn next_batch(
&mut self,
min_size: Option<usize>,
prefill_token_budget: u32,
token_budget: u32,
) -> Option<NextBatch> {
if self.entries.is_empty() {
return None;
}
// Check if we have enough entries
if let Some(min_size) = min_size {
if self.entries.len() < min_size {
return None;
}
}
// Create span for this batch to add context to inference calls
let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty);
next_batch_span.follows_from(&Span::current());
let mut batch_requests = Vec::with_capacity(self.entries.len());
let mut batch_entries =
IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default());
let mut max_input_length = 0;
let mut prefill_tokens: u32 = 0;
let mut decode_tokens: u32 = 0;
// Pop entries starting from the front of the queue
while let Some((id, mut entry)) = self.entries.pop_front() {
// Filter entries where the response receiver was dropped (== entries where the request
// was dropped by the client)
if entry.response_tx.is_closed() {
metrics::increment_counter!("tgi_request_failure", "err" => "dropped");
continue;
}
if self.requires_padding {
// We pad to max input length in the Python shards
// We need to take these padding tokens into account
max_input_length = max_input_length.max(entry.request.input_length);
prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length
} else {
// pad to block size
prefill_tokens += ((entry.request.input_length + self.block_size - 1)
/ self.block_size)
* self.block_size;
}
if self.requires_padding {
decode_tokens += entry.request.stopping_parameters.max_new_tokens;
} else {
let max_new_tokens = match self.window_size {
None => entry.request.stopping_parameters.max_new_tokens,
Some(window_size) => min(
window_size.saturating_sub(entry.request.input_length),
entry.request.stopping_parameters.max_new_tokens,
),
};
// pad to block size
decode_tokens +=
((max_new_tokens + self.block_size - 1) / self.block_size) * self.block_size;
}
if prefill_tokens > prefill_token_budget
|| (prefill_tokens + decode_tokens + self.speculate) > token_budget
{
// Entry is over budget
// Add it back to the front
self.entries.push_front((id, entry));
break;
}
// Create a new span to link the batch back to this entry
let entry_batch_span = info_span!(parent: &entry.span, "infer");
// Add relationships
next_batch_span.follows_from(&entry_batch_span);
entry_batch_span.follows_from(&next_batch_span);
// Update entry
entry.temp_span = Some(entry_batch_span);
batch_requests.push(Request {
id,
prefill_logprobs: entry.request.decoder_input_details,
inputs: entry.request.inputs.clone(),
truncate: entry.request.truncate,
parameters: Some(entry.request.parameters.clone()),
stopping_parameters: Some(entry.request.stopping_parameters.clone()),
top_n_tokens: entry.request.top_n_tokens,
});
// Set batch_time
entry.batch_time = Some(Instant::now());
// Insert in batch_entries IntMap
batch_entries.insert(id, entry);
}
// Empty batch
if batch_requests.is_empty() {
return None;
}
// Check if our batch is big enough
if let Some(min_size) = min_size {
// Batch is too small
if batch_requests.len() < min_size {
// Add back entries to the queue in the correct order
for r in batch_requests.into_iter().rev() {
let id = r.id;
let entry = batch_entries.remove(&id).unwrap();
self.entries.push_front((id, entry));
}
return None;
}
}
// Final batch size
let size = batch_requests.len() as u32;
next_batch_span.record("batch_size", size);
let batch = Batch {
id: self.next_batch_id,
requests: batch_requests,
size,
max_tokens: (prefill_tokens + decode_tokens),
};
// Increment batch id
self.next_batch_id += 1;
metrics::histogram!("tgi_batch_next_size", batch.size as f64);
Some((batch_entries, batch, next_batch_span))
}
}
type NextBatch = (IntMap<u64, Entry>, Batch, Span);
#[derive(Debug)]
enum QueueCommand {
Append(Box<Entry>, Span),
NextBatch {
min_size: Option<usize>,
prefill_token_budget: u32,
token_budget: u32,
response_sender: oneshot::Sender<Option<NextBatch>>,
span: Span,
},
}
#[cfg(test)]
mod tests {
use super::*;
use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters};
use tracing::info_span;
fn default_entry() -> (
Entry,
mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>,
) {
let (response_tx, receiver_tx) = mpsc::unbounded_channel();
let entry = Entry {
request: ValidGenerateRequest {
inputs: "".to_string(),
input_length: 0,
truncate: 0,
decoder_input_details: false,
parameters: NextTokenChooserParameters {
temperature: 0.0,
top_k: 0,
top_p: 0.0,
typical_p: 0.0,
do_sample: false,
seed: 0,
repetition_penalty: 0.0,
watermark: false,
},
stopping_parameters: StoppingCriteriaParameters {
ignore_eos_token: false,
max_new_tokens: 1,
stop_sequences: vec![],
},
top_n_tokens: 0,
},
response_tx,
span: info_span!("entry"),
temp_span: None,
queue_time: Instant::now(),
batch_time: None,
};
(entry, receiver_tx)
}
#[test]
fn test_append() {
let mut state = State::new(false, 1, None, 0);
let (entry, _guard) = default_entry();
assert_eq!(state.next_id, 0);
assert_eq!(state.entries.len(), 0);
state.append(entry);
assert_eq!(state.next_id, 1);
assert_eq!(state.entries.len(), 1);
let (id, _) = state.entries.remove(0).unwrap();
assert_eq!(id, 0);
}
#[test]
fn test_next_batch_empty() {
let mut state = State::new(false, 1, None, 0);
assert!(state.next_batch(None, 1, 1).is_none());
assert!(state.next_batch(Some(1), 1, 1).is_none());
}
#[test]
fn test_next_batch_min_size() {
let mut state = State::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
state.append(entry1);
state.append(entry2);
let (entries, batch, _) = state.next_batch(None, 2, 2).unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&0));
assert!(entries.contains_key(&1));
assert!(entries.get(&0).unwrap().batch_time.is_some());
assert!(entries.get(&1).unwrap().batch_time.is_some());
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 2);
assert_eq!(state.next_id, 2);
assert_eq!(state.entries.len(), 0);
assert_eq!(state.next_batch_id, 1);
let (entry3, _guard3) = default_entry();
state.append(entry3);
assert!(state.next_batch(Some(2), 2, 2).is_none());
assert_eq!(state.next_id, 3);
assert_eq!(state.entries.len(), 1);
let (id, _) = state.entries.remove(0).unwrap();
assert_eq!(id, 2);
}
#[test]
fn test_next_batch_token_budget() {
let mut state = State::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
state.append(entry1);
state.append(entry2);
let (entries, batch, _) = state.next_batch(None, 1, 1).unwrap();
assert_eq!(entries.len(), 1);
assert!(entries.contains_key(&0));
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 1);
assert_eq!(state.next_id, 2);
assert_eq!(state.entries.len(), 1);
assert_eq!(state.next_batch_id, 1);
let (entry3, _guard3) = default_entry();
state.append(entry3);
let (entries, batch, _) = state.next_batch(None, 3, 3).unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&1));
assert!(entries.contains_key(&2));
assert_eq!(batch.id, 1);
assert_eq!(batch.size, 2);
assert_eq!(state.next_id, 3);
assert_eq!(state.entries.len(), 0);
assert_eq!(state.next_batch_id, 2);
}
#[tokio::test]
async fn test_queue_append() {
let queue = Queue::new(false, 1, None, 0);
let (entry, _guard) = default_entry();
queue.append(entry);
}
#[tokio::test]
async fn test_queue_next_batch_empty() {
let queue = Queue::new(false, 1, None, 0);
assert!(queue.next_batch(None, 1, 1).await.is_none());
assert!(queue.next_batch(Some(1), 1, 1).await.is_none());
}
#[tokio::test]
async fn test_queue_next_batch_min_size() {
let queue = Queue::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
let (entries, batch, _) = queue.next_batch(None, 2, 2).await.unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&0));
assert!(entries.contains_key(&1));
assert!(entries.get(&0).unwrap().batch_time.is_some());
assert!(entries.get(&1).unwrap().batch_time.is_some());
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 2);
let (entry3, _guard3) = default_entry();
queue.append(entry3);
// Not enough requests pending
assert!(queue.next_batch(Some(2), 2, 2).await.is_none());
// Not enough token budget
assert!(queue.next_batch(Some(1), 0, 0).await.is_none());
// Ok
let (entries2, batch2, _) = queue.next_batch(Some(1), 2, 2).await.unwrap();
assert_eq!(entries2.len(), 1);
assert!(entries2.contains_key(&2));
assert!(entries2.get(&2).unwrap().batch_time.is_some());
assert_eq!(batch2.id, 1);
assert_eq!(batch2.size, 1);
}
#[tokio::test]
async fn test_queue_next_batch_token_budget() {
let queue = Queue::new(false, 1, None, 0);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
let (entries, batch, _) = queue.next_batch(None, 1, 1).await.unwrap();
assert_eq!(entries.len(), 1);
assert!(entries.contains_key(&0));
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 1);
let (entry3, _guard3) = default_entry();
queue.append(entry3);
let (entries, batch, _) = queue.next_batch(None, 3, 3).await.unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&1));
assert!(entries.contains_key(&2));
assert_eq!(batch.id, 1);
assert_eq!(batch.size, 2);
}
#[tokio::test]
async fn test_queue_next_batch_token_speculate() {
let queue = Queue::new(false, 1, None, 2);
let (entry1, _guard1) = default_entry();
let (entry2, _guard2) = default_entry();
queue.append(entry1);
queue.append(entry2);
// Budget of 1 is not enough
assert!(queue.next_batch(None, 1, 1).await.is_none());
let (entries, batch, _) = queue.next_batch(None, 6, 6).await.unwrap();
assert_eq!(entries.len(), 2);
assert!(entries.contains_key(&0));
assert!(entries.contains_key(&1));
assert_eq!(batch.id, 0);
assert_eq!(batch.size, 2);
}
#[tokio::test]
async fn test_queue_next_batch_dropped_receiver() {
let queue = Queue::new(false, 1, None, 0);
let (entry, _) = default_entry();
queue.append(entry);
assert!(queue.next_batch(None, 1, 1).await.is_none());
}
}
| 0 |