No such file or directory: 'ldconfig'
I'm trying to run the chat example on an A100 GPU (with the dependencies installed as described on the model page), but I get an error related to the `ldconfig` binary:
File "/cfs/home/u021543/test_cogvlm.py", line 27, in <module>
outputs = model.generate(**inputs, **gen_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/transformers/generation/utils.py", line 1673, in generate
return self.greedy_search(
^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/transformers/generation/utils.py", line 2521, in greedy_search
outputs = self(
^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/modeling_cogvlm.py", line 610, in forward
outputs = self.model(
^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/modeling_cogvlm.py", line 392, in forward
return self.llm_forward(
^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/modeling_cogvlm.py", line 476, in llm_forward
layer_outputs = decoder_layer(
^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/modeling_cogvlm.py", line 249, in forward
hidden_states, self_attn_weights, present_key_value = self.self_attn(
^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/modeling_cogvlm.py", line 197, in forward
query_states, key_states = self.rotary_emb(query_states, key_states, position_ids=position_ids, max_seqlen=position_ids.max() + 1)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/util.py", line 469, in forward
q = apply_rotary_emb_func(
^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/util.py", line 329, in apply_rotary_emb
return ApplyRotaryEmb.apply(
^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/torch/autograd/function.py", line 539, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/util.py", line 255, in forward
out = apply_rotary(
^^^^^^^^^^^^^
File "/cfs/home/u021543/.cache/huggingface/modules/transformers_modules/THUDM/cogvlm-chat-hf/2ecffe1437c99c459f202cce1458e8ccfa4a34c5/util.py", line 212, in apply_rotary
rotary_kernel[grid](
File "<string>", line 63, in rotary_kernel
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/triton/compiler/compiler.py", line 425, in compile
so_path = make_stub(name, signature, constants)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/triton/compiler/make_launcher.py", line 39, in make_stub
so = _build(name, src_path, tmpdir)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/triton/common/build.py", line 61, in _build
cuda_lib_dirs = libcuda_dirs()
^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/site-packages/triton/common/build.py", line 21, in libcuda_dirs
libs = subprocess.check_output(["ldconfig", "-p"]).decode()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/subprocess.py", line 466, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/subprocess.py", line 548, in run
with Popen(*popenargs, **kwargs) as process:
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/cfs/home/u021543/miniconda3/lib/python3.11/subprocess.py", line 1026, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/cfs/home/u021543/miniconda3/lib/python3.11/subprocess.py", line 1950, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'ldconfig'
```
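A quick way to confirm that this is a PATH-visibility problem (rather than a missing library) is to check what the Python process itself can see. A minimal sketch:

```python
import os
import shutil

# If this prints None, 'ldconfig' is not visible on the PATH this Python
# process inherited, which is what triggers the FileNotFoundError above.
print(shutil.which("ldconfig"))
print(os.environ.get("PATH"))
```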
I went into `<venv-path>/site-packages/triton/common/build.py` and changed `subprocess.check_output(["ldconfig", "-p"]).decode()` to `subprocess.check_output(["/sbin/ldconfig", "-p"]).decode()`. Adding `ldconfig` to PATH wasn't enough on its own: since `check_output` isn't called with `shell=True`, the updated environment variables apparently weren't picked up. I ran into more issues trying to install the latest version of Triton, so this hotfix was the best solution for me.
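For reference, this is the one-line change as it looks in Triton 2.x's `common/build.py` (the exact line number may vary between releases, and `ldconfig` may live elsewhere on some distros, e.g. `/usr/sbin/ldconfig`):

```python
# triton/common/build.py, inside libcuda_dirs()

# Original: relies on 'ldconfig' being resolvable via PATH, which fails here:
# libs = subprocess.check_output(["ldconfig", "-p"]).decode()

# Patched: use an absolute path so no PATH lookup is needed:
libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode()
```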
The modeling_cogvlm.py file has been updated: the latest version of the model now includes a rotary embedding implementation that has no dependency on Triton.
This update should address the issue you were experiencing. Please check it and let me know whether your problem has been resolved.
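For anyone curious what a Triton-free rotary embedding looks like, here is a minimal pure-PyTorch sketch using the interleaved-pair convention. This is an illustrative example only; the actual implementation in the updated modeling_cogvlm.py may differ in convention and details:

```python
import torch

def build_rope_cache(max_seqlen: int, head_dim: int, base: float = 10000.0):
    """Precompute the cos/sin tables used by rotary position embeddings."""
    inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
    t = torch.arange(max_seqlen).float()
    freqs = torch.outer(t, inv_freq)          # (max_seqlen, head_dim // 2)
    return freqs.cos(), freqs.sin()

def apply_rope(x, cos, sin, position_ids):
    """Rotate channel pairs of x, shaped (batch, seq, n_heads, head_dim)."""
    cos = cos[position_ids].unsqueeze(2)      # (batch, seq, 1, head_dim // 2)
    sin = sin[position_ids].unsqueeze(2)
    x1, x2 = x[..., 0::2], x[..., 1::2]       # interleaved even/odd pairs
    out = torch.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin      # 2D rotation of each pair
    out[..., 1::2] = x1 * sin + x2 * cos
    return out

# Usage: rotate query states for an 8-token sequence with 32 heads of dim 128.
q = torch.randn(1, 8, 32, 128)
cos, sin = build_rope_cache(max_seqlen=8, head_dim=128)
q_rot = apply_rope(q, cos, sin, position_ids=torch.arange(8).unsqueeze(0))
```

Because this is plain tensor arithmetic, it runs anywhere PyTorch does and never needs to JIT-compile a Triton kernel, which is what pulled in the `ldconfig` call in the first place.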