{"input": "\"\"\"Chain that takes in an input and produces an action and action input.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nimport time\nfrom abc import abstractmethod\nfrom pathlib import Path\nfrom typing import (\n Any,\n AsyncIterator,\n Callable,\n Dict,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport yaml\nfrom langchain_core._api import deprecated\nfrom langchain_core.agents import AgentAction, AgentFinish, AgentStep\nfrom langchain_core.callbacks import (\n AsyncCallbackManagerForChainRun,\n AsyncCallbackManagerForToolRun,\n BaseCallbackManager,\n CallbackManagerForChainRun,\n CallbackManagerForToolRun,\n Callbacks,\n)\nfrom langchain_core.exceptions import OutputParserException\nfrom langchain_core.language_models import BaseLanguageModel\nfrom langchain_core.messages import BaseMessage\nfrom langchain_core.output_parsers import BaseOutputParser\nfrom langchain_core.prompts import BasePromptTemplate\nfrom langchain_core.prompts.few_shot import FewShotPromptTemplate\nfrom langchain_core.prompts.prompt import PromptTemplate\nfrom langchain_core.pydantic_v1 import BaseModel, root_validator\nfrom langchain_core.runnables import Runnable, RunnableConfig, ensure_config\nfrom langchain_core.runnables.utils import AddableDict\nfrom langchain_core.tools import BaseTool\nfrom langchain_core.utils.input import get_color_mapping\n\nfrom langchain.agents.agent_iterator import AgentExecutorIterator\nfrom langchain.agents.agent_types import AgentType\nfrom langchain.agents.tools import InvalidTool\nfrom langchain.chains.base import Chain\nfrom langchain.chains.llm import LLMChain\nfrom langchain.utilities.asyncio import asyncio_timeout\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseSingleActionAgent(BaseModel):\n \"\"\"Base Single Action Agent class.\"\"\"\n\n @property\n def return_values(self) -> List[str]:\n \"\"\"Return values of the agent.\"\"\"\n return [\"output\"]\n\n def get_allowed_tools(self) -> Optional[List[str]]:\n return None\n\n @abstractmethod\n def plan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n\n @abstractmethod\n async def aplan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n :meta private:\n \"\"\"\n\n def return_stopped_response(\n self,\n early_stopping_method: str,\n intermediate_steps: List[Tuple[AgentAction, str]],\n **kwargs: Any,\n ) -> AgentFinish:\n \"\"\"Return response when agent has been stopped due to max iterations.\"\"\"\n if early_stopping_method == \"force\":\n # `force` just returns a constant string\n return AgentFinish(\n {\"output\": \"Agent stopped due to iteration limit or time limit.\"}, \"\"\n )\n else:\n raise ValueError(\n f\"Got unsupported early_stopping_method 
`{early_stopping_method}`\"\n )\n\n @classmethod\n def from_llm_and_tools(\n cls,\n llm: BaseLanguageModel,\n tools: Sequence[BaseTool],\n callback_manager: Optional[BaseCallbackManager] = None,\n **kwargs: Any,\n ) -> BaseSingleActionAgent:\n raise NotImplementedError\n\n @property\n def _agent_type(self) -> str:\n \"\"\"Return Identifier of agent type.\"\"\"\n raise NotImplementedError\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Return dictionary representation of agent.\"\"\"\n _dict = super().dict()\n try:\n _type = self._agent_type\n except NotImplementedError:\n _type = None\n if isinstance(_type, AgentType):\n _dict[\"_type\"] = str(_type.value)\n elif _type is not None:\n _dict[\"_type\"] = _type\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the agent.\n\n Args:\n file_path: Path to file to save the agent to.\n\n Example:\n .. code-block:: python\n\n # If working with agent executor\n agent.agent.save(file_path=\"path/agent.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n # Fetch dictionary to save\n agent_dict = self.dict()\n if \"_type\" not in agent_dict:\n raise NotImplementedError(f\"Agent {self} does not support saving\")\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(agent_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(agent_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def tool_run_logging_kwargs(self) -> Dict:\n return {}\n\n\nclass BaseMultiActionAgent(BaseModel):\n \"\"\"Base Multi Action Agent class.\"\"\"\n\n @property\n def return_values(self) -> List[str]:\n \"\"\"Return values of the agent.\"\"\"\n return [\"output\"]\n\n def get_allowed_tools(self) -> Optional[List[str]]:\n return None\n\n @abstractmethod\n def plan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[List[AgentAction], AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with the observations.\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Actions specifying what tool to use.\n \"\"\"\n\n @abstractmethod\n async def aplan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[List[AgentAction], AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with the observations.\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Actions specifying what tool to use.\n \"\"\"\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n :meta private:\n \"\"\"\n\n def return_stopped_response(\n self,\n early_stopping_method: str,\n intermediate_steps: List[Tuple[AgentAction, str]],\n **kwargs: Any,\n ) -> AgentFinish:\n \"\"\"Return response when agent has been stopped due to max iterations.\"\"\"\n if early_stopping_method == \"force\":\n # `force` just returns a constant string\n return AgentFinish({\"output\": \"Agent stopped due to max iterations.\"}, \"\")\n else:\n raise ValueError(\n f\"Got unsupported early_stopping_method 
`{early_stopping_method}`\"\n )\n\n @property\n def _agent_type(self) -> str:\n \"\"\"Return Identifier of agent type.\"\"\"\n raise NotImplementedError\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Return dictionary representation of agent.\"\"\"\n _dict = super().dict()\n try:\n _dict[\"_type\"] = str(self._agent_type)\n except NotImplementedError:\n pass\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the agent.\n\n Args:\n file_path: Path to file to save the agent to.\n\n Example:\n .. code-block:: python\n\n # If working with agent executor\n agent.agent.save(file_path=\"path/agent.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n # Fetch dictionary to save\n agent_dict = self.dict()\n if \"_type\" not in agent_dict:\n raise NotImplementedError(f\"Agent {self} does not support saving.\")\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(agent_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(agent_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def tool_run_logging_kwargs(self) -> Dict:\n return {}\n\n\nclass AgentOutputParser(BaseOutputParser[Union[AgentAction, AgentFinish]]):\n \"\"\"Base class for parsing agent output into agent action/finish.\"\"\"\n\n @abstractmethod\n def parse(self, text: str) -> Union[AgentAction, AgentFinish]:\n \"\"\"Parse text into agent action/finish.\"\"\"\n\n\nclass MultiActionAgentOutputParser(\n BaseOutputParser[Union[List[AgentAction], AgentFinish]]\n):\n \"\"\"Base class for parsing agent output into agent actions/finish.\"\"\"\n\n @abstractmethod\n def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:\n \"\"\"Parse text into agent actions/finish.\"\"\"\n\n\nclass RunnableAgent(BaseSingleActionAgent):\n \"\"\"Agent powered by runnables.\"\"\"\n\n runnable: Runnable[dict, Union[AgentAction, AgentFinish]]\n \"\"\"Runnable to call to get agent action.\"\"\"\n input_keys_arg: List[str] = []\n return_keys_arg: List[str] = []\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def return_values(self) -> List[str]:\n \"\"\"Return values of the agent.\"\"\"\n return self.return_keys_arg\n\n @property\n def input_keys(self) -> List[str]:\n return self.input_keys_arg\n\n def plan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Based on past history and current inputs, decide what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with the observations.\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n inputs = {**kwargs, **{\"intermediate_steps\": intermediate_steps}}\n # Use streaming to make sure that the underlying LLM is invoked in a streaming\n # fashion to make it possible to get access to the individual LLM tokens\n # when using stream_log with the Agent Executor.\n # Because the response from the plan is not a generator, we need to\n # accumulate the output into final output and return that.\n final_output: Any = None\n for chunk in self.runnable.stream(inputs, config={\"callbacks\": 
callbacks}):\n if final_output is None:\n final_output = chunk\n else:\n final_output += chunk\n\n return final_output\n\n async def aplan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[\n AgentAction,\n AgentFinish,\n ]:\n \"\"\"Based on past history and current inputs, decide what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n inputs = {**kwargs, **{\"intermediate_steps\": intermediate_steps}}\n final_output: Any = None\n # Use streaming to make sure that the underlying LLM is invoked in a streaming\n # fashion to make it possible to get access to the individual LLM tokens\n # when using stream_log with the Agent Executor.\n # Because the response from the plan is not a generator, we need to\n # accumulate the output into final output and return that.\n async for chunk in self.runnable.astream(\n inputs, config={\"callbacks\": callbacks}\n ):\n if final_output is None:\n final_output = chunk\n else:\n final_output += chunk\n return final_output\n\n\nclass RunnableMultiActionAgent(BaseMultiActionAgent):\n \"\"\"Agent powered by runnables.\"\"\"\n\n runnable: Runnable[dict, Union[List[AgentAction], AgentFinish]]\n \"\"\"Runnable to call to get agent actions.\"\"\"\n input_keys_arg: List[str] = []\n return_keys_arg: List[str] = []\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def return_values(self) -> List[str]:\n \"\"\"Return values of the agent.\"\"\"\n return self.return_keys_arg\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n Returns:\n List of input keys.\n \"\"\"\n return self.input_keys_arg\n\n def plan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[\n List[AgentAction],\n AgentFinish,\n ]:\n \"\"\"Based on past history and current inputs, decide what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with the observations.\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n inputs = {**kwargs, **{\"intermediate_steps\": intermediate_steps}}\n # Use streaming to make sure that the underlying LLM is invoked in a streaming\n # fashion to make it possible to get access to the individual LLM tokens\n # when using stream_log with the Agent Executor.\n # Because the response from the plan is not a generator, we need to\n # accumulate the output into final output and return that.\n final_output: Any = None\n for chunk in self.runnable.stream(inputs, config={\"callbacks\": callbacks}):\n if final_output is None:\n final_output = chunk\n else:\n final_output += chunk\n\n return final_output\n\n async def aplan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[\n List[AgentAction],\n AgentFinish,\n ]:\n \"\"\"Based on past history and current inputs, decide what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n inputs = {**kwargs, **{\"intermediate_steps\": intermediate_steps}}\n # Use streaming to make sure that the underlying LLM is 
invoked in a streaming\n # fashion to make it possible to get access to the individual LLM tokens\n # when using stream_log with the Agent Executor.\n # Because the response from the plan is not a generator, we need to\n # accumulate the output into final output and return that.\n final_output: Any = None\n async for chunk in self.runnable.astream(\n inputs, config={\"callbacks\": callbacks}\n ):\n if final_output is None:\n final_output = chunk\n else:\n final_output += chunk\n\n return final_output\n\n\n@deprecated(\n \"0.1.0\",\n alternative=(\n \"Use new agent constructor methods like create_react_agent, create_json_agent, \"\n \"create_structured_chat_agent, etc.\"\n ),\n removal=\"0.2.0\",\n)\nclass LLMSingleActionAgent(BaseSingleActionAgent):\n \"\"\"Base class for single action agents.\"\"\"\n\n llm_chain: LLMChain\n \"\"\"LLMChain to use for agent.\"\"\"\n output_parser: AgentOutputParser\n \"\"\"Output parser to use for agent.\"\"\"\n stop: List[str]\n \"\"\"List of strings to stop on.\"\"\"\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n Returns:\n List of input keys.\n \"\"\"\n return list(set(self.llm_chain.input_keys) - {\"intermediate_steps\"})\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Return dictionary representation of agent.\"\"\"\n _dict = super().dict()\n del _dict[\"output_parser\"]\n return _dict\n\n def plan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with the observations.\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n output = self.llm_chain.run(\n intermediate_steps=intermediate_steps,\n stop=self.stop,\n callbacks=callbacks,\n **kwargs,\n )\n return self.output_parser.parse(output)\n\n async def aplan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n output = await self.llm_chain.arun(\n intermediate_steps=intermediate_steps,\n stop=self.stop,\n callbacks=callbacks,\n **kwargs,\n )\n return self.output_parser.parse(output)\n\n def tool_run_logging_kwargs(self) -> Dict:\n return {\n \"llm_prefix\": \"\",\n \"observation_prefix\": \"\" if len(self.stop) == 0 else self.stop[0],\n }\n\n\n@deprecated(\n \"0.1.0\",\n alternative=(\n \"Use new agent constructor methods like create_react_agent, create_json_agent, \"\n \"create_structured_chat_agent, etc.\"\n ),\n removal=\"0.2.0\",\n)\nclass Agent(BaseSingleActionAgent):\n \"\"\"Agent that calls the language model and deciding the action.\n\n This is driven by an LLMChain. 
The prompt in the LLMChain MUST include\n a variable called \"agent_scratchpad\" where the agent can put its\n intermediary work.\n \"\"\"\n\n llm_chain: LLMChain\n output_parser: AgentOutputParser\n allowed_tools: Optional[List[str]] = None\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Return dictionary representation of agent.\"\"\"\n _dict = super().dict()\n del _dict[\"output_parser\"]\n return _dict\n\n def get_allowed_tools(self) -> Optional[List[str]]:\n return self.allowed_tools\n\n @property\n def return_values(self) -> List[str]:\n return [\"output\"]\n\n def _fix_text(self, text: str) -> str:\n \"\"\"Fix the text.\"\"\"\n raise ValueError(\"fix_text not implemented for this agent.\")\n\n @property\n def _stop(self) -> List[str]:\n return [\n f\"\\n{self.observation_prefix.rstrip()}\",\n f\"\\n\\t{self.observation_prefix.rstrip()}\",\n ]\n\n def _construct_scratchpad(\n self, intermediate_steps: List[Tuple[AgentAction, str]]\n ) -> Union[str, List[BaseMessage]]:\n \"\"\"Construct the scratchpad that lets the agent continue its thought process.\"\"\"\n thoughts = \"\"\n for action, observation in intermediate_steps:\n thoughts += action.log\n thoughts += f\"\\n{self.observation_prefix}{observation}\\n{self.llm_prefix}\"\n return thoughts\n\n def plan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)\n full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)\n return self.output_parser.parse(full_output)\n\n async def aplan(\n self,\n intermediate_steps: List[Tuple[AgentAction, str]],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"Given input, decided what to do.\n\n Args:\n intermediate_steps: Steps the LLM has taken to date,\n along with observations\n callbacks: Callbacks to run.\n **kwargs: User inputs.\n\n Returns:\n Action specifying what tool to use.\n \"\"\"\n full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)\n full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs)\n agent_output = await self.output_parser.aparse(full_output)\n return agent_output\n\n def get_full_inputs(\n self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n ) -> Dict[str, Any]:\n \"\"\"Create the full inputs for the LLMChain from intermediate steps.\"\"\"\n thoughts = self._construct_scratchpad(intermediate_steps)\n new_inputs = {\"agent_scratchpad\": thoughts, \"stop\": self._stop}\n full_inputs = {**kwargs, **new_inputs}\n return full_inputs\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n :meta private:\n \"\"\"\n return list(set(self.llm_chain.input_keys) - {\"agent_scratchpad\"})\n\n @root_validator()\n def validate_prompt(cls, values: Dict) -> Dict:\n \"\"\"Validate that prompt matches format.\"\"\"\n prompt = values[\"llm_chain\"].prompt\n if \"agent_scratchpad\" not in prompt.input_variables:\n logger.warning(\n \"`agent_scratchpad` should be a variable in prompt.input_variables.\"\n \" Did not find it, so adding it at the end.\"\n )\n prompt.input_variables.append(\"agent_scratchpad\")\n if isinstance(prompt, 
PromptTemplate):\n prompt.template += \"\\n{agent_scratchpad}\"\n elif isinstance(prompt, FewShotPromptTemplate):\n prompt.suffix += \"\\n{agent_scratchpad}\"\n else:\n raise ValueError(f\"Got unexpected prompt type {type(prompt)}\")\n return values\n\n @property\n @abstractmethod\n def observation_prefix(self) -> str:\n \"\"\"Prefix to append the observation with.\"\"\"\n\n @property\n @abstractmethod\n def llm_prefix(self) -> str:\n \"\"\"Prefix to append the LLM call with.\"\"\"\n\n @classmethod\n @abstractmethod\n def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:\n \"\"\"Create a prompt for this class.\"\"\"\n\n @classmethod\n def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:\n \"\"\"Validate that appropriate tools are passed in.\"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:\n \"\"\"Get default output parser for this class.\"\"\"\n\n @classmethod\n def from_llm_and_tools(\n cls,\n llm: BaseLanguageModel,\n tools: Sequence[BaseTool],\n callback_manager: Optional[BaseCallbackManager] = None,\n output_parser: Optional[AgentOutputParser] = None,\n **kwargs: Any,\n ) -> Agent:\n \"\"\"Construct an agent from an LLM and tools.\"\"\"\n cls._validate_tools(tools)\n llm_chain = LLMChain(\n llm=llm,\n prompt=cls.create_prompt(tools),\n callback_manager=callback_manager,\n )\n tool_names = [tool.name for tool in tools]\n _output_parser = output_parser or cls._get_default_output_parser()\n return cls(\n llm_chain=llm_chain,\n allowed_tools=tool_names,\n output_parser=_output_parser,\n **kwargs,\n )\n\n def return_stopped_response(\n self,\n early_stopping_method: str,\n intermediate_steps: List[Tuple[AgentAction, str]],\n **kwargs: Any,\n ) -> AgentFinish:\n \"\"\"Return response when agent has been stopped due to max iterations.\"\"\"\n if early_stopping_method == \"force\":\n # `force` just returns a constant string\n return AgentFinish(\n {\"output\": \"Agent stopped due to iteration limit or time limit.\"}, \"\"\n )\n elif early_stopping_method == \"generate\":\n # Generate does one final forward pass\n thoughts = \"\"\n for action, observation in intermediate_steps:\n thoughts += action.log\n thoughts += (\n f\"\\n{self.observation_prefix}{observation}\\n{self.llm_prefix}\"\n )\n # Adding to the previous steps, we now tell the LLM to make a final pred\n thoughts += (\n \"\\n\\nI now need to return a final answer based on the previous steps:\"\n )\n new_inputs = {\"agent_scratchpad\": thoughts, \"stop\": self._stop}\n full_inputs = {**kwargs, **new_inputs}\n full_output = self.llm_chain.predict(**full_inputs)\n # We try to extract a final answer\n parsed_output = self.output_parser.parse(full_output)\n if isinstance(parsed_output, AgentFinish):\n # If we can extract, we send the correct stuff\n return parsed_output\n else:\n # If we can extract, but the tool is not the final tool,\n # we just return the full output\n return AgentFinish({\"output\": full_output}, full_output)\n else:\n raise ValueError(\n \"early_stopping_method should be one of `force` or `generate`, \"\n f\"got {early_stopping_method}\"\n )\n\n def tool_run_logging_kwargs(self) -> Dict:\n return {\n \"llm_prefix\": self.llm_prefix,\n \"observation_prefix\": self.observation_prefix,\n }\n\n\nclass ExceptionTool(BaseTool):\n \"\"\"Tool that just returns the query.\"\"\"\n\n name: str = \"_Exception\"\n \"\"\"Name of the tool.\"\"\"\n description: str = \"Exception tool\"\n \"\"\"Description of the tool.\"\"\"\n\n def 
_run(\n self,\n query: str,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n ) -> str:\n return query\n\n async def _arun(\n self,\n query: str,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n ) -> str:\n return query\n\n\nNextStepOutput = List[Union[AgentFinish, AgentAction, AgentStep]]\n\n\nclass AgentExecutor(Chain):\n \"\"\"Agent that is using tools.\"\"\"\n\n agent: Union[BaseSingleActionAgent, BaseMultiActionAgent]\n \"\"\"The agent to run for creating a plan and determining actions\n to take at each step of the execution loop.\"\"\"\n tools: Sequence[BaseTool]\n \"\"\"The valid tools the agent can call.\"\"\"\n return_intermediate_steps: bool = False\n \"\"\"Whether to return the agent's trajectory of intermediate steps\n at the end in addition to the final output.\"\"\"\n max_iterations: Optional[int] = 15\n \"\"\"The maximum number of steps to take before ending the execution\n loop.\n \n Setting to 'None' could lead to an infinite loop.\"\"\"\n max_execution_time: Optional[float] = None\n \"\"\"The maximum amount of wall clock time to spend in the execution\n loop.\n \"\"\"\n early_stopping_method: str = \"force\"\n \"\"\"The method to use for early stopping if the agent never\n returns `AgentFinish`. Either 'force' or 'generate'.\n\n `\"force\"` returns a string saying that it stopped because it met a\n time or iteration limit.\n \n `\"generate\"` calls the agent's LLM Chain one final time to generate\n a final answer based on the previous steps.\n \"\"\"\n handle_parsing_errors: Union[\n bool, str, Callable[[OutputParserException], str]\n ] = False\n \"\"\"How to handle errors raised by the agent's output parser.\n Defaults to `False`, which raises the error.\n If `true`, the error will be sent back to the LLM as an observation.\n If a string, the string itself will be sent to the LLM as an observation.\n If a callable function, the function will be called with the exception\n as an argument, and the result of that function will be passed to the agent\n as an observation.\n \"\"\"\n trim_intermediate_steps: Union[\n int, Callable[[List[Tuple[AgentAction, str]]], List[Tuple[AgentAction, str]]]\n ] = -1\n\n @classmethod\n def from_agent_and_tools(\n cls,\n agent: Union[BaseSingleActionAgent, BaseMultiActionAgent],\n tools: Sequence[BaseTool],\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> AgentExecutor:\n \"\"\"Create from agent and tools.\"\"\"\n return cls(\n agent=agent,\n tools=tools,\n callbacks=callbacks,\n **kwargs,\n )\n\n @root_validator()\n def validate_tools(cls, values: Dict) -> Dict:\n \"\"\"Validate that tools are compatible with agent.\"\"\"\n agent = values[\"agent\"]\n tools = values[\"tools\"]\n allowed_tools = agent.get_allowed_tools()\n if allowed_tools is not None:\n if set(allowed_tools) != set([tool.name for tool in tools]):\n raise ValueError(\n f\"Allowed tools ({allowed_tools}) different than \"\n f\"provided tools ({[tool.name for tool in tools]})\"\n )\n return values\n\n @root_validator()\n def validate_return_direct_tool(cls, values: Dict) -> Dict:\n \"\"\"Validate that tools are compatible with agent.\"\"\"\n agent = values[\"agent\"]\n tools = values[\"tools\"]\n if isinstance(agent, BaseMultiActionAgent):\n for tool in tools:\n if tool.return_direct:\n raise ValueError(\n \"Tools that have `return_direct=True` are not allowed \"\n \"in multi-action agents\"\n )\n return values\n\n @root_validator(pre=True)\n def validate_runnable_agent(cls, values: Dict) -> Dict:\n \"\"\"Convert runnable to agent if passed 
in.\"\"\"\n agent = values[\"agent\"]\n if isinstance(agent, Runnable):\n try:\n output_type = agent.OutputType\n except Exception as _:\n multi_action = False\n else:\n multi_action = output_type == Union[List[AgentAction], AgentFinish]\n\n if multi_action:\n values[\"agent\"] = RunnableMultiActionAgent(runnable=agent)\n else:\n values[\"agent\"] = RunnableAgent(runnable=agent)\n return values\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Raise error - saving not supported for Agent Executors.\"\"\"\n raise ValueError(\n \"Saving not supported for agent executors. \"\n \"If you are trying to save the agent, please use the \"\n \"`.save_agent(...)`\"\n )\n\n def save_agent(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the underlying agent.\"\"\"\n return self.agent.save(file_path)\n\n def iter(\n self,\n inputs: Any,\n callbacks: Callbacks = None,\n *,\n include_run_info: bool = False,\n async_: bool = False, # arg kept for backwards compat, but ignored\n ) -> AgentExecutorIterator:\n \"\"\"Enables iteration over steps taken to reach final output.\"\"\"\n return AgentExecutorIterator(\n self,\n inputs,\n callbacks,\n tags=self.tags,\n include_run_info=include_run_info,\n )\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n :meta private:\n \"\"\"\n return self.agent.input_keys\n\n @property\n def output_keys(self) -> List[str]:\n \"\"\"Return the singular output key.\n\n :meta private:\n \"\"\"\n if self.return_intermediate_steps:\n return self.agent.return_values + [\"intermediate_steps\"]\n else:\n return self.agent.return_values\n\n def lookup_tool(self, name: str) -> BaseTool:\n \"\"\"Lookup tool by name.\"\"\"\n return {tool.name: tool for tool in self.tools}[name]\n\n def _should_continue(self, iterations: int, time_elapsed: float) -> bool:\n if self.max_iterations is not None and iterations >= self.max_iterations:\n return False\n if (\n self.max_execution_time is not None\n and time_elapsed >= self.max_execution_time\n ):\n return False\n\n return True\n\n def _return(\n self,\n output: AgentFinish,\n intermediate_steps: list,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n if run_manager:\n run_manager.on_agent_finish(output, color=\"green\", verbose=self.verbose)\n final_output = output.return_values\n if self.return_intermediate_steps:\n final_output[\"intermediate_steps\"] = intermediate_steps\n return final_output\n\n async def _areturn(\n self,\n output: AgentFinish,\n intermediate_steps: list,\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n if run_manager:\n await run_manager.on_agent_finish(\n output, color=\"green\", verbose=self.verbose\n )\n final_output = output.return_values\n if self.return_intermediate_steps:\n final_output[\"intermediate_steps\"] = intermediate_steps\n return final_output\n\n def _consume_next_step(\n self, values: NextStepOutput\n ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:\n if isinstance(values[-1], AgentFinish):\n assert len(values) == 1\n return values[-1]\n else:\n return [\n (a.action, a.observation) for a in values if isinstance(a, AgentStep)\n ]\n\n def _take_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:\n return self._consume_next_step(\n [\n a\n 
for a in self._iter_next_step(\n name_to_tool_map,\n color_mapping,\n inputs,\n intermediate_steps,\n run_manager,\n )\n ]\n )\n\n def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self.agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. \"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )\n\n def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n observation = 
InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)\n\n async def _atake_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:\n return self._consume_next_step(\n [\n a\n async for a in self._aiter_next_step(\n name_to_tool_map,\n color_mapping,\n inputs,\n intermediate_steps,\n run_manager,\n )\n ]\n )\n\n async def _aiter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> AsyncIterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = await self.agent.aplan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. 
\"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n observation = await ExceptionTool().arun(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n\n # Use asyncio.gather to run multiple tool.arun() calls concurrently\n result = await asyncio.gather(\n *[\n self._aperform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )\n for agent_action in actions\n ],\n )\n\n # TODO This could yield each result as it becomes available\n for chunk in result:\n yield chunk\n\n async def _aperform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n await run_manager.on_agent_action(\n agent_action, verbose=self.verbose, color=\"green\"\n )\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = await tool.arun(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n observation = await InvalidTool().arun(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)\n\n def _call(\n self,\n inputs: Dict[str, str],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Run text through and get agent response.\"\"\"\n # Construct a mapping of tool name to tool for easy lookup\n name_to_tool_map = {tool.name: tool for tool in self.tools}\n # We construct a mapping from each tool to a color, used for logging.\n color_mapping = get_color_mapping(\n [tool.name for tool in self.tools], excluded_colors=[\"green\", \"red\"]\n )\n intermediate_steps: List[Tuple[AgentAction, str]] = []\n # Let's start tracking the number of iterations and time elapsed\n iterations = 0\n time_elapsed = 0.0\n start_time = time.time()\n # 
We now enter the agent loop (until it returns something).\n while self._should_continue(iterations, time_elapsed):\n next_step_output = self._take_next_step(\n name_to_tool_map,\n color_mapping,\n inputs,\n intermediate_steps,\n run_manager=run_manager,\n )\n if isinstance(next_step_output, AgentFinish):\n return self._return(\n next_step_output, intermediate_steps, run_manager=run_manager\n )\n\n intermediate_steps.extend(next_step_output)\n if len(next_step_output) == 1:\n next_step_action = next_step_output[0]\n # See if tool should return directly\n tool_return = self._get_tool_return(next_step_action)\n if tool_return is not None:\n return self._return(\n tool_return, intermediate_steps, run_manager=run_manager\n )\n iterations += 1\n time_elapsed = time.time() - start_time\n output = self.agent.return_stopped_response(\n self.early_stopping_method, intermediate_steps, **inputs\n )\n return self._return(output, intermediate_steps, run_manager=run_manager)\n\n async def _acall(\n self,\n inputs: Dict[str, str],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, str]:\n \"\"\"Run text through and get agent response.\"\"\"\n # Construct a mapping of tool name to tool for easy lookup\n name_to_tool_map = {tool.name: tool for tool in self.tools}\n # We construct a mapping from each tool to a color, used for logging.\n color_mapping = get_color_mapping(\n [tool.name for tool in self.tools], excluded_colors=[\"green\"]\n )\n intermediate_steps: List[Tuple[AgentAction, str]] = []\n # Let's start tracking the number of iterations and time elapsed\n iterations = 0\n time_elapsed = 0.0\n start_time = time.time()\n # We now enter the agent loop (until it returns something).\n try:\n async with asyncio_timeout(self.max_execution_time):\n while self._should_continue(iterations, time_elapsed):\n next_step_output = await self._atake_next_step(\n name_to_tool_map,\n color_mapping,\n inputs,\n intermediate_steps,\n run_manager=run_manager,\n )\n if isinstance(next_step_output, AgentFinish):\n return await self._areturn(\n next_step_output,\n intermediate_steps,\n run_manager=run_manager,\n )\n\n intermediate_steps.extend(next_step_output)\n if len(next_step_output) == 1:\n next_step_action = next_step_output[0]\n # See if tool should return directly\n tool_return = self._get_tool_return(next_step_action)\n if tool_return is not None:\n return await self._areturn(\n tool_return, intermediate_steps, run_manager=run_manager\n )\n\n iterations += 1\n time_elapsed = time.time() - start_time\n output = self.agent.return_stopped_response(\n self.early_stopping_method, intermediate_steps, **inputs\n )\n return await self._areturn(\n output, intermediate_steps, run_manager=run_manager\n )\n except (TimeoutError, asyncio.TimeoutError):\n # stop early when interrupted by the async timeout\n output = self.agent.return_stopped_response(\n self.early_stopping_method, intermediate_steps, **inputs\n )\n return await self._areturn(\n output, intermediate_steps, run_manager=run_manager\n )\n\n def _get_tool_return(\n self, next_step_output: Tuple[AgentAction, str]\n ) -> Optional[AgentFinish]:\n \"\"\"Check if the tool is a returning tool.\"\"\"\n agent_action, observation = next_step_output\n name_to_tool_map = {tool.name: tool for tool in self.tools}\n return_value_key = \"output\"\n if len(self.agent.return_values) > 0:\n return_value_key = self.agent.return_values[0]\n # Invalid tools won't be in the map, so we return False.\n if agent_action.tool in name_to_tool_map:\n if 
name_to_tool_map[agent_action.tool].return_direct:\n return AgentFinish(\n {return_value_key: observation},\n \"\",\n )\n return None\n\n def _prepare_intermediate_steps(\n self, intermediate_steps: List[Tuple[AgentAction, str]]\n ) -> List[Tuple[AgentAction, str]]:\n if (\n isinstance(self.trim_intermediate_steps, int)\n and self.trim_intermediate_steps > 0\n ):\n return intermediate_steps[-self.trim_intermediate_steps :]\n elif callable(self.trim_intermediate_steps):\n return self.trim_intermediate_steps(intermediate_steps)\n else:\n return intermediate_steps\n\n def stream(\n self,\n input: Union[Dict[str, Any], Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Iterator[AddableDict]:\n \"\"\"Enables streaming over steps taken to reach final output.\"\"\"\n config = ensure_config(config)\n iterator = AgentExecutorIterator(\n self,\n input,\n config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n yield_actions=True,\n **kwargs,\n )\n for step in iterator:\n yield step\n\n async def astream(\n self,\n input: Union[Dict[str, Any], Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> AsyncIterator[AddableDict]:\n \"\"\"Enables streaming over steps taken to reach final output.\"\"\"\n config = ensure_config(config)\n iterator = AgentExecutorIterator(\n self,\n input,\n config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n yield_actions=True,\n **kwargs,\n )\n async for step in iterator:\n yield step\n", "output": ["Config", "LLMSingleActionAgent", "BaseMultiActionAgent", "RunnableMultiActionAgent", "MultiActionAgentOutputParser", "Agent", "BaseSingleActionAgent", "AgentOutputParser", "ExceptionTool", "AgentExecutor", "RunnableAgent"], "metadata": {"file_path": "langchain-master/libs/langchain/langchain/agents/agent.py", "file_length": 14571, "symbol_dict": [{"symbol": "Agent", "type": "mannual_defined_class", "byte_location": 19563, "location": 5309}, {"symbol": "MultiActionAgentOutputParser", "type": "mannual_defined_class", "byte_location": 9940, "location": 2764}, {"symbol": "Config", "type": "mannual_defined_class", "byte_location": 10529, "location": 2939}, {"symbol": "BaseSingleActionAgent", "type": "mannual_defined_class", "byte_location": 1758, "location": 490}, {"symbol": "BaseMultiActionAgent", "type": "mannual_defined_class", "byte_location": 5929, "location": 1648}, {"symbol": "AgentExecutor", "type": "mannual_defined_class", "byte_location": 28269, "location": 7706}, {"symbol": "AgentOutputParser", "type": "mannual_defined_class", "byte_location": 9651, "location": 2685}, {"symbol": "RunnableAgent", "type": "mannual_defined_class", "byte_location": 10260, "location": 2852}, {"symbol": "LLMSingleActionAgent", "type": "mannual_defined_class", "byte_location": 16964, "location": 4579}, {"symbol": "ExceptionTool", "type": "mannual_defined_class", "byte_location": 27681, "location": 7535}, {"symbol": "RunnableMultiActionAgent", "type": "mannual_defined_class", "byte_location": 13433, "location": 3660}]}} {"input": "from __future__ import annotations\n\nimport logging\nimport os\nimport sys\nimport warnings\nfrom typing import (\n AbstractSet,\n Any,\n AsyncIterator,\n Callable,\n Collection,\n Dict,\n Iterator,\n List,\n Literal,\n Mapping,\n Optional,\n Set,\n Tuple,\n Union,\n)\n\nfrom langchain_core._api.deprecation import deprecated\nfrom langchain_core.callbacks import (\n 
AsyncCallbackManagerForLLMRun,\n CallbackManagerForLLMRun,\n)\nfrom langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator\nfrom langchain_core.outputs import Generation, GenerationChunk, LLMResult\nfrom langchain_core.pydantic_v1 import Field, root_validator\nfrom langchain_core.utils import get_from_dict_or_env, get_pydantic_field_names\nfrom langchain_core.utils.utils import build_extra_kwargs\n\nfrom langchain_community.utils.openai import is_openai_v1\n\nlogger = logging.getLogger(__name__)\n\n\ndef update_token_usage(\n keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]\n) -> None:\n \"\"\"Update token usage.\"\"\"\n _keys_to_use = keys.intersection(response[\"usage\"])\n for _key in _keys_to_use:\n if _key not in token_usage:\n token_usage[_key] = response[\"usage\"][_key]\n else:\n token_usage[_key] += response[\"usage\"][_key]\n\n\ndef _stream_response_to_generation_chunk(\n stream_response: Dict[str, Any],\n) -> GenerationChunk:\n \"\"\"Convert a stream response to a generation chunk.\"\"\"\n if not stream_response[\"choices\"]:\n return GenerationChunk(text=\"\")\n return GenerationChunk(\n text=stream_response[\"choices\"][0][\"text\"],\n generation_info=dict(\n finish_reason=stream_response[\"choices\"][0].get(\"finish_reason\", None),\n logprobs=stream_response[\"choices\"][0].get(\"logprobs\", None),\n ),\n )\n\n\ndef _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:\n \"\"\"Update response from the stream response.\"\"\"\n response[\"choices\"][0][\"text\"] += stream_response[\"choices\"][0][\"text\"]\n response[\"choices\"][0][\"finish_reason\"] = stream_response[\"choices\"][0].get(\n \"finish_reason\", None\n )\n response[\"choices\"][0][\"logprobs\"] = stream_response[\"choices\"][0][\"logprobs\"]\n\n\ndef _streaming_response_template() -> Dict[str, Any]:\n return {\n \"choices\": [\n {\n \"text\": \"\",\n \"finish_reason\": None,\n \"logprobs\": None,\n }\n ]\n }\n\n\ndef _create_retry_decorator(\n llm: Union[BaseOpenAI, OpenAIChat],\n run_manager: Optional[\n Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]\n ] = None,\n) -> Callable[[Any], Any]:\n import openai\n\n errors = [\n openai.error.Timeout,\n openai.error.APIError,\n openai.error.APIConnectionError,\n openai.error.RateLimitError,\n openai.error.ServiceUnavailableError,\n ]\n return create_base_retry_decorator(\n error_types=errors, max_retries=llm.max_retries, run_manager=run_manager\n )\n\n\ndef completion_with_retry(\n llm: Union[BaseOpenAI, OpenAIChat],\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n) -> Any:\n \"\"\"Use tenacity to retry the completion call.\"\"\"\n if is_openai_v1():\n return llm.client.create(**kwargs)\n\n retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)\n\n @retry_decorator\n def _completion_with_retry(**kwargs: Any) -> Any:\n return llm.client.create(**kwargs)\n\n return _completion_with_retry(**kwargs)\n\n\nasync def acompletion_with_retry(\n llm: Union[BaseOpenAI, OpenAIChat],\n run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,\n **kwargs: Any,\n) -> Any:\n \"\"\"Use tenacity to retry the async completion call.\"\"\"\n if is_openai_v1():\n return await llm.async_client.create(**kwargs)\n\n retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)\n\n @retry_decorator\n async def _completion_with_retry(**kwargs: Any) -> Any:\n # Use OpenAI's async api https://github.com/openai/openai-python#async-api\n return await 
llm.client.acreate(**kwargs)\n\n return await _completion_with_retry(**kwargs)\n\n\nclass BaseOpenAI(BaseLLM):\n \"\"\"Base OpenAI large language model class.\"\"\"\n\n @property\n def lc_secrets(self) -> Dict[str, str]:\n return {\"openai_api_key\": \"OPENAI_API_KEY\"}\n\n @classmethod\n def get_lc_namespace(cls) -> List[str]:\n \"\"\"Get the namespace of the langchain object.\"\"\"\n return [\"langchain\", \"llms\", \"openai\"]\n\n @property\n def lc_attributes(self) -> Dict[str, Any]:\n attributes: Dict[str, Any] = {}\n if self.openai_api_base:\n attributes[\"openai_api_base\"] = self.openai_api_base\n\n if self.openai_organization:\n attributes[\"openai_organization\"] = self.openai_organization\n\n if self.openai_proxy:\n attributes[\"openai_proxy\"] = self.openai_proxy\n\n return attributes\n\n @classmethod\n def is_lc_serializable(cls) -> bool:\n return True\n\n client: Any = Field(default=None, exclude=True) #: :meta private:\n async_client: Any = Field(default=None, exclude=True) #: :meta private:\n model_name: str = Field(default=\"gpt-3.5-turbo-instruct\", alias=\"model\")\n \"\"\"Model name to use.\"\"\"\n temperature: float = 0.7\n \"\"\"What sampling temperature to use.\"\"\"\n max_tokens: int = 256\n \"\"\"The maximum number of tokens to generate in the completion.\n -1 returns as many tokens as possible given the prompt and\n the models maximal context size.\"\"\"\n top_p: float = 1\n \"\"\"Total probability mass of tokens to consider at each step.\"\"\"\n frequency_penalty: float = 0\n \"\"\"Penalizes repeated tokens according to frequency.\"\"\"\n presence_penalty: float = 0\n \"\"\"Penalizes repeated tokens.\"\"\"\n n: int = 1\n \"\"\"How many completions to generate for each prompt.\"\"\"\n best_of: int = 1\n \"\"\"Generates best_of completions server-side and returns the \"best\".\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n # When updating this to use a SecretStr\n # Check for classes that derive from this class (as some of them\n # may assume openai_api_key is a str)\n openai_api_key: Optional[str] = Field(default=None, alias=\"api_key\")\n \"\"\"Automatically inferred from env var `OPENAI_API_KEY` if not provided.\"\"\"\n openai_api_base: Optional[str] = Field(default=None, alias=\"base_url\")\n \"\"\"Base URL path for API requests, leave blank if not using a proxy or service \n emulator.\"\"\"\n openai_organization: Optional[str] = Field(default=None, alias=\"organization\")\n \"\"\"Automatically inferred from env var `OPENAI_ORG_ID` if not provided.\"\"\"\n # to support explicit proxy for OpenAI\n openai_proxy: Optional[str] = None\n batch_size: int = 20\n \"\"\"Batch size to use when passing multiple documents to generate.\"\"\"\n request_timeout: Union[float, Tuple[float, float], Any, None] = Field(\n default=None, alias=\"timeout\"\n )\n \"\"\"Timeout for requests to OpenAI completion API. 
Can be float, httpx.Timeout or \n None.\"\"\"\n logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)\n \"\"\"Adjust the probability of specific tokens being generated.\"\"\"\n max_retries: int = 2\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n streaming: bool = False\n \"\"\"Whether to stream the results or not.\"\"\"\n allowed_special: Union[Literal[\"all\"], AbstractSet[str]] = set()\n \"\"\"Set of special tokens that are allowed\u3002\"\"\"\n disallowed_special: Union[Literal[\"all\"], Collection[str]] = \"all\"\n \"\"\"Set of special tokens that are not allowed\u3002\"\"\"\n tiktoken_model_name: Optional[str] = None\n \"\"\"The model name to pass to tiktoken when using this class. \n Tiktoken is used to count the number of tokens in documents to constrain \n them to be under a certain limit. By default, when set to None, this will \n be the same as the embedding model name. However, there are some cases \n where you may want to use this Embedding class with a model name not \n supported by tiktoken. This can include when using Azure embeddings or \n when using one of the many model providers that expose an OpenAI-like \n API but with different models. In those cases, in order to avoid erroring \n when tiktoken is called, you can specify a model name to use here.\"\"\"\n default_headers: Union[Mapping[str, str], None] = None\n default_query: Union[Mapping[str, object], None] = None\n # Configure a custom httpx client. See the\n # [httpx documentation](https://www.python-httpx.org/api/#client) for more details.\n http_client: Union[Any, None] = None\n \"\"\"Optional httpx.Client.\"\"\"\n\n def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore\n \"\"\"Initialize the OpenAI object.\"\"\"\n model_name = data.get(\"model_name\", \"\")\n if (\n model_name.startswith(\"gpt-3.5-turbo\") or model_name.startswith(\"gpt-4\")\n ) and \"-instruct\" not in model_name:\n warnings.warn(\n \"You are trying to use a chat model. This way of initializing it is \"\n \"no longer supported. 
Instead, please use: \"\n \"`from langchain_community.chat_models import ChatOpenAI`\"\n )\n return OpenAIChat(**data)\n return super().__new__(cls)\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n allow_population_by_field_name = True\n\n @root_validator(pre=True)\n def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Build extra kwargs from additional params that were passed in.\"\"\"\n all_required_field_names = get_pydantic_field_names(cls)\n extra = values.get(\"model_kwargs\", {})\n values[\"model_kwargs\"] = build_extra_kwargs(\n extra, values, all_required_field_names\n )\n return values\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n if values[\"n\"] < 1:\n raise ValueError(\"n must be at least 1.\")\n if values[\"streaming\"] and values[\"n\"] > 1:\n raise ValueError(\"Cannot stream results when n > 1.\")\n if values[\"streaming\"] and values[\"best_of\"] > 1:\n raise ValueError(\"Cannot stream results when best_of > 1.\")\n\n values[\"openai_api_key\"] = get_from_dict_or_env(\n values, \"openai_api_key\", \"OPENAI_API_KEY\"\n )\n values[\"openai_api_base\"] = values[\"openai_api_base\"] or os.getenv(\n \"OPENAI_API_BASE\"\n )\n values[\"openai_proxy\"] = get_from_dict_or_env(\n values,\n \"openai_proxy\",\n \"OPENAI_PROXY\",\n default=\"\",\n )\n values[\"openai_organization\"] = (\n values[\"openai_organization\"]\n or os.getenv(\"OPENAI_ORG_ID\")\n or os.getenv(\"OPENAI_ORGANIZATION\")\n )\n try:\n import openai\n except ImportError:\n raise ImportError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n\n if is_openai_v1():\n client_params = {\n \"api_key\": values[\"openai_api_key\"],\n \"organization\": values[\"openai_organization\"],\n \"base_url\": values[\"openai_api_base\"],\n \"timeout\": values[\"request_timeout\"],\n \"max_retries\": values[\"max_retries\"],\n \"default_headers\": values[\"default_headers\"],\n \"default_query\": values[\"default_query\"],\n \"http_client\": values[\"http_client\"],\n }\n if not values.get(\"client\"):\n values[\"client\"] = openai.OpenAI(**client_params).completions\n if not values.get(\"async_client\"):\n values[\"async_client\"] = openai.AsyncOpenAI(**client_params).completions\n elif not values.get(\"client\"):\n values[\"client\"] = openai.Completion\n else:\n pass\n\n return values\n\n @property\n def _default_params(self) -> Dict[str, Any]:\n \"\"\"Get the default parameters for calling OpenAI API.\"\"\"\n normal_params: Dict[str, Any] = {\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n \"logit_bias\": self.logit_bias,\n }\n\n if self.max_tokens is not None:\n normal_params[\"max_tokens\"] = self.max_tokens\n if self.request_timeout is not None and not is_openai_v1():\n normal_params[\"request_timeout\"] = self.request_timeout\n\n # Azure gpt-35-turbo doesn't support best_of\n # don't specify best_of if it is 1\n if self.best_of > 1:\n normal_params[\"best_of\"] = self.best_of\n\n return {**normal_params, **self.model_kwargs}\n\n def _stream(\n self,\n prompt: str,\n stop: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> Iterator[GenerationChunk]:\n params = {**self._invocation_params, **kwargs, \"stream\": True}\n 
self.get_sub_prompts(params, [prompt], stop) # this mutates params\n for stream_resp in completion_with_retry(\n self, prompt=prompt, run_manager=run_manager, **params\n ):\n if not isinstance(stream_resp, dict):\n stream_resp = stream_resp.dict()\n chunk = _stream_response_to_generation_chunk(stream_resp)\n yield chunk\n if run_manager:\n run_manager.on_llm_new_token(\n chunk.text,\n chunk=chunk,\n verbose=self.verbose,\n logprobs=chunk.generation_info[\"logprobs\"]\n if chunk.generation_info\n else None,\n )\n\n async def _astream(\n self,\n prompt: str,\n stop: Optional[List[str]] = None,\n run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> AsyncIterator[GenerationChunk]:\n params = {**self._invocation_params, **kwargs, \"stream\": True}\n self.get_sub_prompts(params, [prompt], stop) # this mutates params\n async for stream_resp in await acompletion_with_retry(\n self, prompt=prompt, run_manager=run_manager, **params\n ):\n if not isinstance(stream_resp, dict):\n stream_resp = stream_resp.dict()\n chunk = _stream_response_to_generation_chunk(stream_resp)\n yield chunk\n if run_manager:\n await run_manager.on_llm_new_token(\n chunk.text,\n chunk=chunk,\n verbose=self.verbose,\n logprobs=chunk.generation_info[\"logprobs\"]\n if chunk.generation_info\n else None,\n )\n\n def _generate(\n self,\n prompts: List[str],\n stop: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> LLMResult:\n \"\"\"Call out to OpenAI's endpoint with k unique prompts.\n\n Args:\n prompts: The prompts to pass into the model.\n stop: Optional list of stop words to use when generating.\n\n Returns:\n The full LLM output.\n\n Example:\n .. code-block:: python\n\n response = openai.generate([\"Tell me a joke.\"])\n \"\"\"\n # TODO: write a unit test for this\n params = self._invocation_params\n params = {**params, **kwargs}\n sub_prompts = self.get_sub_prompts(params, prompts, stop)\n choices = []\n token_usage: Dict[str, int] = {}\n # Get the token usage from the response.\n # Includes prompt, completion, and total tokens used.\n _keys = {\"completion_tokens\", \"prompt_tokens\", \"total_tokens\"}\n system_fingerprint: Optional[str] = None\n for _prompts in sub_prompts:\n if self.streaming:\n if len(_prompts) > 1:\n raise ValueError(\"Cannot stream results with multiple prompts.\")\n\n generation: Optional[GenerationChunk] = None\n for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):\n if generation is None:\n generation = chunk\n else:\n generation += chunk\n assert generation is not None\n choices.append(\n {\n \"text\": generation.text,\n \"finish_reason\": generation.generation_info.get(\"finish_reason\")\n if generation.generation_info\n else None,\n \"logprobs\": generation.generation_info.get(\"logprobs\")\n if generation.generation_info\n else None,\n }\n )\n else:\n response = completion_with_retry(\n self, prompt=_prompts, run_manager=run_manager, **params\n )\n if not isinstance(response, dict):\n # V1 client returns the response in an PyDantic object instead of\n # dict. 
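# --- Hedged sketch: the streaming path above folds chunks together with `+=`.
# Assuming `langchain_core.outputs.GenerationChunk` concatenates text on `+`
# (as the accumulation loop here relies on), the pattern looks like this:
from langchain_core.outputs import GenerationChunk

chunks = [GenerationChunk(text="Hello"), GenerationChunk(text=", world")]
full = None
for chunk in chunks:
    full = chunk if full is None else full + chunk
assert full is not None and full.text == "Hello, world"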
For the transition period, we deep convert it to dict.\n response = response.dict()\n\n choices.extend(response[\"choices\"])\n update_token_usage(_keys, response, token_usage)\n if not system_fingerprint:\n system_fingerprint = response.get(\"system_fingerprint\")\n return self.create_llm_result(\n choices,\n prompts,\n params,\n token_usage,\n system_fingerprint=system_fingerprint,\n )\n\n async def _agenerate(\n self,\n prompts: List[str],\n stop: Optional[List[str]] = None,\n run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> LLMResult:\n \"\"\"Call out to OpenAI's endpoint async with k unique prompts.\"\"\"\n params = self._invocation_params\n params = {**params, **kwargs}\n sub_prompts = self.get_sub_prompts(params, prompts, stop)\n choices = []\n token_usage: Dict[str, int] = {}\n # Get the token usage from the response.\n # Includes prompt, completion, and total tokens used.\n _keys = {\"completion_tokens\", \"prompt_tokens\", \"total_tokens\"}\n system_fingerprint: Optional[str] = None\n for _prompts in sub_prompts:\n if self.streaming:\n if len(_prompts) > 1:\n raise ValueError(\"Cannot stream results with multiple prompts.\")\n\n generation: Optional[GenerationChunk] = None\n async for chunk in self._astream(\n _prompts[0], stop, run_manager, **kwargs\n ):\n if generation is None:\n generation = chunk\n else:\n generation += chunk\n assert generation is not None\n choices.append(\n {\n \"text\": generation.text,\n \"finish_reason\": generation.generation_info.get(\"finish_reason\")\n if generation.generation_info\n else None,\n \"logprobs\": generation.generation_info.get(\"logprobs\")\n if generation.generation_info\n else None,\n }\n )\n else:\n response = await acompletion_with_retry(\n self, prompt=_prompts, run_manager=run_manager, **params\n )\n if not isinstance(response, dict):\n response = response.dict()\n choices.extend(response[\"choices\"])\n update_token_usage(_keys, response, token_usage)\n return self.create_llm_result(\n choices,\n prompts,\n params,\n token_usage,\n system_fingerprint=system_fingerprint,\n )\n\n def get_sub_prompts(\n self,\n params: Dict[str, Any],\n prompts: List[str],\n stop: Optional[List[str]] = None,\n ) -> List[List[str]]:\n \"\"\"Get the sub prompts for llm call.\"\"\"\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n if params[\"max_tokens\"] == -1:\n if len(prompts) != 1:\n raise ValueError(\n \"max_tokens set to -1 not supported for multiple inputs.\"\n )\n params[\"max_tokens\"] = self.max_tokens_for_prompt(prompts[0])\n sub_prompts = [\n prompts[i : i + self.batch_size]\n for i in range(0, len(prompts), self.batch_size)\n ]\n return sub_prompts\n\n def create_llm_result(\n self,\n choices: Any,\n prompts: List[str],\n params: Dict[str, Any],\n token_usage: Dict[str, int],\n *,\n system_fingerprint: Optional[str] = None,\n ) -> LLMResult:\n \"\"\"Create the LLMResult from the choices and prompts.\"\"\"\n generations = []\n n = params.get(\"n\", self.n)\n for i, _ in enumerate(prompts):\n sub_choices = choices[i * n : (i + 1) * n]\n generations.append(\n [\n Generation(\n text=choice[\"text\"],\n generation_info=dict(\n finish_reason=choice.get(\"finish_reason\"),\n logprobs=choice.get(\"logprobs\"),\n ),\n )\n for choice in sub_choices\n ]\n )\n llm_output = {\"token_usage\": token_usage, \"model_name\": self.model_name}\n if system_fingerprint:\n llm_output[\"system_fingerprint\"] = 
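# --- Hedged sketch of the batching done by `get_sub_prompts` above: prompts are
# split into lists of at most `batch_size`, and each sub-list becomes one API call.
from typing import List

def batch_prompts(prompts: List[str], batch_size: int) -> List[List[str]]:
    return [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]

assert batch_prompts(["a", "b", "c", "d", "e"], batch_size=2) == [["a", "b"], ["c", "d"], ["e"]]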
system_fingerprint\n return LLMResult(generations=generations, llm_output=llm_output)\n\n @property\n def _invocation_params(self) -> Dict[str, Any]:\n \"\"\"Get the parameters used to invoke the model.\"\"\"\n openai_creds: Dict[str, Any] = {}\n if not is_openai_v1():\n openai_creds.update(\n {\n \"api_key\": self.openai_api_key,\n \"api_base\": self.openai_api_base,\n \"organization\": self.openai_organization,\n }\n )\n if self.openai_proxy:\n import openai\n\n openai.proxy = {\"http\": self.openai_proxy, \"https\": self.openai_proxy} # type: ignore[assignment] # noqa: E501\n return {**openai_creds, **self._default_params}\n\n @property\n def _identifying_params(self) -> Mapping[str, Any]:\n \"\"\"Get the identifying parameters.\"\"\"\n return {**{\"model_name\": self.model_name}, **self._default_params}\n\n @property\n def _llm_type(self) -> str:\n \"\"\"Return type of llm.\"\"\"\n return \"openai\"\n\n def get_token_ids(self, text: str) -> List[int]:\n \"\"\"Get the token IDs using the tiktoken package.\"\"\"\n # tiktoken NOT supported for Python < 3.8\n if sys.version_info[1] < 8:\n return super().get_num_tokens(text)\n try:\n import tiktoken\n except ImportError:\n raise ImportError(\n \"Could not import tiktoken python package. \"\n \"This is needed in order to calculate get_num_tokens. \"\n \"Please install it with `pip install tiktoken`.\"\n )\n\n model_name = self.tiktoken_model_name or self.model_name\n try:\n enc = tiktoken.encoding_for_model(model_name)\n except KeyError:\n logger.warning(\"Warning: model not found. Using cl100k_base encoding.\")\n model = \"cl100k_base\"\n enc = tiktoken.get_encoding(model)\n\n return enc.encode(\n text,\n allowed_special=self.allowed_special,\n disallowed_special=self.disallowed_special,\n )\n\n @staticmethod\n def modelname_to_contextsize(modelname: str) -> int:\n \"\"\"Calculate the maximum number of tokens possible to generate for a model.\n\n Args:\n modelname: The modelname we want to know the context size for.\n\n Returns:\n The maximum context size\n\n Example:\n .. code-block:: python\n\n max_tokens = openai.modelname_to_contextsize(\"gpt-3.5-turbo-instruct\")\n \"\"\"\n model_token_mapping = {\n \"gpt-4\": 8192,\n \"gpt-4-0314\": 8192,\n \"gpt-4-0613\": 8192,\n \"gpt-4-32k\": 32768,\n \"gpt-4-32k-0314\": 32768,\n \"gpt-4-32k-0613\": 32768,\n \"gpt-3.5-turbo\": 4096,\n \"gpt-3.5-turbo-0301\": 4096,\n \"gpt-3.5-turbo-0613\": 4096,\n \"gpt-3.5-turbo-16k\": 16385,\n \"gpt-3.5-turbo-16k-0613\": 16385,\n \"gpt-3.5-turbo-instruct\": 4096,\n \"text-ada-001\": 2049,\n \"ada\": 2049,\n \"text-babbage-001\": 2040,\n \"babbage\": 2049,\n \"text-curie-001\": 2049,\n \"curie\": 2049,\n \"davinci\": 2049,\n \"text-davinci-003\": 4097,\n \"text-davinci-002\": 4097,\n \"code-davinci-002\": 8001,\n \"code-davinci-001\": 8001,\n \"code-cushman-002\": 2048,\n \"code-cushman-001\": 2048,\n }\n\n # handling finetuned models\n if \"ft-\" in modelname:\n modelname = modelname.split(\":\")[0]\n\n context_size = model_token_mapping.get(modelname, None)\n\n if context_size is None:\n raise ValueError(\n f\"Unknown model: {modelname}. 
Please provide a valid OpenAI model name.\"\n \"Known models are: \" + \", \".join(model_token_mapping.keys())\n )\n\n return context_size\n\n @property\n def max_context_size(self) -> int:\n \"\"\"Get max context size for this model.\"\"\"\n return self.modelname_to_contextsize(self.model_name)\n\n def max_tokens_for_prompt(self, prompt: str) -> int:\n \"\"\"Calculate the maximum number of tokens possible to generate for a prompt.\n\n Args:\n prompt: The prompt to pass into the model.\n\n Returns:\n The maximum number of tokens to generate for a prompt.\n\n Example:\n .. code-block:: python\n\n max_tokens = openai.max_token_for_prompt(\"Tell me a joke.\")\n \"\"\"\n num_tokens = self.get_num_tokens(prompt)\n return self.max_context_size - num_tokens\n\n\n@deprecated(\n since=\"0.0.10\", removal=\"0.2.0\", alternative_import=\"langchain_openai.OpenAI\"\n)\nclass OpenAI(BaseOpenAI):\n \"\"\"OpenAI large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from langchain_community.llms import OpenAI\n openai = OpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n \"\"\"\n\n @classmethod\n def get_lc_namespace(cls) -> List[str]:\n \"\"\"Get the namespace of the langchain object.\"\"\"\n return [\"langchain\", \"llms\", \"openai\"]\n\n @property\n def _invocation_params(self) -> Dict[str, Any]:\n return {**{\"model\": self.model_name}, **super()._invocation_params}\n\n\n@deprecated(\n since=\"0.0.10\", removal=\"0.2.0\", alternative_import=\"langchain_openai.AzureOpenAI\"\n)\nclass AzureOpenAI(BaseOpenAI):\n \"\"\"Azure-specific OpenAI large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from langchain_community.llms import AzureOpenAI\n\n openai = AzureOpenAI(model_name=\"gpt-3.5-turbo-instruct\")\n \"\"\"\n\n azure_endpoint: Union[str, None] = None\n \"\"\"Your Azure endpoint, including the resource.\n\n Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.\n\n Example: `https://example-resource.azure.openai.com/`\n \"\"\"\n deployment_name: Union[str, None] = Field(default=None, alias=\"azure_deployment\")\n \"\"\"A model deployment. 
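# --- Hedged usage sketch for the (deprecated) wrapper defined above; new code should
# prefer `langchain_openai.OpenAI`. Requires the `openai` package and OPENAI_API_KEY
# in the environment; the prompt is illustrative.
from langchain_community.llms import OpenAI

llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
print(llm.invoke("Say hello in one word."))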
\n\n If given sets the base client URL to include `/deployments/{azure_deployment}`.\n Note: this means you won't be able to use non-deployment endpoints.\n \"\"\"\n openai_api_version: str = Field(default=\"\", alias=\"api_version\")\n \"\"\"Automatically inferred from env var `OPENAI_API_VERSION` if not provided.\"\"\"\n openai_api_key: Union[str, None] = Field(default=None, alias=\"api_key\")\n \"\"\"Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided.\"\"\"\n azure_ad_token: Union[str, None] = None\n \"\"\"Your Azure Active Directory token.\n\n Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.\n\n For more: \n https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.\n \"\"\" # noqa: E501\n azure_ad_token_provider: Union[Callable[[], str], None] = None\n \"\"\"A function that returns an Azure Active Directory token.\n\n Will be invoked on every request.\n \"\"\"\n openai_api_type: str = \"\"\n \"\"\"Legacy, for openai<1.0.0 support.\"\"\"\n validate_base_url: bool = True\n \"\"\"For backwards compatibility. If legacy val openai_api_base is passed in, try to \n infer if it is a base_url or azure_endpoint and update accordingly.\n \"\"\"\n\n @classmethod\n def get_lc_namespace(cls) -> List[str]:\n \"\"\"Get the namespace of the langchain object.\"\"\"\n return [\"langchain\", \"llms\", \"openai\"]\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n if values[\"n\"] < 1:\n raise ValueError(\"n must be at least 1.\")\n if values[\"streaming\"] and values[\"n\"] > 1:\n raise ValueError(\"Cannot stream results when n > 1.\")\n if values[\"streaming\"] and values[\"best_of\"] > 1:\n raise ValueError(\"Cannot stream results when best_of > 1.\")\n\n # Check OPENAI_KEY for backwards compatibility.\n # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using\n # other forms of azure credentials.\n values[\"openai_api_key\"] = (\n values[\"openai_api_key\"]\n or os.getenv(\"AZURE_OPENAI_API_KEY\")\n or os.getenv(\"OPENAI_API_KEY\")\n )\n\n values[\"azure_endpoint\"] = values[\"azure_endpoint\"] or os.getenv(\n \"AZURE_OPENAI_ENDPOINT\"\n )\n values[\"azure_ad_token\"] = values[\"azure_ad_token\"] or os.getenv(\n \"AZURE_OPENAI_AD_TOKEN\"\n )\n values[\"openai_api_base\"] = values[\"openai_api_base\"] or os.getenv(\n \"OPENAI_API_BASE\"\n )\n values[\"openai_proxy\"] = get_from_dict_or_env(\n values,\n \"openai_proxy\",\n \"OPENAI_PROXY\",\n default=\"\",\n )\n values[\"openai_organization\"] = (\n values[\"openai_organization\"]\n or os.getenv(\"OPENAI_ORG_ID\")\n or os.getenv(\"OPENAI_ORGANIZATION\")\n )\n values[\"openai_api_version\"] = values[\"openai_api_version\"] or os.getenv(\n \"OPENAI_API_VERSION\"\n )\n values[\"openai_api_type\"] = get_from_dict_or_env(\n values, \"openai_api_type\", \"OPENAI_API_TYPE\", default=\"azure\"\n )\n try:\n import openai\n except ImportError:\n raise ImportError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n if is_openai_v1():\n # For backwards compatibility. 
Before openai v1, no distinction was made\n # between azure_endpoint and base_url (openai_api_base).\n openai_api_base = values[\"openai_api_base\"]\n if openai_api_base and values[\"validate_base_url\"]:\n if \"/openai\" not in openai_api_base:\n values[\"openai_api_base\"] = (\n values[\"openai_api_base\"].rstrip(\"/\") + \"/openai\"\n )\n warnings.warn(\n \"As of openai>=1.0.0, Azure endpoints should be specified via \"\n f\"the `azure_endpoint` param not `openai_api_base` \"\n f\"(or alias `base_url`). Updating `openai_api_base` from \"\n f\"{openai_api_base} to {values['openai_api_base']}.\"\n )\n if values[\"deployment_name\"]:\n warnings.warn(\n \"As of openai>=1.0.0, if `deployment_name` (or alias \"\n \"`azure_deployment`) is specified then \"\n \"`openai_api_base` (or alias `base_url`) should not be. \"\n \"Instead use `deployment_name` (or alias `azure_deployment`) \"\n \"and `azure_endpoint`.\"\n )\n if values[\"deployment_name\"] not in values[\"openai_api_base\"]:\n warnings.warn(\n \"As of openai>=1.0.0, if `openai_api_base` \"\n \"(or alias `base_url`) is specified it is expected to be \"\n \"of the form \"\n \"https://example-resource.azure.openai.com/openai/deployments/example-deployment. \" # noqa: E501\n f\"Updating {openai_api_base} to \"\n f\"{values['openai_api_base']}.\"\n )\n values[\"openai_api_base\"] += (\n \"/deployments/\" + values[\"deployment_name\"]\n )\n values[\"deployment_name\"] = None\n client_params = {\n \"api_version\": values[\"openai_api_version\"],\n \"azure_endpoint\": values[\"azure_endpoint\"],\n \"azure_deployment\": values[\"deployment_name\"],\n \"api_key\": values[\"openai_api_key\"],\n \"azure_ad_token\": values[\"azure_ad_token\"],\n \"azure_ad_token_provider\": values[\"azure_ad_token_provider\"],\n \"organization\": values[\"openai_organization\"],\n \"base_url\": values[\"openai_api_base\"],\n \"timeout\": values[\"request_timeout\"],\n \"max_retries\": values[\"max_retries\"],\n \"default_headers\": values[\"default_headers\"],\n \"default_query\": values[\"default_query\"],\n \"http_client\": values[\"http_client\"],\n }\n values[\"client\"] = openai.AzureOpenAI(**client_params).completions\n values[\"async_client\"] = openai.AsyncAzureOpenAI(\n **client_params\n ).completions\n\n else:\n values[\"client\"] = openai.Completion\n\n return values\n\n @property\n def _identifying_params(self) -> Mapping[str, Any]:\n return {\n **{\"deployment_name\": self.deployment_name},\n **super()._identifying_params,\n }\n\n @property\n def _invocation_params(self) -> Dict[str, Any]:\n if is_openai_v1():\n openai_params = {\"model\": self.deployment_name}\n else:\n openai_params = {\n \"engine\": self.deployment_name,\n \"api_type\": self.openai_api_type,\n \"api_version\": self.openai_api_version,\n }\n return {**openai_params, **super()._invocation_params}\n\n @property\n def _llm_type(self) -> str:\n \"\"\"Return type of llm.\"\"\"\n return \"azure\"\n\n @property\n def lc_attributes(self) -> Dict[str, Any]:\n return {\n \"openai_api_type\": self.openai_api_type,\n \"openai_api_version\": self.openai_api_version,\n }\n\n\n@deprecated(\n since=\"0.0.1\",\n removal=\"0.2.0\",\n alternative_import=\"langchain_openai.ChatOpenAI\",\n)\nclass OpenAIChat(BaseLLM):\n \"\"\"OpenAI Chat large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not 
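# --- Hedged sketch of the base-URL migration applied above for openai>=1.0: a legacy
# `openai_api_base` is normalized to include "/openai", and a deployment name is
# folded into the path, matching the validator's warning messages. Endpoint and
# deployment values are placeholders.
from typing import Optional

def normalize_azure_base(openai_api_base: str, deployment_name: Optional[str]) -> str:
    base = openai_api_base.rstrip("/")
    if "/openai" not in base:
        base += "/openai"
    if deployment_name:
        base += "/deployments/" + deployment_name
    return base

assert (
    normalize_azure_base("https://example-resource.azure.openai.com/", "example-deployment")
    == "https://example-resource.azure.openai.com/openai/deployments/example-deployment"
)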
explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from langchain_community.llms import OpenAIChat\n openaichat = OpenAIChat(model_name=\"gpt-3.5-turbo\")\n \"\"\"\n\n client: Any = Field(default=None, exclude=True) #: :meta private:\n async_client: Any = Field(default=None, exclude=True) #: :meta private:\n model_name: str = \"gpt-3.5-turbo\"\n \"\"\"Model name to use.\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n # When updating this to use a SecretStr\n # Check for classes that derive from this class (as some of them\n # may assume openai_api_key is a str)\n openai_api_key: Optional[str] = Field(default=None, alias=\"api_key\")\n \"\"\"Automatically inferred from env var `OPENAI_API_KEY` if not provided.\"\"\"\n openai_api_base: Optional[str] = Field(default=None, alias=\"base_url\")\n \"\"\"Base URL path for API requests, leave blank if not using a proxy or service \n emulator.\"\"\"\n # to support explicit proxy for OpenAI\n openai_proxy: Optional[str] = None\n max_retries: int = 6\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n prefix_messages: List = Field(default_factory=list)\n \"\"\"Series of messages for Chat input.\"\"\"\n streaming: bool = False\n \"\"\"Whether to stream the results or not.\"\"\"\n allowed_special: Union[Literal[\"all\"], AbstractSet[str]] = set()\n \"\"\"Set of special tokens that are allowed\u3002\"\"\"\n disallowed_special: Union[Literal[\"all\"], Collection[str]] = \"all\"\n \"\"\"Set of special tokens that are not allowed\u3002\"\"\"\n\n @root_validator(pre=True)\n def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Build extra kwargs from additional params that were passed in.\"\"\"\n all_required_field_names = {field.alias for field in cls.__fields__.values()}\n\n extra = values.get(\"model_kwargs\", {})\n for field_name in list(values):\n if field_name not in all_required_field_names:\n if field_name in extra:\n raise ValueError(f\"Found {field_name} supplied twice.\")\n extra[field_name] = values.pop(field_name)\n values[\"model_kwargs\"] = extra\n return values\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = get_from_dict_or_env(\n values, \"openai_api_key\", \"OPENAI_API_KEY\"\n )\n openai_api_base = get_from_dict_or_env(\n values,\n \"openai_api_base\",\n \"OPENAI_API_BASE\",\n default=\"\",\n )\n openai_proxy = get_from_dict_or_env(\n values,\n \"openai_proxy\",\n \"OPENAI_PROXY\",\n default=\"\",\n )\n openai_organization = get_from_dict_or_env(\n values, \"openai_organization\", \"OPENAI_ORGANIZATION\", default=\"\"\n )\n try:\n import openai\n\n openai.api_key = openai_api_key\n if openai_api_base:\n openai.api_base = openai_api_base\n if openai_organization:\n openai.organization = openai_organization\n if openai_proxy:\n openai.proxy = {\"http\": openai_proxy, \"https\": openai_proxy} # type: ignore[assignment] # noqa: E501\n except ImportError:\n raise ImportError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n try:\n values[\"client\"] = openai.ChatCompletion\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. 
Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n warnings.warn(\n \"You are trying to use a chat model. This way of initializing it is \"\n \"no longer supported. Instead, please use: \"\n \"`from langchain_community.chat_models import ChatOpenAI`\"\n )\n return values\n\n @property\n def _default_params(self) -> Dict[str, Any]:\n \"\"\"Get the default parameters for calling OpenAI API.\"\"\"\n return self.model_kwargs\n\n def _get_chat_params(\n self, prompts: List[str], stop: Optional[List[str]] = None\n ) -> Tuple:\n if len(prompts) > 1:\n raise ValueError(\n f\"OpenAIChat currently only supports single prompt, got {prompts}\"\n )\n messages = self.prefix_messages + [{\"role\": \"user\", \"content\": prompts[0]}]\n params: Dict[str, Any] = {**{\"model\": self.model_name}, **self._default_params}\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n if params.get(\"max_tokens\") == -1:\n # for ChatGPT api, omitting max_tokens is equivalent to having no limit\n del params[\"max_tokens\"]\n return messages, params\n\n def _stream(\n self,\n prompt: str,\n stop: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> Iterator[GenerationChunk]:\n messages, params = self._get_chat_params([prompt], stop)\n params = {**params, **kwargs, \"stream\": True}\n for stream_resp in completion_with_retry(\n self, messages=messages, run_manager=run_manager, **params\n ):\n if not isinstance(stream_resp, dict):\n stream_resp = stream_resp.dict()\n token = stream_resp[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n chunk = GenerationChunk(text=token)\n yield chunk\n if run_manager:\n run_manager.on_llm_new_token(token, chunk=chunk)\n\n async def _astream(\n self,\n prompt: str,\n stop: Optional[List[str]] = None,\n run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> AsyncIterator[GenerationChunk]:\n messages, params = self._get_chat_params([prompt], stop)\n params = {**params, **kwargs, \"stream\": True}\n async for stream_resp in await acompletion_with_retry(\n self, messages=messages, run_manager=run_manager, **params\n ):\n if not isinstance(stream_resp, dict):\n stream_resp = stream_resp.dict()\n token = stream_resp[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n chunk = GenerationChunk(text=token)\n yield chunk\n if run_manager:\n await run_manager.on_llm_new_token(token, chunk=chunk)\n\n def _generate(\n self,\n prompts: List[str],\n stop: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> LLMResult:\n if self.streaming:\n generation: Optional[GenerationChunk] = None\n for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):\n if generation is None:\n generation = chunk\n else:\n generation += chunk\n assert generation is not None\n return LLMResult(generations=[[generation]])\n\n messages, params = self._get_chat_params(prompts, stop)\n params = {**params, **kwargs}\n full_response = completion_with_retry(\n self, messages=messages, run_manager=run_manager, **params\n )\n if not isinstance(full_response, dict):\n full_response = full_response.dict()\n llm_output = {\n \"token_usage\": full_response[\"usage\"],\n \"model_name\": self.model_name,\n }\n return LLMResult(\n generations=[\n [Generation(text=full_response[\"choices\"][0][\"message\"][\"content\"])]\n ],\n llm_output=llm_output,\n )\n\n async def _agenerate(\n 
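# --- Hedged sketch of `_get_chat_params` above: a single prompt becomes one user
# message appended to `prefix_messages`, and `max_tokens == -1` is dropped because
# the chat API treats an omitted max_tokens as "no explicit limit".
from typing import Any, Dict, List, Optional, Tuple

def chat_params(
    prompt: str,
    prefix_messages: List[Dict[str, str]],
    model_name: str,
    stop: Optional[List[str]] = None,
    **model_kwargs: Any,
) -> Tuple[List[Dict[str, str]], Dict[str, Any]]:
    messages = prefix_messages + [{"role": "user", "content": prompt}]
    params: Dict[str, Any] = {"model": model_name, **model_kwargs}
    if stop is not None:
        params["stop"] = stop
    if params.get("max_tokens") == -1:
        del params["max_tokens"]
    return messages, params

messages, params = chat_params("Hi!", [], "gpt-3.5-turbo", max_tokens=-1)
assert messages == [{"role": "user", "content": "Hi!"}] and "max_tokens" not in params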
self,\n prompts: List[str],\n stop: Optional[List[str]] = None,\n run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> LLMResult:\n if self.streaming:\n generation: Optional[GenerationChunk] = None\n async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):\n if generation is None:\n generation = chunk\n else:\n generation += chunk\n assert generation is not None\n return LLMResult(generations=[[generation]])\n\n messages, params = self._get_chat_params(prompts, stop)\n params = {**params, **kwargs}\n full_response = await acompletion_with_retry(\n self, messages=messages, run_manager=run_manager, **params\n )\n if not isinstance(full_response, dict):\n full_response = full_response.dict()\n llm_output = {\n \"token_usage\": full_response[\"usage\"],\n \"model_name\": self.model_name,\n }\n return LLMResult(\n generations=[\n [Generation(text=full_response[\"choices\"][0][\"message\"][\"content\"])]\n ],\n llm_output=llm_output,\n )\n\n @property\n def _identifying_params(self) -> Mapping[str, Any]:\n \"\"\"Get the identifying parameters.\"\"\"\n return {**{\"model_name\": self.model_name}, **self._default_params}\n\n @property\n def _llm_type(self) -> str:\n \"\"\"Return type of llm.\"\"\"\n return \"openai-chat\"\n\n def get_token_ids(self, text: str) -> List[int]:\n \"\"\"Get the token IDs using the tiktoken package.\"\"\"\n # tiktoken NOT supported for Python < 3.8\n if sys.version_info[1] < 8:\n return super().get_token_ids(text)\n try:\n import tiktoken\n except ImportError:\n raise ImportError(\n \"Could not import tiktoken python package. \"\n \"This is needed in order to calculate get_num_tokens. \"\n \"Please install it with `pip install tiktoken`.\"\n )\n\n enc = tiktoken.encoding_for_model(self.model_name)\n return enc.encode(\n text,\n allowed_special=self.allowed_special,\n disallowed_special=self.disallowed_special,\n )\n", "output": ["_create_retry_decorator", "_streaming_response_template", "_stream_response_to_generation_chunk", "completion_with_retry", "def", "_update_response", "update_token_usage", "OpenAI", "BaseOpenAI", "Config", "AzureOpenAI", "OpenAIChat"], "metadata": {"file_path": "langchain-master/libs/community/langchain_community/llms/openai.py", "file_length": 13819, "symbol_dict": [{"symbol": "_streaming_response_template", "type": "mannual_defined_function", "byte_location": 2245, "location": 714}, {"symbol": "_create_retry_decorator", "type": "mannual_defined_function", "byte_location": 2480, "location": 782}, {"symbol": "_stream_response_to_generation_chunk", "type": "mannual_defined_function", "byte_location": 1300, "location": 414}, {"symbol": "_update_response", "type": "mannual_defined_function", "byte_location": 1826, "location": 572}, {"symbol": "completion_with_retry", "type": "mannual_defined_function", "byte_location": 3042, "location": 966}, {"symbol": "update_token_usage", "type": "mannual_defined_function", "byte_location": 910, "location": 285}, {"symbol": "def", "type": "mannual_defined_function", "byte_location": 3560, "location": 1148}, {"symbol": "BaseOpenAI", "type": "mannual_defined_class", "byte_location": 4210, "location": 1369}, {"symbol": "OpenAIChat", "type": "mannual_defined_class", "byte_location": 36898, "location": 10790}, {"symbol": "OpenAI", "type": "mannual_defined_class", "byte_location": 27260, "location": 8002}, {"symbol": "Config", "type": "mannual_defined_class", "byte_location": 9486, "location": 2908}, {"symbol": "AzureOpenAI", "type": "mannual_defined_class", 
"byte_location": 28196, "location": 8290}]}} {"input": "#!/usr/bin/env python3\n\"\"\"\nrefguide_check.py [OPTIONS] [-- ARGS]\n\n- Check for a NumPy submodule whether the objects in its __all__ dict\n correspond to the objects included in the reference guide.\n- Check docstring examples\n- Check example blocks in RST files\n\nExample of usage::\n\n $ python tools/refguide_check.py\n\nNote that this is a helper script to be able to check if things are missing;\nthe output of this script does need to be checked manually. In some cases\nobjects are left out of the refguide for a good reason (it's an alias of\nanother function, or deprecated, or ...)\n\nAnother use of this helper script is to check validity of code samples\nin docstrings::\n\n $ python tools/refguide_check.py --doctests ma\n\nor in RST-based documentations::\n\n $ python tools/refguide_check.py --rst doc/source\n\n\"\"\"\nimport copy\nimport doctest\nimport inspect\nimport io\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\nimport docutils.core\nfrom argparse import ArgumentParser\nfrom contextlib import contextmanager, redirect_stderr\nfrom doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL\n\nfrom docutils.parsers.rst import directives\n\nimport sphinx\nimport numpy as np\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))\nfrom numpydoc.docscrape_sphinx import get_doc_object\n\nSKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')\n\n# Enable specific Sphinx directives\nfrom sphinx.directives.other import SeeAlso, Only\ndirectives.register_directive('seealso', SeeAlso)\ndirectives.register_directive('only', Only)\n\n\nBASE_MODULE = \"numpy\"\n\nPUBLIC_SUBMODULES = [\n \"f2py\",\n \"linalg\",\n \"lib\",\n \"lib.format\",\n \"lib.mixins\",\n \"lib.recfunctions\",\n \"lib.scimath\",\n \"lib.stride_tricks\",\n \"lib.npyio\",\n \"lib.introspect\",\n \"lib.array_utils\",\n \"fft\",\n \"char\",\n \"rec\",\n \"ma\",\n \"ma.extras\",\n \"ma.mrecords\",\n \"polynomial\",\n \"polynomial.chebyshev\",\n \"polynomial.hermite\",\n \"polynomial.hermite_e\",\n \"polynomial.laguerre\",\n \"polynomial.legendre\",\n \"polynomial.polynomial\",\n \"matrixlib\",\n \"random\",\n \"strings\",\n \"testing\",\n]\n\n# Docs for these modules are included in the parent module\nOTHER_MODULE_DOCS = {\n 'fftpack.convolve': 'fftpack',\n 'io.wavfile': 'io',\n 'io.arff': 'io',\n}\n\n# these names are known to fail doctesting and we like to keep it that way\n# e.g. 
sometimes pseudocode is acceptable etc\n#\n# Optionally, a subset of methods can be skipped by setting dict-values\n# to a container of method-names\nDOCTEST_SKIPDICT = {\n # cases where NumPy docstrings import things from SciPy:\n 'numpy.lib.vectorize': None,\n 'numpy.random.standard_gamma': None,\n 'numpy.random.gamma': None,\n 'numpy.random.vonmises': None,\n 'numpy.random.power': None,\n 'numpy.random.zipf': None,\n # cases where NumPy docstrings import things from other 3'rd party libs:\n 'numpy._core.from_dlpack': None,\n # remote / local file IO with DataSource is problematic in doctest:\n 'numpy.lib.npyio.DataSource': None,\n 'numpy.lib.Repository': None,\n}\n\n# Skip non-numpy RST files, historical release notes\n# Any single-directory exact match will skip the directory and all subdirs.\n# Any exact match (like 'doc/release') will scan subdirs but skip files in\n# the matched directory.\n# Any filename will skip that file\nRST_SKIPLIST = [\n 'scipy-sphinx-theme',\n 'sphinxext',\n 'neps',\n 'changelog',\n 'doc/release',\n 'doc/source/release',\n 'doc/release/upcoming_changes',\n 'c-info.ufunc-tutorial.rst',\n 'c-info.python-as-glue.rst',\n 'f2py.getting-started.rst',\n 'f2py-examples.rst',\n 'arrays.nditer.cython.rst',\n 'how-to-verify-bug.rst',\n # See PR 17222, these should be fixed\n 'basics.dispatch.rst',\n 'basics.subclassing.rst',\n 'basics.interoperability.rst',\n 'misc.rst',\n 'TESTS.rst'\n]\n\n# these names are not required to be present in ALL despite being in\n# autosummary:: listing\nREFGUIDE_ALL_SKIPLIST = [\n r'scipy\\.sparse\\.linalg',\n r'scipy\\.spatial\\.distance',\n r'scipy\\.linalg\\.blas\\.[sdczi].*',\n r'scipy\\.linalg\\.lapack\\.[sdczi].*',\n]\n\n# these names are not required to be in an autosummary:: listing\n# despite being in ALL\nREFGUIDE_AUTOSUMMARY_SKIPLIST = [\n # NOTE: should NumPy have a better match between autosummary\n # listings and __all__? For now, TR isn't convinced this is a\n # priority -- focus on just getting docstrings executed / correct\n r'numpy\\.*',\n]\n# deprecated windows in scipy.signal namespace\nfor name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',\n 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',\n 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',\n 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):\n REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\\.signal\\.' + name)\n\nHAVE_MATPLOTLIB = False\n\n\ndef short_path(path, cwd=None):\n \"\"\"\n Return relative or absolute path name, whichever is shortest.\n\n Parameters\n ----------\n path : str or None\n cwd : str or None\n\n Returns\n -------\n str\n Relative path or absolute path based on current working directory\n \"\"\"\n if not isinstance(path, str):\n return path\n if cwd is None:\n cwd = os.getcwd()\n abspath = os.path.abspath(path)\n relpath = os.path.relpath(path, cwd)\n if len(abspath) <= len(relpath):\n return abspath\n return relpath\n\n\ndef find_names(module, names_dict):\n \"\"\"\n Finds the occurrences of function names, special directives like data\n and functions and scipy constants in the docstrings of `module`. 
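# --- Hedged sketch (placeholder names) of how DOCTEST_SKIPDICT above is consumed
# later by check_doctests(): a value of None skips the whole object, while a
# container of method names skips only those methods' doctests.
def filter_tests(full_name, test_names, skipdict):
    if full_name in skipdict:
        skip_methods = skipdict[full_name]
        if skip_methods is None:
            return []
        return [t for t in test_names if t.partition(".")[2] not in skip_methods]
    return test_names

skipdict = {"numpy.random.gamma": None, "numpy.ma.MaskedArray": {"tofile"}}
assert filter_tests("numpy.random.gamma", ["gamma"], skipdict) == []
assert filter_tests(
    "numpy.ma.MaskedArray", ["MaskedArray.tofile", "MaskedArray.mean"], skipdict
) == ["MaskedArray.mean"]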
The\n following patterns are searched for:\n\n * 3 spaces followed by function name, and maybe some spaces, some\n dashes, and an explanation; only function names listed in\n refguide are formatted like this (mostly, there may be some false\n positives\n * special directives, such as data and function\n * (scipy.constants only): quoted list\n\n The `names_dict` is updated by reference and accessible in calling method\n\n Parameters\n ----------\n module : ModuleType\n The module, whose docstrings is to be searched\n names_dict : dict\n Dictionary which contains module name as key and a set of found\n function names and directives as value\n\n Returns\n -------\n None\n \"\"\"\n patterns = [\n r\"^\\s\\s\\s([a-z_0-9A-Z]+)(\\s+-+.*)?$\",\n r\"^\\.\\. (?:data|function)::\\s*([a-z_0-9A-Z]+)\\s*$\"\n ]\n\n if module.__name__ == 'scipy.constants':\n patterns += [\"^``([a-z_0-9A-Z]+)``\"]\n\n patterns = [re.compile(pattern) for pattern in patterns]\n module_name = module.__name__\n\n for line in module.__doc__.splitlines():\n res = re.search(r\"^\\s*\\.\\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\\s*$\", line)\n if res:\n module_name = res.group(1)\n continue\n\n for pattern in patterns:\n res = re.match(pattern, line)\n if res is not None:\n name = res.group(1)\n entry = '.'.join([module_name, name])\n names_dict.setdefault(module_name, set()).add(name)\n break\n\n\ndef get_all_dict(module):\n \"\"\"\n Return a copy of the __all__ dict with irrelevant items removed.\n\n Parameters\n ----------\n module : ModuleType\n The module whose __all__ dict has to be processed\n\n Returns\n -------\n deprecated : list\n List of callable and deprecated sub modules\n not_deprecated : list\n List of non callable or non deprecated sub modules\n others : list\n List of remaining types of sub modules\n \"\"\"\n if hasattr(module, \"__all__\"):\n all_dict = copy.deepcopy(module.__all__)\n else:\n all_dict = copy.deepcopy(dir(module))\n all_dict = [name for name in all_dict\n if not name.startswith(\"_\")]\n for name in ['absolute_import', 'division', 'print_function']:\n try:\n all_dict.remove(name)\n except ValueError:\n pass\n if not all_dict:\n # Must be a pure documentation module\n all_dict.append('__doc__')\n\n # Modules are almost always private; real submodules need a separate\n # run of refguide_check.\n all_dict = [name for name in all_dict\n if not inspect.ismodule(getattr(module, name, None))]\n\n deprecated = []\n not_deprecated = []\n for name in all_dict:\n f = getattr(module, name, None)\n if callable(f) and is_deprecated(f):\n deprecated.append(name)\n else:\n not_deprecated.append(name)\n\n others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))\n\n return not_deprecated, deprecated, others\n\n\ndef compare(all_dict, others, names, module_name):\n \"\"\"\n Return sets of objects from all_dict.\n Will return three sets:\n {in module_name.__all__},\n {in REFGUIDE*},\n and {missing from others}\n\n Parameters\n ----------\n all_dict : list\n List of non deprecated sub modules for module_name\n others : list\n List of sub modules for module_name\n names : set\n Set of function names or special directives present in\n docstring of module_name\n module_name : ModuleType\n\n Returns\n -------\n only_all : set\n only_ref : set\n missing : set\n \"\"\"\n only_all = set()\n for name in all_dict:\n if name not in names:\n for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:\n if re.match(pat, module_name + '.' 
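# --- Hedged illustration of the `find_names` patterns above: a line indented by
# three spaces followed by a name (optionally "name -- description") counts as a
# refguide entry, and ".. data::" / ".. function::" directives are picked up too.
import re

entry = re.compile(r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$")
directive = re.compile(r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$")

assert entry.match("   linspace -- evenly spaced numbers").group(1) == "linspace"
assert directive.match(".. function:: linspace").group(1) == "linspace"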
+ name):\n break\n else:\n only_all.add(name)\n\n only_ref = set()\n missing = set()\n for name in names:\n if name not in all_dict:\n for pat in REFGUIDE_ALL_SKIPLIST:\n if re.match(pat, module_name + '.' + name):\n if name not in others:\n missing.add(name)\n break\n else:\n only_ref.add(name)\n\n return only_all, only_ref, missing\n\n\ndef is_deprecated(f):\n \"\"\"\n Check if module `f` is deprecated\n\n Parameters\n ----------\n f : ModuleType\n\n Returns\n -------\n bool\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"error\")\n try:\n f(**{\"not a kwarg\":None})\n except DeprecationWarning:\n return True\n except Exception:\n pass\n return False\n\n\ndef check_items(all_dict, names, deprecated, others, module_name, dots=True):\n \"\"\"\n Check that `all_dict` is consistent with the `names` in `module_name`\n For instance, that there are no deprecated or extra objects.\n\n Parameters\n ----------\n all_dict : list\n\n names : set\n\n deprecated : list\n\n others : list\n\n module_name : ModuleType\n\n dots : bool\n Whether to print a dot for each check\n\n Returns\n -------\n list\n List of [(name, success_flag, output)...]\n \"\"\"\n num_all = len(all_dict)\n num_ref = len(names)\n\n output = \"\"\n\n output += \"Non-deprecated objects in __all__: %i\\n\" % num_all\n output += \"Objects in refguide: %i\\n\\n\" % num_ref\n\n only_all, only_ref, missing = compare(all_dict, others, names, module_name)\n dep_in_ref = only_ref.intersection(deprecated)\n only_ref = only_ref.difference(deprecated)\n\n if len(dep_in_ref) > 0:\n output += \"Deprecated objects in refguide::\\n\\n\"\n for name in sorted(deprecated):\n output += \" \" + name + \"\\n\"\n\n if len(only_all) == len(only_ref) == len(missing) == 0:\n if dots:\n output_dot('.')\n return [(None, True, output)]\n else:\n if len(only_all) > 0:\n output += \"ERROR: objects in %s.__all__ but not in refguide::\\n\\n\" % module_name\n for name in sorted(only_all):\n output += \" \" + name + \"\\n\"\n\n output += \"\\nThis issue can be fixed by adding these objects to\\n\"\n output += \"the function listing in __init__.py for this module\\n\"\n\n if len(only_ref) > 0:\n output += \"ERROR: objects in refguide but not in %s.__all__::\\n\\n\" % module_name\n for name in sorted(only_ref):\n output += \" \" + name + \"\\n\"\n\n output += \"\\nThis issue should likely be fixed by removing these objects\\n\"\n output += \"from the function listing in __init__.py for this module\\n\"\n output += \"or adding them to __all__.\\n\"\n\n if len(missing) > 0:\n output += \"ERROR: missing objects::\\n\\n\"\n for name in sorted(missing):\n output += \" \" + name + \"\\n\"\n\n if dots:\n output_dot('F')\n return [(None, False, output)]\n\n\ndef validate_rst_syntax(text, name, dots=True):\n \"\"\"\n Validates the doc string in a snippet of documentation\n `text` from file `name`\n Parameters\n ----------\n text : str\n Docstring text\n name : str\n File name for which the doc string is to be validated\n dots : bool\n Whether to print a dot symbol for each check\n Returns\n -------\n (bool, str)\n \"\"\"\n if text is None:\n if dots:\n output_dot('E')\n return False, \"ERROR: %s: no documentation\" % (name,)\n\n ok_unknown_items = set([\n 'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',\n 'obj', 'versionadded', 'versionchanged', 'module', 'class',\n 'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',\n 'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'\n ])\n\n # Run through 
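# --- Hedged sketch of the `is_deprecated` probe above: DeprecationWarning is
# promoted to an error and the object is called with a deliberately invalid keyword;
# wrappers that warn before delegating (as deprecation decorators do) reveal
# themselves, while everything else fails with an ordinary exception and is ignored.
import warnings

def looks_deprecated(f) -> bool:
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            return True
        except Exception:
            pass
    return False

def old_api(*args, **kwargs):  # stand-in for a decorator-wrapped deprecated callable
    warnings.warn("old_api is deprecated", DeprecationWarning)
    raise TypeError("unexpected keyword argument")

assert looks_deprecated(old_api) is True and looks_deprecated(len) is False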
docutils\n error_stream = io.StringIO()\n\n def resolve(name, is_label=False):\n return (\"http://foo\", name)\n\n token = ''\n\n docutils.core.publish_doctree(\n text, token,\n settings_overrides = dict(halt_level=5,\n traceback=True,\n default_reference_context='title-reference',\n default_role='emphasis',\n link_base='',\n resolve_name=resolve,\n stylesheet_path='',\n raw_enabled=0,\n file_insertion_enabled=0,\n warning_stream=error_stream))\n\n # Print errors, disregarding unimportant ones\n error_msg = error_stream.getvalue()\n errors = error_msg.split(token)\n success = True\n output = \"\"\n\n for error in errors:\n lines = error.splitlines()\n if not lines:\n continue\n\n m = re.match(r'.*Unknown (?:interpreted text role|directive type) \"(.*)\".*$', lines[0])\n if m:\n if m.group(1) in ok_unknown_items:\n continue\n\n m = re.match(r'.*Error in \"math\" directive:.*unknown option: \"label\"', \" \".join(lines), re.S)\n if m:\n continue\n\n output += name + lines[0] + \"::\\n \" + \"\\n \".join(lines[1:]).rstrip() + \"\\n\"\n success = False\n\n if not success:\n output += \" \" + \"-\"*72 + \"\\n\"\n for lineno, line in enumerate(text.splitlines()):\n output += \" %-4d %s\\n\" % (lineno+1, line)\n output += \" \" + \"-\"*72 + \"\\n\\n\"\n\n if dots:\n output_dot('.' if success else 'F')\n return success, output\n\n\ndef output_dot(msg='.', stream=sys.stderr):\n stream.write(msg)\n stream.flush()\n\n\ndef check_rest(module, names, dots=True):\n \"\"\"\n Check reStructuredText formatting of docstrings\n\n Parameters\n ----------\n module : ModuleType\n\n names : set\n\n Returns\n -------\n result : list\n List of [(module_name, success_flag, output),...]\n \"\"\"\n\n try:\n skip_types = (dict, str, unicode, float, int)\n except NameError:\n # python 3\n skip_types = (dict, str, float, int)\n\n\n results = []\n\n if module.__name__[6:] not in OTHER_MODULE_DOCS:\n results += [(module.__name__,) +\n validate_rst_syntax(inspect.getdoc(module),\n module.__name__, dots=dots)]\n\n for name in names:\n full_name = module.__name__ + '.' + name\n obj = getattr(module, name, None)\n\n if obj is None:\n results.append((full_name, False, \"%s has no docstring\" % (full_name,)))\n continue\n elif isinstance(obj, skip_types):\n continue\n\n if inspect.ismodule(obj):\n text = inspect.getdoc(obj)\n else:\n try:\n text = str(get_doc_object(obj))\n except Exception:\n import traceback\n results.append((full_name, False,\n \"Error in docstring format!\\n\" +\n traceback.format_exc()))\n continue\n\n m = re.search(\"([\\x00-\\x09\\x0b-\\x1f])\", text)\n if m:\n msg = (\"Docstring contains a non-printable character %r! 
\"\n \"Maybe forgot r\\\"\\\"\\\"?\" % (m.group(1),))\n results.append((full_name, False, msg))\n continue\n\n try:\n src_file = short_path(inspect.getsourcefile(obj))\n except TypeError:\n src_file = None\n\n if src_file:\n file_full_name = src_file + ':' + full_name\n else:\n file_full_name = full_name\n\n results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))\n\n return results\n\n\n### Doctest helpers ####\n\n# the namespace to run examples in\nDEFAULT_NAMESPACE = {'np': np}\n\n# the namespace to do checks in\nCHECK_NAMESPACE = {\n 'np': np,\n 'numpy': np,\n 'assert_allclose': np.testing.assert_allclose,\n 'assert_equal': np.testing.assert_equal,\n # recognize numpy repr's\n 'array': np.array,\n 'matrix': np.matrix,\n 'int64': np.int64,\n 'uint64': np.uint64,\n 'int8': np.int8,\n 'int32': np.int32,\n 'float32': np.float32,\n 'float64': np.float64,\n 'dtype': np.dtype,\n 'nan': np.nan,\n 'inf': np.inf,\n 'StringIO': io.StringIO,\n}\n\n\nclass DTRunner(doctest.DocTestRunner):\n \"\"\"\n The doctest runner\n \"\"\"\n DIVIDER = \"\\n\"\n\n def __init__(self, item_name, checker=None, verbose=None, optionflags=0):\n self._item_name = item_name\n doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,\n optionflags=optionflags)\n\n def _report_item_name(self, out, new_line=False):\n if self._item_name is not None:\n if new_line:\n out(\"\\n\")\n self._item_name = None\n\n def report_start(self, out, test, example):\n self._checker._source = example.source\n return doctest.DocTestRunner.report_start(self, out, test, example)\n\n def report_success(self, out, test, example, got):\n if self._verbose:\n self._report_item_name(out, new_line=True)\n return doctest.DocTestRunner.report_success(self, out, test, example, got)\n\n def report_unexpected_exception(self, out, test, example, exc_info):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_unexpected_exception(\n self, out, test, example, exc_info)\n\n def report_failure(self, out, test, example, got):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_failure(self, out, test,\n example, got)\n\nclass Checker(doctest.OutputChecker):\n \"\"\"\n Check the docstrings\n \"\"\"\n obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')\n vanilla = doctest.OutputChecker()\n rndm_markers = {'# random', '# Random', '#random', '#Random', \"# may vary\",\n \"# uninitialized\", \"#uninitialized\", \"# uninit\"}\n stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',\n 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',\n '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',\n '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',\n '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}\n\n def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):\n self.parse_namedtuples = parse_namedtuples\n self.atol, self.rtol = atol, rtol\n if ns is None:\n self.ns = CHECK_NAMESPACE\n else:\n self.ns = ns\n\n def check_output(self, want, got, optionflags):\n # cut it short if they are equal\n if want == got:\n return True\n\n # skip stopwords in source\n if any(word in self._source for word in self.stopwords):\n return True\n\n # skip random stuff\n if any(word in want for word in self.rndm_markers):\n return True\n\n # skip function/object addresses\n if self.obj_pattern.search(got):\n return True\n\n # ignore comments (e.g. 
signal.freqresp)\n if want.lstrip().startswith(\"#\"):\n return True\n\n # try the standard doctest\n try:\n if self.vanilla.check_output(want, got, optionflags):\n return True\n except Exception:\n pass\n\n # OK then, convert strings to objects\n try:\n a_want = eval(want, dict(self.ns))\n a_got = eval(got, dict(self.ns))\n except Exception:\n # Maybe we're printing a numpy array? This produces invalid python\n # code: `print(np.arange(3))` produces \"[0 1 2]\" w/o commas between\n # values. So, reinsert commas and retry.\n # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and\n # (2) n-dim arrays with n > 1\n s_want = want.strip()\n s_got = got.strip()\n cond = (s_want.startswith(\"[\") and s_want.endswith(\"]\") and\n s_got.startswith(\"[\") and s_got.endswith(\"]\"))\n if cond:\n s_want = \", \".join(s_want[1:-1].split())\n s_got = \", \".join(s_got[1:-1].split())\n return self.check_output(s_want, s_got, optionflags)\n\n if not self.parse_namedtuples:\n return False\n # suppose that \"want\" is a tuple, and \"got\" is smth like\n # MoodResult(statistic=10, pvalue=0.1).\n # Then convert the latter to the tuple (10, 0.1),\n # and then compare the tuples.\n try:\n num = len(a_want)\n regex = (r'[\\w\\d_]+\\(' +\n ', '.join([r'[\\w\\d_]+=(.+)']*num) +\n r'\\)')\n grp = re.findall(regex, got.replace('\\n', ' '))\n if len(grp) > 1: # no more than one for now\n return False\n # fold it back to a tuple\n got_again = '(' + ', '.join(grp[0]) + ')'\n return self.check_output(want, got_again, optionflags)\n except Exception:\n return False\n\n # ... and defer to numpy\n try:\n return self._do_check(a_want, a_got)\n except Exception:\n # heterog tuple, eg (1, np.array([1., 2.]))\n try:\n return all(self._do_check(w, g) for w, g in zip(a_want, a_got))\n except (TypeError, ValueError):\n return False\n\n def _do_check(self, want, got):\n # This should be done exactly as written to correctly handle all of\n # numpy-comparable objects, strings, and heterogeneous tuples\n try:\n if want == got:\n return True\n except Exception:\n pass\n return np.allclose(want, got, atol=self.atol, rtol=self.rtol)\n\n\ndef _run_doctests(tests, full_name, verbose, doctest_warnings):\n \"\"\"\n Run modified doctests for the set of `tests`.\n\n Parameters\n ----------\n tests : list\n\n full_name : str\n\n verbose : bool\n doctest_warnings : bool\n\n Returns\n -------\n tuple(bool, list)\n Tuple of (success, output)\n \"\"\"\n flags = NORMALIZE_WHITESPACE | ELLIPSIS\n runner = DTRunner(full_name, checker=Checker(), optionflags=flags,\n verbose=verbose)\n\n output = io.StringIO(newline='')\n success = True\n\n # Redirect stderr to the stdout or output\n tmp_stderr = sys.stdout if doctest_warnings else output\n\n @contextmanager\n def temp_cwd():\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n try:\n os.chdir(tmpdir)\n yield tmpdir\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n\n # Run tests, trying to restore global state afterward\n cwd = os.getcwd()\n with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \\\n redirect_stderr(tmp_stderr):\n # try to ensure random seed is NOT reproducible\n np.random.seed(None)\n\n ns = {}\n for t in tests:\n # We broke the tests up into chunks to try to avoid PSEUDOCODE\n # This has the unfortunate side effect of restarting the global\n # namespace for each test chunk, so variables will be \"lost\" after\n # a chunk. 
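# --- Hedged sketch of the array-repr fallback in `check_output` above:
# `print(np.arange(3))` yields "[0 1 2]" with no commas, which eval() cannot parse,
# so commas are reinserted before comparing numerically with the same tolerances.
import numpy as np

def rejoin(s: str) -> str:
    return "[" + ", ".join(s.strip()[1:-1].split()) + "]"

want, got = "[0 1 2]", "[0  1  2]"
assert np.allclose(eval(rejoin(want)), eval(rejoin(got)), atol=1e-8, rtol=1e-2)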
Chain the globals to avoid this\n t.globs.update(ns)\n t.filename = short_path(t.filename, cwd)\n # Process our options\n if any([SKIPBLOCK in ex.options for ex in t.examples]):\n continue\n fails, successes = runner.run(t, out=output.write, clear_globs=False)\n if fails > 0:\n success = False\n ns = t.globs\n\n output.seek(0)\n return success, output.read()\n\n\ndef check_doctests(module, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"\n Check code in docstrings of the module's public symbols.\n\n Parameters\n ----------\n module : ModuleType\n Name of module\n verbose : bool\n Should the result be verbose\n ns : dict\n Name space of module\n dots : bool\n\n doctest_warnings : bool\n\n Returns\n -------\n results : list\n List of [(item_name, success_flag, output), ...]\n \"\"\"\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n # Loop over non-deprecated items\n results = []\n\n for name in get_all_dict(module)[0]:\n full_name = module.__name__ + '.' + name\n\n if full_name in DOCTEST_SKIPDICT:\n skip_methods = DOCTEST_SKIPDICT[full_name]\n if skip_methods is None:\n continue\n else:\n skip_methods = None\n\n try:\n obj = getattr(module, name)\n except AttributeError:\n import traceback\n results.append((full_name, False,\n \"Missing item!\\n\" +\n traceback.format_exc()))\n continue\n\n finder = doctest.DocTestFinder()\n try:\n tests = finder.find(obj, name, globs=dict(ns))\n except Exception:\n import traceback\n results.append((full_name, False,\n \"Failed to get doctests!\\n\" +\n traceback.format_exc()))\n continue\n\n if skip_methods is not None:\n tests = [i for i in tests if\n i.name.partition(\".\")[2] not in skip_methods]\n\n success, output = _run_doctests(tests, full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' if success else 'F')\n\n results.append((full_name, success, output))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef check_doctests_testfile(fname, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"\n Check code in a text file.\n\n Mimic `check_doctests` above, differing mostly in test discovery.\n (which is borrowed from stdlib's doctest.testfile here,\n https://github.com/python-git/python/blob/master/Lib/doctest.py)\n\n Parameters\n ----------\n fname : str\n File name\n verbose : bool\n\n ns : dict\n Name space\n\n dots : bool\n\n doctest_warnings : bool\n\n Returns\n -------\n list\n List of [(item_name, success_flag, output), ...]\n\n Notes\n -----\n\n refguide can be signalled to skip testing code by adding\n ``#doctest: +SKIP`` to the end of the line. If the output varies or is\n random, add ``# may vary`` or ``# random`` to the comment. for example\n\n >>> plt.plot(...) # doctest: +SKIP\n >>> random.randint(0,10)\n 5 # random\n\n We also try to weed out pseudocode:\n * We maintain a list of exceptions which signal pseudocode,\n * We split the text file into \"blocks\" of code separated by empty lines\n and/or intervening text.\n * If a block contains a marker, the whole block is then assumed to be\n pseudocode. 
It is then not being doctested.\n\n The rationale is that typically, the text looks like this:\n\n blah\n \n >>> from numpy import some_module # pseudocode!\n >>> func = some_module.some_function\n >>> func(42) # still pseudocode\n 146\n \n blah\n \n >>> 2 + 3 # real code, doctest it\n 5\n\n \"\"\"\n if ns is None:\n ns = CHECK_NAMESPACE\n results = []\n\n _, short_name = os.path.split(fname)\n if short_name in DOCTEST_SKIPDICT:\n return results\n\n full_name = fname\n with open(fname, encoding='utf-8') as f:\n text = f.read()\n\n PSEUDOCODE = set(['some_function', 'some_module', 'import example',\n 'ctypes.CDLL', # likely need compiling, skip it\n 'integrate.nquad(func,' # ctypes integrate tutotial\n ])\n\n # split the text into \"blocks\" and try to detect and omit pseudocode blocks.\n parser = doctest.DocTestParser()\n good_parts = []\n base_line_no = 0\n for part in text.split('\\n\\n'):\n try:\n tests = parser.get_doctest(part, ns, fname, fname, base_line_no)\n except ValueError as e:\n if e.args[0].startswith('line '):\n # fix line number since `parser.get_doctest` does not increment\n # the reported line number by base_line_no in the error message\n parts = e.args[0].split()\n parts[1] = str(int(parts[1]) + base_line_no)\n e.args = (' '.join(parts),) + e.args[1:]\n raise\n if any(word in ex.source for word in PSEUDOCODE\n for ex in tests.examples):\n # omit it\n pass\n else:\n # `part` looks like a good code, let's doctest it\n good_parts.append((part, base_line_no))\n base_line_no += part.count('\\n') + 2\n\n # Reassemble the good bits and doctest them:\n tests = []\n for good_text, line_no in good_parts:\n tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))\n success, output = _run_doctests(tests, full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' 
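# --- Hedged sketch of the pseudocode filter above: the RST text is split on blank
# lines and any block mentioning a known pseudocode marker (e.g. "some_module") is
# skipped entirely instead of being doctested.
PSEUDOCODE = {"some_function", "some_module", "import example"}

def keep_block(block: str) -> bool:
    return not any(marker in block for marker in PSEUDOCODE)

text = ">>> from numpy import some_module  # pseudocode\n\n>>> 2 + 3\n5"
kept = [b for b in text.split("\n\n") if keep_block(b)]
assert kept == [">>> 2 + 3\n5"]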
if success else 'F')\n\n results.append((full_name, success, output))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef iter_included_files(base_path, verbose=0, suffixes=('.rst',)):\n \"\"\"\n Generator function to walk `base_path` and its subdirectories, skipping\n files or directories in RST_SKIPLIST, and yield each file with a suffix in\n `suffixes`\n\n Parameters\n ----------\n base_path : str\n Base path of the directory to be processed\n verbose : int\n\n suffixes : tuple\n\n Yields\n ------\n path\n Path of the directory and its sub directories\n \"\"\"\n if os.path.exists(base_path) and os.path.isfile(base_path):\n yield base_path\n for dir_name, subdirs, files in os.walk(base_path, topdown=True):\n if dir_name in RST_SKIPLIST:\n if verbose > 0:\n sys.stderr.write('skipping files in %s' % dir_name)\n files = []\n for p in RST_SKIPLIST:\n if p in subdirs:\n if verbose > 0:\n sys.stderr.write('skipping %s and subdirs' % p)\n subdirs.remove(p)\n for f in files:\n if (os.path.splitext(f)[1] in suffixes and\n f not in RST_SKIPLIST):\n yield os.path.join(dir_name, f)\n\n\ndef check_documentation(base_path, results, args, dots):\n \"\"\"\n Check examples in any *.rst located inside `base_path`.\n Add the output to `results`.\n\n See Also\n --------\n check_doctests_testfile\n \"\"\"\n for filename in iter_included_files(base_path, args.verbose):\n if dots:\n sys.stderr.write(filename + ' ')\n sys.stderr.flush()\n\n tut_results = check_doctests_testfile(\n filename,\n (args.verbose >= 2), dots=dots,\n doctest_warnings=args.doctest_warnings)\n\n # stub out a \"module\" which is needed when reporting the result\n def scratch():\n pass\n scratch.__name__ = filename\n results.append((scratch, tut_results))\n if dots:\n sys.stderr.write('\\n')\n sys.stderr.flush()\n\n\ndef init_matplotlib():\n \"\"\"\n Check feasibility of matplotlib initialization.\n \"\"\"\n global HAVE_MATPLOTLIB\n\n try:\n import matplotlib\n matplotlib.use('Agg')\n HAVE_MATPLOTLIB = True\n except ImportError:\n HAVE_MATPLOTLIB = False\n\n\ndef main(argv):\n \"\"\"\n Validates the docstrings of all the pre decided set of\n modules for errors and docstring standards.\n \"\"\"\n parser = ArgumentParser(usage=__doc__.lstrip())\n parser.add_argument(\"module_names\", metavar=\"SUBMODULES\", default=[],\n nargs='*', help=\"Submodules to check (default: all public)\")\n parser.add_argument(\"--doctests\", action=\"store_true\",\n help=\"Run also doctests on \")\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n parser.add_argument(\"--doctest-warnings\", action=\"store_true\",\n help=\"Enforce warning checking for doctests\")\n parser.add_argument(\"--rst\", nargs='?', const='doc', default=None,\n help=(\"Run also examples from *rst files \"\n \"discovered walking the directory(s) specified, \"\n \"defaults to 'doc'\"))\n args = parser.parse_args(argv)\n\n modules = []\n names_dict = {}\n\n if not args.module_names:\n args.module_names = list(PUBLIC_SUBMODULES) + [BASE_MODULE]\n\n os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'\n\n module_names = list(args.module_names)\n for name in module_names:\n if name in OTHER_MODULE_DOCS:\n name = OTHER_MODULE_DOCS[name]\n if name not in module_names:\n module_names.append(name)\n\n dots = True\n success = True\n results = []\n errormsgs = []\n\n\n if args.doctests or args.rst:\n init_matplotlib()\n\n for submodule_name in module_names:\n prefix = BASE_MODULE + '.'\n if not (\n submodule_name.startswith(prefix) 
or\n submodule_name == BASE_MODULE\n ):\n module_name = prefix + submodule_name\n else:\n module_name = submodule_name\n\n __import__(module_name)\n module = sys.modules[module_name]\n\n if submodule_name not in OTHER_MODULE_DOCS:\n find_names(module, names_dict)\n\n if submodule_name in args.module_names:\n modules.append(module)\n\n if args.doctests or not args.rst:\n print(\"Running checks for %d modules:\" % (len(modules),))\n for module in modules:\n if dots:\n sys.stderr.write(module.__name__ + ' ')\n sys.stderr.flush()\n\n all_dict, deprecated, others = get_all_dict(module)\n names = names_dict.get(module.__name__, set())\n\n mod_results = []\n mod_results += check_items(all_dict, names, deprecated, others,\n module.__name__)\n mod_results += check_rest(module, set(names).difference(deprecated),\n dots=dots)\n if args.doctests:\n mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,\n doctest_warnings=args.doctest_warnings)\n\n for v in mod_results:\n assert isinstance(v, tuple), v\n\n results.append((module, mod_results))\n\n if dots:\n sys.stderr.write('\\n')\n sys.stderr.flush()\n\n if args.rst:\n base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')\n rst_path = os.path.relpath(os.path.join(base_dir, args.rst))\n if os.path.exists(rst_path):\n print('\\nChecking files in %s:' % rst_path)\n check_documentation(rst_path, results, args, dots)\n else:\n sys.stderr.write(f'\\ninvalid --rst argument \"{args.rst}\"')\n errormsgs.append('invalid directory argument to --rst')\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n # Report results\n for module, mod_results in results:\n success = all(x[1] for x in mod_results)\n if not success:\n errormsgs.append(f'failed checking {module.__name__}')\n\n if success and args.verbose == 0:\n continue\n\n print(\"\")\n print(\"=\" * len(module.__name__))\n print(module.__name__)\n print(\"=\" * len(module.__name__))\n print(\"\")\n\n for name, success, output in mod_results:\n if name is None:\n if not success or args.verbose >= 1:\n print(output.strip())\n print(\"\")\n elif not success or (args.verbose >= 2 and output.strip()):\n print(name)\n print(\"-\"*len(name))\n print(\"\")\n print(output.strip())\n print(\"\")\n\n if len(errormsgs) == 0:\n print(\"\\nOK: all checks passed!\")\n sys.exit(0)\n else:\n print('\\nERROR: ', '\\n '.join(errormsgs))\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main(argv=sys.argv[1:])\n", "output": ["compare", "check_rest", "get_all_dict", "find_names", "_run_doctests", "check_doctests_testfile", "short_path", "check_items", "output_dot", "check_documentation", "init_matplotlib", "is_deprecated", "iter_included_files", "main", "validate_rst_syntax", "check_doctests", "Checker", "DTRunner"], "metadata": {"file_path": "numpy-main/tools/refguide_check.py", "file_length": 11865, "symbol_dict": [{"symbol": "check_items", "type": "mannual_defined_function", "byte_location": 10525, "location": 3348}, {"symbol": "_run_doctests", "type": "mannual_defined_function", "byte_location": 23887, "location": 7432}, {"symbol": "check_documentation", "type": "mannual_defined_function", "byte_location": 32675, "location": 10096}, {"symbol": "short_path", "type": "mannual_defined_function", "byte_location": 4941, "location": 1713}, {"symbol": "check_doctests", "type": "mannual_defined_function", "byte_location": 25813, "location": 8015}, {"symbol": "compare", "type": "mannual_defined_function", "byte_location": 8804, "location": 2852}, {"symbol": "validate_rst_syntax", "type": 
"mannual_defined_function", "byte_location": 12804, "location": 4054}, {"symbol": "output_dot", "type": "mannual_defined_function", "byte_location": 15474, "location": 4849}, {"symbol": "init_matplotlib", "type": "mannual_defined_function", "byte_location": 33502, "location": 10340}, {"symbol": "find_names", "type": "mannual_defined_function", "byte_location": 5496, "location": 1892}, {"symbol": "check_rest", "type": "mannual_defined_function", "byte_location": 15561, "location": 4882}, {"symbol": "get_all_dict", "type": "mannual_defined_function", "byte_location": 7235, "location": 2403}, {"symbol": "main", "type": "mannual_defined_function", "byte_location": 33775, "location": 10429}, {"symbol": "iter_included_files", "type": "mannual_defined_function", "byte_location": 31513, "location": 9731}, {"symbol": "check_doctests_testfile", "type": "mannual_defined_function", "byte_location": 27866, "location": 8589}, {"symbol": "is_deprecated", "type": "mannual_defined_function", "byte_location": 10098, "location": 3226}, {"symbol": "Checker", "type": "mannual_defined_class", "byte_location": 19615, "location": 6133}, {"symbol": "DTRunner", "type": "mannual_defined_class", "byte_location": 18235, "location": 5717}]}} {"input": "\"\"\"numpy.distutils.fcompiler\n\nContains FCompiler, an abstract base class that defines the interface\nfor the numpy.distutils Fortran compiler abstraction model.\n\nTerminology:\n\nTo be consistent, where the term 'executable' is used, it means the single\nfile, like 'gcc', that is executed, and should be a string. In contrast,\n'command' means the entire command line, like ['gcc', '-c', 'file.c'], and\nshould be a list.\n\nBut note that FCompiler.executables is actually a dictionary of commands.\n\n\"\"\"\n__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',\n 'dummy_fortran_file']\n\nimport os\nimport sys\nimport re\nfrom pathlib import Path\n\nfrom distutils.sysconfig import get_python_lib\nfrom distutils.fancy_getopt import FancyGetopt\nfrom distutils.errors import DistutilsModuleError, \\\n DistutilsExecError, CompileError, LinkError, DistutilsPlatformError\nfrom distutils.util import split_quoted, strtobool\n\nfrom numpy.distutils.ccompiler import CCompiler, gen_lib_options\nfrom numpy.distutils import log\nfrom numpy.distutils.misc_util import is_string, all_strings, is_sequence, \\\n make_temp_file, get_shared_lib_extension\nfrom numpy.distutils.exec_command import find_executable\nfrom numpy.distutils import _shell_utils\n\nfrom .environment import EnvironmentConfig\n\n__metaclass__ = type\n\n\nFORTRAN_COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f']\n\n\nclass CompilerNotFound(Exception):\n pass\n\ndef flaglist(s):\n if is_string(s):\n return split_quoted(s)\n else:\n return s\n\ndef str2bool(s):\n if is_string(s):\n return strtobool(s)\n return bool(s)\n\ndef is_sequence_of_strings(seq):\n return is_sequence(seq) and all_strings(seq)\n\nclass FCompiler(CCompiler):\n \"\"\"Abstract base class to define the interface that must be implemented\n by real Fortran compiler classes.\n\n Methods that subclasses may redefine:\n\n update_executables(), find_executables(), get_version()\n get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()\n get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),\n get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),\n get_flags_arch_f90(), get_flags_debug_f90(),\n get_flags_fix(), get_flags_linker_so()\n\n DON'T call these methods (except get_version) after\n constructing a compiler instance 
or inside any other method.\n All methods, except update_executables() and find_executables(),\n may call the get_version() method.\n\n After constructing a compiler instance, always call customize(dist=None)\n method that finalizes compiler construction and makes the following\n attributes available:\n compiler_f77\n compiler_f90\n compiler_fix\n linker_so\n archiver\n ranlib\n libraries\n library_dirs\n \"\"\"\n\n # These are the environment variables and distutils keys used.\n # Each configuration description is\n # (, , , , )\n # The hook names are handled by the self._environment_hook method.\n # - names starting with 'self.' call methods in this class\n # - names starting with 'exe.' return the key in the executables dict\n # - names like 'flags.YYY' return self.get_flag_YYY()\n # convert is either None or a function to convert a string to the\n # appropriate type used.\n\n distutils_vars = EnvironmentConfig(\n distutils_section='config_fc',\n noopt = (None, None, 'noopt', str2bool, False),\n noarch = (None, None, 'noarch', str2bool, False),\n debug = (None, None, 'debug', str2bool, False),\n verbose = (None, None, 'verbose', str2bool, False),\n )\n\n command_vars = EnvironmentConfig(\n distutils_section='config_fc',\n compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),\n compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),\n compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),\n version_cmd = ('exe.version_cmd', None, None, None, False),\n linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),\n linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),\n archiver = (None, 'AR', 'ar', None, False),\n ranlib = (None, 'RANLIB', 'ranlib', None, False),\n )\n\n flag_vars = EnvironmentConfig(\n distutils_section='config_fc',\n f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),\n f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),\n free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),\n fix = ('flags.fix', None, None, flaglist, False),\n opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),\n opt_f77 = ('flags.opt_f77', None, None, flaglist, False),\n opt_f90 = ('flags.opt_f90', None, None, flaglist, False),\n arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),\n arch_f77 = ('flags.arch_f77', None, None, flaglist, False),\n arch_f90 = ('flags.arch_f90', None, None, flaglist, False),\n debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),\n debug_f77 = ('flags.debug_f77', None, None, flaglist, False),\n debug_f90 = ('flags.debug_f90', None, None, flaglist, False),\n flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),\n linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),\n linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),\n ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),\n )\n\n language_map = {'.f': 'f77',\n '.for': 'f77',\n '.F': 'f77', # XXX: needs preprocessor\n '.ftn': 'f77',\n '.f77': 'f77',\n '.f90': 'f90',\n '.F90': 'f90', # XXX: needs preprocessor\n '.f95': 'f90',\n }\n language_order = ['f90', 'f77']\n\n\n # These will be set by the subclass\n\n compiler_type = None\n compiler_aliases = ()\n version_pattern = None\n\n possible_executables = []\n executables = {\n 'version_cmd': [\"f77\", \"-v\"],\n 'compiler_f77': [\"f77\"],\n 'compiler_f90': [\"f90\"],\n 'compiler_fix': [\"f90\", \"-fixed\"],\n 'linker_so': [\"f90\", \"-shared\"],\n 'linker_exe': [\"f90\"],\n 'archiver': [\"ar\", \"-cr\"],\n 'ranlib': None,\n }\n\n # If 
compiler does not support compiling Fortran 90 then it can\n # suggest using another compiler. For example, gnu would suggest\n # gnu95 compiler type when there are F90 sources.\n suggested_f90_compiler = None\n\n compile_switch = \"-c\"\n object_switch = \"-o \" # Ending space matters! It will be stripped\n # but if it is missing then object_switch\n # will be prefixed to object file name by\n # string concatenation.\n library_switch = \"-o \" # Ditto!\n\n # Switch to specify where module files are created and searched\n # for USE statement. Normally it is a string and also here ending\n # space matters. See above.\n module_dir_switch = None\n\n # Switch to specify where module files are searched for USE statement.\n module_include_switch = '-I'\n\n pic_flags = [] # Flags to create position-independent code\n\n src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']\n obj_extension = \".o\"\n\n shared_lib_extension = get_shared_lib_extension()\n static_lib_extension = \".a\" # or .lib\n static_lib_format = \"lib%s%s\" # or %s%s\n shared_lib_format = \"%s%s\"\n exe_extension = \"\"\n\n _exe_cache = {}\n\n _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',\n 'compiler_fix', 'linker_so', 'linker_exe', 'archiver',\n 'ranlib']\n\n # This will be set by new_fcompiler when called in\n # command/{build_ext.py, build_clib.py, config.py} files.\n c_compiler = None\n\n # extra_{f77,f90}_compile_args are set by build_ext.build_extension method\n extra_f77_compile_args = []\n extra_f90_compile_args = []\n\n def __init__(self, *args, **kw):\n CCompiler.__init__(self, *args, **kw)\n self.distutils_vars = self.distutils_vars.clone(self._environment_hook)\n self.command_vars = self.command_vars.clone(self._environment_hook)\n self.flag_vars = self.flag_vars.clone(self._environment_hook)\n self.executables = self.executables.copy()\n for e in self._executable_keys:\n if e not in self.executables:\n self.executables[e] = None\n\n # Some methods depend on .customize() being called first, so\n # this keeps track of whether that's happened yet.\n self._is_customised = False\n\n def __copy__(self):\n obj = self.__new__(self.__class__)\n obj.__dict__.update(self.__dict__)\n obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)\n obj.command_vars = obj.command_vars.clone(obj._environment_hook)\n obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)\n obj.executables = obj.executables.copy()\n return obj\n\n def copy(self):\n return self.__copy__()\n\n # Use properties for the attributes used by CCompiler. 
Setting them\n # as attributes from the self.executables dictionary is error-prone,\n # so we get them from there each time.\n def _command_property(key):\n def fget(self):\n assert self._is_customised\n return self.executables[key]\n return property(fget=fget)\n version_cmd = _command_property('version_cmd')\n compiler_f77 = _command_property('compiler_f77')\n compiler_f90 = _command_property('compiler_f90')\n compiler_fix = _command_property('compiler_fix')\n linker_so = _command_property('linker_so')\n linker_exe = _command_property('linker_exe')\n archiver = _command_property('archiver')\n ranlib = _command_property('ranlib')\n\n # Make our terminology consistent.\n def set_executable(self, key, value):\n self.set_command(key, value)\n\n def set_commands(self, **kw):\n for k, v in kw.items():\n self.set_command(k, v)\n\n def set_command(self, key, value):\n if not key in self._executable_keys:\n raise ValueError(\n \"unknown executable '%s' for class %s\" %\n (key, self.__class__.__name__))\n if is_string(value):\n value = split_quoted(value)\n assert value is None or is_sequence_of_strings(value[1:]), (key, value)\n self.executables[key] = value\n\n ######################################################################\n ## Methods that subclasses may redefine. But don't call these methods!\n ## They are private to FCompiler class and may return unexpected\n ## results if used elsewhere. So, you have been warned..\n\n def find_executables(self):\n \"\"\"Go through the self.executables dictionary, and attempt to\n find and assign appropriate executables.\n\n Executable names are looked for in the environment (environment\n variables, the distutils.cfg, and command line), the 0th-element of\n the command list, and the self.possible_executables list.\n\n Also, if the 0th element is \"<F77>\" or \"<F90>\", the Fortran 77\n or the Fortran 90 compiler executable is used, unless overridden\n by an environment setting.\n\n Subclasses should call this if overridden.\n \"\"\"\n assert self._is_customised\n exe_cache = self._exe_cache\n def cached_find_executable(exe):\n if exe in exe_cache:\n return exe_cache[exe]\n fc_exe = find_executable(exe)\n exe_cache[exe] = exe_cache[fc_exe] = fc_exe\n return fc_exe\n def verify_command_form(name, value):\n if value is not None and not is_sequence_of_strings(value):\n raise ValueError(\n \"%s value %r is invalid in class %s\" %\n (name, value, self.__class__.__name__))\n def set_exe(exe_key, f77=None, f90=None):\n cmd = self.executables.get(exe_key, None)\n if not cmd:\n return None\n # Note that we get cmd[0] here if the environment doesn't\n # have anything set\n exe_from_environ = getattr(self.command_vars, exe_key)\n if not exe_from_environ:\n possibles = [f90, f77] + self.possible_executables\n else:\n possibles = [exe_from_environ] + self.possible_executables\n\n seen = set()\n unique_possibles = []\n for e in possibles:\n if e == '<F77>':\n e = f77\n elif e == '<F90>':\n e = f90\n if not e or e in seen:\n continue\n seen.add(e)\n unique_possibles.append(e)\n\n for exe in unique_possibles:\n fc_exe = cached_find_executable(exe)\n if fc_exe:\n cmd[0] = fc_exe\n return fc_exe\n self.set_command(exe_key, None)\n return None\n\n ctype = self.compiler_type\n f90 = set_exe('compiler_f90')\n if not f90:\n f77 = set_exe('compiler_f77')\n if f77:\n log.warn('%s: no Fortran 90 compiler found' % ctype)\n else:\n raise CompilerNotFound('%s: f90 nor f77' % ctype)\n else:\n f77 = set_exe('compiler_f77', f90=f90)\n if not f77:\n log.warn('%s: no Fortran 77 compiler found' % ctype)\n 
set_exe('compiler_fix', f90=f90)\n\n set_exe('linker_so', f77=f77, f90=f90)\n set_exe('linker_exe', f77=f77, f90=f90)\n set_exe('version_cmd', f77=f77, f90=f90)\n set_exe('archiver')\n set_exe('ranlib')\n\n def update_executables(self):\n \"\"\"Called at the beginning of customisation. Subclasses should\n override this if they need to set up the executables dictionary.\n\n Note that self.find_executables() is run afterwards, so the\n self.executables dictionary values can contain <F77> or <F90> as\n the command, which will be replaced by the found F77 or F90\n compiler.\n \"\"\"\n pass\n\n def get_flags(self):\n \"\"\"List of flags common to all compiler types.\"\"\"\n return [] + self.pic_flags\n\n def _get_command_flags(self, key):\n cmd = self.executables.get(key, None)\n if cmd is None:\n return []\n return cmd[1:]\n\n def get_flags_f77(self):\n \"\"\"List of Fortran 77 specific flags.\"\"\"\n return self._get_command_flags('compiler_f77')\n def get_flags_f90(self):\n \"\"\"List of Fortran 90 specific flags.\"\"\"\n return self._get_command_flags('compiler_f90')\n def get_flags_free(self):\n \"\"\"List of Fortran 90 free format specific flags.\"\"\"\n return []\n def get_flags_fix(self):\n \"\"\"List of Fortran 90 fixed format specific flags.\"\"\"\n return self._get_command_flags('compiler_fix')\n def get_flags_linker_so(self):\n \"\"\"List of linker flags to build a shared library.\"\"\"\n return self._get_command_flags('linker_so')\n def get_flags_linker_exe(self):\n \"\"\"List of linker flags to build an executable.\"\"\"\n return self._get_command_flags('linker_exe')\n def get_flags_ar(self):\n \"\"\"List of archiver flags. \"\"\"\n return self._get_command_flags('archiver')\n def get_flags_opt(self):\n \"\"\"List of architecture independent compiler flags.\"\"\"\n return []\n def get_flags_arch(self):\n \"\"\"List of architecture dependent compiler flags.\"\"\"\n return []\n def get_flags_debug(self):\n \"\"\"List of compiler flags to compile with debugging information.\"\"\"\n return []\n\n get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt\n get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch\n get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug\n\n def get_libraries(self):\n \"\"\"List of compiler libraries.\"\"\"\n return self.libraries[:]\n def get_library_dirs(self):\n \"\"\"List of compiler library directories.\"\"\"\n return self.library_dirs[:]\n\n def get_version(self, force=False, ok_status=[0]):\n assert self._is_customised\n version = CCompiler.get_version(self, force=force, ok_status=ok_status)\n if version is None:\n raise CompilerNotFound()\n return version\n\n\n ############################################################\n\n ## Public methods:\n\n def customize(self, dist = None):\n \"\"\"Customize Fortran compiler.\n\n This method gets Fortran compiler specific information from\n (i) class definition, (ii) environment, (iii) distutils config\n files, and (iv) command line (later overrides earlier).\n\n This method should be always called after constructing a\n compiler instance. 
But not in __init__ because Distribution\n instance is needed for (iii) and (iv).\n \"\"\"\n log.info('customize %s' % (self.__class__.__name__))\n\n self._is_customised = True\n\n self.distutils_vars.use_distribution(dist)\n self.command_vars.use_distribution(dist)\n self.flag_vars.use_distribution(dist)\n\n self.update_executables()\n\n # find_executables takes care of setting the compiler commands,\n # version_cmd, linker_so, linker_exe, ar, and ranlib\n self.find_executables()\n\n noopt = self.distutils_vars.get('noopt', False)\n noarch = self.distutils_vars.get('noarch', noopt)\n debug = self.distutils_vars.get('debug', False)\n\n f77 = self.command_vars.compiler_f77\n f90 = self.command_vars.compiler_f90\n\n f77flags = []\n f90flags = []\n freeflags = []\n fixflags = []\n\n if f77:\n f77 = _shell_utils.NativeParser.split(f77)\n f77flags = self.flag_vars.f77\n if f90:\n f90 = _shell_utils.NativeParser.split(f90)\n f90flags = self.flag_vars.f90\n freeflags = self.flag_vars.free\n # XXX Assuming that free format is default for f90 compiler.\n fix = self.command_vars.compiler_fix\n # NOTE: this and similar examples are probably just\n # excluding --coverage flag when F90 = gfortran --coverage\n # instead of putting that flag somewhere more appropriate\n # this and similar examples where a Fortran compiler\n # environment variable has been customized by CI or a user\n # should perhaps eventually be more thoroughly tested and more\n # robustly handled\n if fix:\n fix = _shell_utils.NativeParser.split(fix)\n fixflags = self.flag_vars.fix + f90flags\n\n oflags, aflags, dflags = [], [], []\n # examine get_flags__ for extra flags\n # only add them if the method is different from get_flags_\n def get_flags(tag, flags):\n # note that self.flag_vars. calls self.get_flags_()\n flags.extend(getattr(self.flag_vars, tag))\n this_get = getattr(self, 'get_flags_' + tag)\n for name, c, flagvar in [('f77', f77, f77flags),\n ('f90', f90, f90flags),\n ('f90', fix, fixflags)]:\n t = '%s_%s' % (tag, name)\n if c and this_get is not getattr(self, 'get_flags_' + t):\n flagvar.extend(getattr(self.flag_vars, t))\n if not noopt:\n get_flags('opt', oflags)\n if not noarch:\n get_flags('arch', aflags)\n if debug:\n get_flags('debug', dflags)\n\n fflags = self.flag_vars.flags + dflags + oflags + aflags\n\n if f77:\n self.set_commands(compiler_f77=f77+f77flags+fflags)\n if f90:\n self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)\n if fix:\n self.set_commands(compiler_fix=fix+fixflags+fflags)\n\n\n #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS\n linker_so = self.linker_so\n if linker_so:\n linker_so_flags = self.flag_vars.linker_so\n if sys.platform.startswith('aix'):\n python_lib = get_python_lib(standard_lib=1)\n ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')\n python_exp = os.path.join(python_lib, 'config', 'python.exp')\n linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]\n if sys.platform.startswith('os400'):\n from distutils.sysconfig import get_config_var\n python_config = get_config_var('LIBPL')\n ld_so_aix = os.path.join(python_config, 'ld_so_aix')\n python_exp = os.path.join(python_config, 'python.exp')\n linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]\n self.set_commands(linker_so=linker_so+linker_so_flags)\n\n linker_exe = self.linker_exe\n if linker_exe:\n linker_exe_flags = self.flag_vars.linker_exe\n self.set_commands(linker_exe=linker_exe+linker_exe_flags)\n\n ar = self.command_vars.archiver\n if ar:\n arflags = self.flag_vars.ar\n 
self.set_commands(archiver=[ar]+arflags)\n\n self.set_library_dirs(self.get_library_dirs())\n self.set_libraries(self.get_libraries())\n\n def dump_properties(self):\n \"\"\"Print out the attributes of a compiler instance.\"\"\"\n props = []\n for key in list(self.executables.keys()) + \\\n ['version', 'libraries', 'library_dirs',\n 'object_switch', 'compile_switch']:\n if hasattr(self, key):\n v = getattr(self, key)\n props.append((key, None, '= '+repr(v)))\n props.sort()\n\n pretty_printer = FancyGetopt(props)\n for l in pretty_printer.generate_help(\"%s instance properties:\" \\\n % (self.__class__.__name__)):\n if l[:4]==' --':\n l = ' ' + l[4:]\n print(l)\n\n ###################\n\n def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n \"\"\"Compile 'src' to product 'obj'.\"\"\"\n src_flags = {}\n if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \\\n and not has_f90_header(src):\n flavor = ':f77'\n compiler = self.compiler_f77\n src_flags = get_f77flags(src)\n extra_compile_args = self.extra_f77_compile_args or []\n elif is_free_format(src):\n flavor = ':f90'\n compiler = self.compiler_f90\n if compiler is None:\n raise DistutilsExecError('f90 not supported by %s needed for %s'\\\n % (self.__class__.__name__, src))\n extra_compile_args = self.extra_f90_compile_args or []\n else:\n flavor = ':fix'\n compiler = self.compiler_fix\n if compiler is None:\n raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\\\n % (self.__class__.__name__, src))\n extra_compile_args = self.extra_f90_compile_args or []\n if self.object_switch[-1]==' ':\n o_args = [self.object_switch.strip(), obj]\n else:\n o_args = [self.object_switch.strip()+obj]\n\n assert self.compile_switch.strip()\n s_args = [self.compile_switch, src]\n\n if extra_compile_args:\n log.info('extra %s options: %r' \\\n % (flavor[1:], ' '.join(extra_compile_args)))\n\n extra_flags = src_flags.get(self.compiler_type, [])\n if extra_flags:\n log.info('using compile options from source: %r' \\\n % ' '.join(extra_flags))\n\n command = compiler + cc_args + extra_flags + s_args + o_args \\\n + extra_postargs + extra_compile_args\n\n display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,\n src)\n try:\n self.spawn(command, display=display)\n except DistutilsExecError as e:\n msg = str(e)\n raise CompileError(msg) from None\n\n def module_options(self, module_dirs, module_build_dir):\n options = []\n if self.module_dir_switch is not None:\n if self.module_dir_switch[-1]==' ':\n options.extend([self.module_dir_switch.strip(), module_build_dir])\n else:\n options.append(self.module_dir_switch.strip()+module_build_dir)\n else:\n print('XXX: module_build_dir=%r option ignored' % (module_build_dir))\n print('XXX: Fix module_dir_switch for ', self.__class__.__name__)\n if self.module_include_switch is not None:\n for d in [module_build_dir]+module_dirs:\n options.append('%s%s' % (self.module_include_switch, d))\n else:\n print('XXX: module_dirs=%r option ignored' % (module_dirs))\n print('XXX: Fix module_include_switch for ', self.__class__.__name__)\n return options\n\n def library_option(self, lib):\n return \"-l\" + lib\n def library_dir_option(self, dir):\n return \"-L\" + dir\n\n def link(self, target_desc, objects,\n output_filename, output_dir=None, libraries=None,\n library_dirs=None, runtime_library_dirs=None,\n export_symbols=None, debug=0, extra_preargs=None,\n extra_postargs=None, build_temp=None, target_lang=None):\n objects, output_dir = self._fix_object_args(objects, output_dir)\n 
libraries, library_dirs, runtime_library_dirs = \\\n self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)\n\n lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,\n libraries)\n if is_string(output_dir):\n output_filename = os.path.join(output_dir, output_filename)\n elif output_dir is not None:\n raise TypeError(\"'output_dir' must be a string or None\")\n\n if self._need_link(objects, output_filename):\n if self.library_switch[-1]==' ':\n o_args = [self.library_switch.strip(), output_filename]\n else:\n o_args = [self.library_switch.strip()+output_filename]\n\n if is_string(self.objects):\n ld_args = objects + [self.objects]\n else:\n ld_args = objects + self.objects\n ld_args = ld_args + lib_opts + o_args\n if debug:\n ld_args[:0] = ['-g']\n if extra_preargs:\n ld_args[:0] = extra_preargs\n if extra_postargs:\n ld_args.extend(extra_postargs)\n self.mkpath(os.path.dirname(output_filename))\n if target_desc == CCompiler.EXECUTABLE:\n linker = self.linker_exe[:]\n else:\n linker = self.linker_so[:]\n command = linker + ld_args\n try:\n self.spawn(command)\n except DistutilsExecError as e:\n msg = str(e)\n raise LinkError(msg) from None\n else:\n log.debug(\"skipping %s (up-to-date)\", output_filename)\n\n def _environment_hook(self, name, hook_name):\n if hook_name is None:\n return None\n if is_string(hook_name):\n if hook_name.startswith('self.'):\n hook_name = hook_name[5:]\n hook = getattr(self, hook_name)\n return hook()\n elif hook_name.startswith('exe.'):\n hook_name = hook_name[4:]\n var = self.executables[hook_name]\n if var:\n return var[0]\n else:\n return None\n elif hook_name.startswith('flags.'):\n hook_name = hook_name[6:]\n hook = getattr(self, 'get_flags_' + hook_name)\n return hook()\n else:\n return hook_name()\n\n def can_ccompiler_link(self, ccompiler):\n \"\"\"\n Check if the given C compiler can link objects produced by\n this compiler.\n \"\"\"\n return True\n\n def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):\n \"\"\"\n Convert a set of object files that are not compatible with the default\n linker, to a file that is compatible.\n\n Parameters\n ----------\n objects : list\n List of object files to include.\n output_dir : str\n Output directory to place generated object files.\n extra_dll_dir : str\n Output directory to place extra DLL files that need to be\n included on Windows.\n\n Returns\n -------\n converted_objects : list of str\n List of converted object files.\n Note that the number of output files is not necessarily\n the same as inputs.\n\n \"\"\"\n raise NotImplementedError()\n\n ## class FCompiler\n\n_default_compilers = (\n # sys.platform mappings\n ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',\n 'intelvem', 'intelem', 'flang')),\n ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),\n ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag',\n 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95', \n 'pathf95', 'nagfor', 'fujitsu')),\n ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu',\n 'g95', 'pg')),\n ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),\n ('irix.*', ('mips', 'gnu', 'gnu95',)),\n ('aix.*', ('ibm', 'gnu', 'gnu95',)),\n # os.name mappings\n ('posix', ('gnu', 'gnu95',)),\n ('nt', ('gnu', 'gnu95',)),\n ('mac', ('gnu95', 'gnu', 'pg')),\n )\n\nfcompiler_class = None\nfcompiler_aliases = None\n\ndef load_all_fcompiler_classes():\n \"\"\"Cache all the FCompiler classes found in modules in the\n 
numpy.distutils.fcompiler package.\n \"\"\"\n from glob import glob\n global fcompiler_class, fcompiler_aliases\n if fcompiler_class is not None:\n return\n pys = os.path.join(os.path.dirname(__file__), '*.py')\n fcompiler_class = {}\n fcompiler_aliases = {}\n for fname in glob(pys):\n module_name, ext = os.path.splitext(os.path.basename(fname))\n module_name = 'numpy.distutils.fcompiler.' + module_name\n __import__ (module_name)\n module = sys.modules[module_name]\n if hasattr(module, 'compilers'):\n for cname in module.compilers:\n klass = getattr(module, cname)\n desc = (klass.compiler_type, klass, klass.description)\n fcompiler_class[klass.compiler_type] = desc\n for alias in klass.compiler_aliases:\n if alias in fcompiler_aliases:\n raise ValueError(\"alias %r defined for both %s and %s\"\n % (alias, klass.__name__,\n fcompiler_aliases[alias][1].__name__))\n fcompiler_aliases[alias] = desc\n\ndef _find_existing_fcompiler(compiler_types,\n osname=None, platform=None,\n requiref90=False,\n c_compiler=None):\n from numpy.distutils.core import get_distribution\n dist = get_distribution(always=True)\n for compiler_type in compiler_types:\n v = None\n try:\n c = new_fcompiler(plat=platform, compiler=compiler_type,\n c_compiler=c_compiler)\n c.customize(dist)\n v = c.get_version()\n if requiref90 and c.compiler_f90 is None:\n v = None\n new_compiler = c.suggested_f90_compiler\n if new_compiler:\n log.warn('Trying %r compiler as suggested by %r '\n 'compiler for f90 support.' % (compiler_type,\n new_compiler))\n c = new_fcompiler(plat=platform, compiler=new_compiler,\n c_compiler=c_compiler)\n c.customize(dist)\n v = c.get_version()\n if v is not None:\n compiler_type = new_compiler\n if requiref90 and c.compiler_f90 is None:\n raise ValueError('%s does not support compiling f90 codes, '\n 'skipping.' 
% (c.__class__.__name__))\n except DistutilsModuleError:\n log.debug(\"_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError\", compiler_type)\n except CompilerNotFound:\n log.debug(\"_find_existing_fcompiler: compiler_type='%s' not found\", compiler_type)\n if v is not None:\n return compiler_type\n return None\n\ndef available_fcompilers_for_platform(osname=None, platform=None):\n if osname is None:\n osname = os.name\n if platform is None:\n platform = sys.platform\n matching_compiler_types = []\n for pattern, compiler_type in _default_compilers:\n if re.match(pattern, platform) or re.match(pattern, osname):\n for ct in compiler_type:\n if ct not in matching_compiler_types:\n matching_compiler_types.append(ct)\n if not matching_compiler_types:\n matching_compiler_types.append('gnu')\n return matching_compiler_types\n\ndef get_default_fcompiler(osname=None, platform=None, requiref90=False,\n c_compiler=None):\n \"\"\"Determine the default Fortran compiler to use for the given\n platform.\"\"\"\n matching_compiler_types = available_fcompilers_for_platform(osname,\n platform)\n log.info(\"get_default_fcompiler: matching types: '%s'\",\n matching_compiler_types)\n compiler_type = _find_existing_fcompiler(matching_compiler_types,\n osname=osname,\n platform=platform,\n requiref90=requiref90,\n c_compiler=c_compiler)\n return compiler_type\n\n# Flag to avoid rechecking for Fortran compiler every time\nfailed_fcompilers = set()\n\ndef new_fcompiler(plat=None,\n compiler=None,\n verbose=0,\n dry_run=0,\n force=0,\n requiref90=False,\n c_compiler = None):\n \"\"\"Generate an instance of some FCompiler subclass for the supplied\n platform/compiler combination.\n \"\"\"\n global failed_fcompilers\n fcompiler_key = (plat, compiler)\n if fcompiler_key in failed_fcompilers:\n return None\n\n load_all_fcompiler_classes()\n if plat is None:\n plat = os.name\n if compiler is None:\n compiler = get_default_fcompiler(plat, requiref90=requiref90,\n c_compiler=c_compiler)\n if compiler in fcompiler_class:\n module_name, klass, long_description = fcompiler_class[compiler]\n elif compiler in fcompiler_aliases:\n module_name, klass, long_description = fcompiler_aliases[compiler]\n else:\n msg = \"don't know how to compile Fortran code on platform '%s'\" % plat\n if compiler is not None:\n msg = msg + \" with '%s' compiler.\" % compiler\n msg = msg + \" Supported compilers are: %s)\" \\\n % (','.join(fcompiler_class.keys()))\n log.warn(msg)\n failed_fcompilers.add(fcompiler_key)\n return None\n\n compiler = klass(verbose=verbose, dry_run=dry_run, force=force)\n compiler.c_compiler = c_compiler\n return compiler\n\ndef show_fcompilers(dist=None):\n \"\"\"Print list of available compilers (used by the \"--help-fcompiler\"\n option to \"config_fc\").\n \"\"\"\n if dist is None:\n from distutils.dist import Distribution\n from numpy.distutils.command.config_compiler import config_fc\n dist = Distribution()\n dist.script_name = os.path.basename(sys.argv[0])\n dist.script_args = ['config_fc'] + sys.argv[1:]\n try:\n dist.script_args.remove('--help-fcompiler')\n except ValueError:\n pass\n dist.cmdclass['config_fc'] = config_fc\n dist.parse_config_files()\n dist.parse_command_line()\n compilers = []\n compilers_na = []\n compilers_ni = []\n if not fcompiler_class:\n load_all_fcompiler_classes()\n platform_compilers = available_fcompilers_for_platform()\n for compiler in platform_compilers:\n v = None\n log.set_verbosity(-2)\n try:\n c = new_fcompiler(compiler=compiler, verbose=dist.verbose)\n 
c.customize(dist)\n v = c.get_version()\n except (DistutilsModuleError, CompilerNotFound) as e:\n log.debug(\"show_fcompilers: %s not found\" % (compiler,))\n log.debug(repr(e))\n\n if v is None:\n compilers_na.append((\"fcompiler=\"+compiler, None,\n fcompiler_class[compiler][2]))\n else:\n c.dump_properties()\n compilers.append((\"fcompiler=\"+compiler, None,\n fcompiler_class[compiler][2] + ' (%s)' % v))\n\n compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))\n compilers_ni = [(\"fcompiler=\"+fc, None, fcompiler_class[fc][2])\n for fc in compilers_ni]\n\n compilers.sort()\n compilers_na.sort()\n compilers_ni.sort()\n pretty_printer = FancyGetopt(compilers)\n pretty_printer.print_help(\"Fortran compilers found:\")\n pretty_printer = FancyGetopt(compilers_na)\n pretty_printer.print_help(\"Compilers available for this \"\n \"platform, but not found:\")\n if compilers_ni:\n pretty_printer = FancyGetopt(compilers_ni)\n pretty_printer.print_help(\"Compilers not available on this platform:\")\n print(\"For compiler details, run 'config_fc --verbose' setup command.\")\n\n\ndef dummy_fortran_file():\n fo, name = make_temp_file(suffix='.f')\n fo.write(\" subroutine dummy()\\n end\\n\")\n fo.close()\n return name[:-2]\n\n\n_has_f_header = re.compile(r'-\\*-\\s*fortran\\s*-\\*-', re.I).search\n_has_f90_header = re.compile(r'-\\*-\\s*f90\\s*-\\*-', re.I).search\n_has_fix_header = re.compile(r'-\\*-\\s*fix\\s*-\\*-', re.I).search\n_free_f90_start = re.compile(r'[^c*!]\\s*[^\\s\\d\\t]', re.I).match\n\ndef is_free_format(file):\n \"\"\"Check if file is in free format Fortran.\"\"\"\n # f90 allows both fixed and free format, assuming fixed unless\n # signs of free format are detected.\n result = 0\n with open(file, encoding='latin1') as f:\n line = f.readline()\n n = 10000 # the number of non-comment lines to scan for hints\n if _has_f_header(line) or _has_fix_header(line):\n n = 0\n elif _has_f90_header(line):\n n = 0\n result = 1\n while n>0 and line:\n line = line.rstrip()\n if line and line[0]!='!':\n n -= 1\n if (line[0]!='\\t' and _free_f90_start(line[:5])) or line[-1:]=='&':\n result = 1\n break\n line = f.readline()\n return result\n\ndef has_f90_header(src):\n with open(src, encoding='latin1') as f:\n line = f.readline()\n return _has_f90_header(line) or _has_fix_header(line)\n\n_f77flags_re = re.compile(r'(c|)f77flags\\s*\\(\\s*(?P<fcname>\\w+)\\s*\\)\\s*=\\s*(?P<fflags>.*)', re.I)\ndef get_f77flags(src):\n \"\"\"\n Search the first 20 lines of fortran 77 code for line pattern\n `CF77FLAGS(<fcompiler type>)=<f77 flags>`\n Return a dictionary {<fcompiler type>: <f77 flags>}.\n \"\"\"\n flags = {}\n with open(src, encoding='latin1') as f:\n i = 0\n for line in f:\n i += 1\n if i>20: break\n m = _f77flags_re.match(line)\n if not m: continue\n fcname = m.group('fcname').strip()\n fflags = m.group('fflags').strip()\n flags[fcname] = split_quoted(fflags)\n return flags\n\n# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags\n\nif __name__ == '__main__':\n show_fcompilers()\n", "output": ["flaglist", "has_f90_header", "available_fcompilers_for_platform", "is_sequence_of_strings", "_find_existing_fcompiler", "is_free_format", "new_fcompiler", "str2bool", "show_fcompilers", "dummy_fortran_file", "get_f77flags", "get_default_fcompiler", "load_all_fcompiler_classes", "FCompiler", "CompilerNotFound"], "metadata": {"file_path": "numpy-main/numpy/distutils/fcompiler/__init__.py", "file_length": 12271, "symbol_dict": [{"symbol": "str2bool", "type": "mannual_defined_function", "byte_location": 1513, "location": 452}, {"symbol": 
"show_fcompilers", "type": "mannual_defined_function", "byte_location": 36107, "location": 10758}, {"symbol": "get_default_fcompiler", "type": "mannual_defined_function", "byte_location": 33783, "location": 10144}, {"symbol": "new_fcompiler", "type": "mannual_defined_function", "byte_location": 34674, "location": 10353}, {"symbol": "dummy_fortran_file", "type": "mannual_defined_function", "byte_location": 38428, "location": 11448}, {"symbol": "available_fcompilers_for_platform", "type": "mannual_defined_function", "byte_location": 33191, "location": 9981}, {"symbol": "get_f77flags", "type": "mannual_defined_function", "byte_location": 39911, "location": 12026}, {"symbol": "is_free_format", "type": "mannual_defined_function", "byte_location": 38848, "location": 11650}, {"symbol": "flaglist", "type": "mannual_defined_function", "byte_location": 1416, "location": 417}, {"symbol": "load_all_fcompiler_classes", "type": "mannual_defined_function", "byte_location": 30133, "location": 9180}, {"symbol": "has_f90_header", "type": "mannual_defined_function", "byte_location": 39657, "location": 11905}, {"symbol": "is_sequence_of_strings", "type": "mannual_defined_function", "byte_location": 1599, "location": 486}, {"symbol": "_find_existing_fcompiler", "type": "mannual_defined_function", "byte_location": 31395, "location": 9528}, {"symbol": "CompilerNotFound", "type": "mannual_defined_class", "byte_location": 1371, "location": 405}, {"symbol": "FCompiler", "type": "mannual_defined_class", "byte_location": 1682, "location": 515}]}} {"input": "import pytest\n\nimport numpy as np\nfrom numpy._core.multiarray import _vec_string\nfrom numpy.testing import (\n assert_, assert_equal, assert_array_equal, assert_raises,\n assert_raises_regex\n )\n\nkw_unicode_true = {'unicode': True} # make 2to3 work properly\nkw_unicode_false = {'unicode': False}\n\nclass TestBasic:\n def test_from_object_array(self):\n A = np.array([['abc', 2],\n ['long ', '0123456789']], dtype='O')\n B = np.char.array(A)\n assert_equal(B.dtype.itemsize, 10)\n assert_array_equal(B, [[b'abc', b'2'],\n [b'long', b'0123456789']])\n\n def test_from_object_array_unicode(self):\n A = np.array([['abc', 'Sigma \\u03a3'],\n ['long ', '0123456789']], dtype='O')\n assert_raises(ValueError, np.char.array, (A,))\n B = np.char.array(A, **kw_unicode_true)\n assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)\n assert_array_equal(B, [['abc', 'Sigma \\u03a3'],\n ['long', '0123456789']])\n\n def test_from_string_array(self):\n A = np.array([[b'abc', b'foo'],\n [b'long ', b'0123456789']])\n assert_equal(A.dtype.type, np.bytes_)\n B = np.char.array(A)\n assert_array_equal(B, A)\n assert_equal(B.dtype, A.dtype)\n assert_equal(B.shape, A.shape)\n B[0, 0] = 'changed'\n assert_(B[0, 0] != A[0, 0])\n C = np.char.asarray(A)\n assert_array_equal(C, A)\n assert_equal(C.dtype, A.dtype)\n C[0, 0] = 'changed again'\n assert_(C[0, 0] != B[0, 0])\n assert_(C[0, 0] == A[0, 0])\n\n def test_from_unicode_array(self):\n A = np.array([['abc', 'Sigma \\u03a3'],\n ['long ', '0123456789']])\n assert_equal(A.dtype.type, np.str_)\n B = np.char.array(A)\n assert_array_equal(B, A)\n assert_equal(B.dtype, A.dtype)\n assert_equal(B.shape, A.shape)\n B = np.char.array(A, **kw_unicode_true)\n assert_array_equal(B, A)\n assert_equal(B.dtype, A.dtype)\n assert_equal(B.shape, A.shape)\n\n def fail():\n np.char.array(A, **kw_unicode_false)\n\n assert_raises(UnicodeEncodeError, fail)\n\n def test_unicode_upconvert(self):\n A = np.char.array(['abc'])\n B = 
np.char.array(['\\u03a3'])\n assert_(issubclass((A + B).dtype.type, np.str_))\n\n def test_from_string(self):\n A = np.char.array(b'abc')\n assert_equal(len(A), 1)\n assert_equal(len(A[0]), 3)\n assert_(issubclass(A.dtype.type, np.bytes_))\n\n def test_from_unicode(self):\n A = np.char.array('\\u03a3')\n assert_equal(len(A), 1)\n assert_equal(len(A[0]), 1)\n assert_equal(A.itemsize, 4)\n assert_(issubclass(A.dtype.type, np.str_))\n\nclass TestVecString:\n def test_non_existent_method(self):\n\n def fail():\n _vec_string('a', np.bytes_, 'bogus')\n\n assert_raises(AttributeError, fail)\n\n def test_non_string_array(self):\n\n def fail():\n _vec_string(1, np.bytes_, 'strip')\n\n assert_raises(TypeError, fail)\n\n def test_invalid_args_tuple(self):\n\n def fail():\n _vec_string(['a'], np.bytes_, 'strip', 1)\n\n assert_raises(TypeError, fail)\n\n def test_invalid_type_descr(self):\n\n def fail():\n _vec_string(['a'], 'BOGUS', 'strip')\n\n assert_raises(TypeError, fail)\n\n def test_invalid_function_args(self):\n\n def fail():\n _vec_string(['a'], np.bytes_, 'strip', (1,))\n\n assert_raises(TypeError, fail)\n\n def test_invalid_result_type(self):\n\n def fail():\n _vec_string(['a'], np.int_, 'strip')\n\n assert_raises(TypeError, fail)\n\n def test_broadcast_error(self):\n\n def fail():\n _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))\n\n assert_raises(ValueError, fail)\n\n\nclass TestWhitespace:\n def setup_method(self):\n self.A = np.array([['abc ', '123 '],\n ['789 ', 'xyz ']]).view(np.char.chararray)\n self.B = np.array([['abc', '123'],\n ['789', 'xyz']]).view(np.char.chararray)\n\n def test1(self):\n assert_(np.all(self.A == self.B))\n assert_(np.all(self.A >= self.B))\n assert_(np.all(self.A <= self.B))\n assert_(not np.any(self.A > self.B))\n assert_(not np.any(self.A < self.B))\n assert_(not np.any(self.A != self.B))\n\nclass TestChar:\n def setup_method(self):\n self.A = np.array('abc1', dtype='c').view(np.char.chararray)\n\n def test_it(self):\n assert_equal(self.A.shape, (4,))\n assert_equal(self.A.upper()[:2].tobytes(), b'AB')\n\nclass TestComparisons:\n def setup_method(self):\n self.A = np.array([['abc', 'abcc', '123'],\n ['789', 'abc', 'xyz']]).view(np.char.chararray)\n self.B = np.array([['efg', 'efg', '123 '],\n ['051', 'efgg', 'tuv']]).view(np.char.chararray)\n\n def test_not_equal(self):\n assert_array_equal((self.A != self.B),\n [[True, True, False], [True, True, True]])\n\n def test_equal(self):\n assert_array_equal((self.A == self.B),\n [[False, False, True], [False, False, False]])\n\n def test_greater_equal(self):\n assert_array_equal((self.A >= self.B),\n [[False, False, True], [True, False, True]])\n\n def test_less_equal(self):\n assert_array_equal((self.A <= self.B),\n [[True, True, True], [False, True, False]])\n\n def test_greater(self):\n assert_array_equal((self.A > self.B),\n [[False, False, False], [True, False, True]])\n\n def test_less(self):\n assert_array_equal((self.A < self.B),\n [[True, True, False], [False, True, False]])\n\n def test_type(self):\n out1 = np.char.equal(self.A, self.B)\n out2 = np.char.equal('a', 'a')\n assert_(isinstance(out1, np.ndarray))\n assert_(isinstance(out2, np.ndarray))\n\nclass TestComparisonsMixed1(TestComparisons):\n \"\"\"Ticket #1276\"\"\"\n\n def setup_method(self):\n TestComparisons.setup_method(self)\n self.B = np.array(\n [['efg', 'efg', '123 '],\n ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray)\n\nclass TestComparisonsMixed2(TestComparisons):\n \"\"\"Ticket #1276\"\"\"\n\n def 
setup_method(self):\n TestComparisons.setup_method(self)\n self.A = np.array(\n [['abc', 'abcc', '123'],\n ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray)\n\nclass TestInformation:\n def setup_method(self):\n self.A = np.array([[' abc ', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345 \\0 ', 'UPPER']]) \\\n .view(np.char.chararray)\n self.B = np.array([[' \\u03a3 ', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345 \\0 ', 'UPPER']]) \\\n .view(np.char.chararray)\n # Array with longer strings, > MEMCHR_CUT_OFF in code.\n self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ',\n '01234567890123456789012345'])\n .view(np.char.chararray))\n\n def test_len(self):\n assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))\n assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])\n assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])\n\n def test_count(self):\n assert_(issubclass(self.A.count('').dtype.type, np.integer))\n assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])\n assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])\n # Python doesn't seem to like counting NULL characters\n # assert_array_equal(self.A.count('\\0'), [[0, 0], [0, 0], [1, 0]])\n assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])\n assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])\n assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])\n # assert_array_equal(self.B.count('\\0'), [[0, 0], [0, 0], [1, 0]])\n\n def test_endswith(self):\n assert_(issubclass(self.A.endswith('').dtype.type, np.bool))\n assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])\n assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])\n\n def fail():\n self.A.endswith('3', 'fdjk')\n\n assert_raises(TypeError, fail)\n\n @pytest.mark.parametrize(\n \"dtype, encode\",\n [(\"U\", str),\n (\"S\", lambda x: x.encode('ascii')),\n ])\n def test_find(self, dtype, encode):\n A = self.A.astype(dtype)\n assert_(issubclass(A.find(encode('a')).dtype.type, np.integer))\n assert_array_equal(A.find(encode('a')),\n [[1, -1], [-1, 6], [-1, -1]])\n assert_array_equal(A.find(encode('3')),\n [[-1, -1], [2, -1], [2, -1]])\n assert_array_equal(A.find(encode('a'), 0, 2),\n [[1, -1], [-1, -1], [-1, -1]])\n assert_array_equal(A.find([encode('1'), encode('P')]),\n [[-1, -1], [0, -1], [0, 1]])\n C = self.C.astype(dtype)\n assert_array_equal(C.find(encode('M')), [12, -1])\n\n def test_index(self):\n\n def fail():\n self.A.index('a')\n\n assert_raises(ValueError, fail)\n assert_(np.char.index('abcba', 'b') == 1)\n assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))\n\n def test_isalnum(self):\n assert_(issubclass(self.A.isalnum().dtype.type, np.bool))\n assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])\n\n def test_isalpha(self):\n assert_(issubclass(self.A.isalpha().dtype.type, np.bool))\n assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])\n\n def test_isdigit(self):\n assert_(issubclass(self.A.isdigit().dtype.type, np.bool))\n assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])\n\n def test_islower(self):\n assert_(issubclass(self.A.islower().dtype.type, np.bool))\n assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])\n\n def test_isspace(self):\n assert_(issubclass(self.A.isspace().dtype.type, np.bool))\n assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, 
False]])\n\n def test_istitle(self):\n assert_(issubclass(self.A.istitle().dtype.type, np.bool))\n assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])\n\n def test_isupper(self):\n assert_(issubclass(self.A.isupper().dtype.type, np.bool))\n assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])\n\n def test_rfind(self):\n assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))\n assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])\n assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])\n assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])\n assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])\n\n def test_rindex(self):\n\n def fail():\n self.A.rindex('a')\n\n assert_raises(ValueError, fail)\n assert_(np.char.rindex('abcba', 'b') == 3)\n assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))\n\n def test_startswith(self):\n assert_(issubclass(self.A.startswith('').dtype.type, np.bool))\n assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])\n assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])\n\n def fail():\n self.A.startswith('3', 'fdjk')\n\n assert_raises(TypeError, fail)\n\n\nclass TestMethods:\n def setup_method(self):\n self.A = np.array([[' abc ', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345 \\0 ', 'UPPER']],\n dtype='S').view(np.char.chararray)\n self.B = np.array([[' \\u03a3 ', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345 \\0 ', 'UPPER']]).view(\n np.char.chararray)\n\n def test_capitalize(self):\n tgt = [[b' abc ', b''],\n [b'12345', b'Mixedcase'],\n [b'123 \\t 345 \\0 ', b'Upper']]\n assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_))\n assert_array_equal(self.A.capitalize(), tgt)\n\n tgt = [[' \\u03c3 ', ''],\n ['12345', 'Mixedcase'],\n ['123 \\t 345 \\0 ', 'Upper']]\n assert_(issubclass(self.B.capitalize().dtype.type, np.str_))\n assert_array_equal(self.B.capitalize(), tgt)\n\n def test_center(self):\n assert_(issubclass(self.A.center(10).dtype.type, np.bytes_))\n C = self.A.center([10, 20])\n assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])\n\n C = self.A.center(20, b'#')\n assert_(np.all(C.startswith(b'#')))\n assert_(np.all(C.endswith(b'#')))\n\n C = np.char.center(b'FOO', [[10, 20], [15, 8]])\n tgt = [[b' FOO ', b' FOO '],\n [b' FOO ', b' FOO ']]\n assert_(issubclass(C.dtype.type, np.bytes_))\n assert_array_equal(C, tgt)\n\n def test_decode(self):\n A = np.char.array([b'\\\\u03a3'])\n assert_(A.decode('unicode-escape')[0] == '\\u03a3')\n\n def test_encode(self):\n B = self.B.encode('unicode_escape')\n assert_(B[0][0] == str(' \\\\u03a3 ').encode('latin1'))\n\n def test_expandtabs(self):\n T = self.A.expandtabs()\n assert_(T[2, 0] == b'123 345 \\0')\n\n def test_join(self):\n # NOTE: list(b'123') == [49, 50, 51]\n # so that b','.join(b'123') results to an error on Py3\n A0 = self.A.decode('ascii')\n\n A = np.char.join([',', '#'], A0)\n assert_(issubclass(A.dtype.type, np.str_))\n tgt = np.array([[' ,a,b,c, ', ''],\n ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],\n ['1,2,3, ,\\t, ,3,4,5, ,\\x00, ', 'U#P#P#E#R']])\n assert_array_equal(np.char.join([',', '#'], A0), tgt)\n\n def test_ljust(self):\n assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_))\n\n C = self.A.ljust([10, 20])\n assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])\n\n C = self.A.ljust(20, b'#')\n assert_array_equal(C.startswith(b'#'), [\n [False, True], 
[False, False], [False, False]])\n assert_(np.all(C.endswith(b'#')))\n\n C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])\n tgt = [[b'FOO ', b'FOO '],\n [b'FOO ', b'FOO ']]\n assert_(issubclass(C.dtype.type, np.bytes_))\n assert_array_equal(C, tgt)\n\n def test_lower(self):\n tgt = [[b' abc ', b''],\n [b'12345', b'mixedcase'],\n [b'123 \\t 345 \\0 ', b'upper']]\n assert_(issubclass(self.A.lower().dtype.type, np.bytes_))\n assert_array_equal(self.A.lower(), tgt)\n\n tgt = [[' \\u03c3 ', ''],\n ['12345', 'mixedcase'],\n ['123 \\t 345 \\0 ', 'upper']]\n assert_(issubclass(self.B.lower().dtype.type, np.str_))\n assert_array_equal(self.B.lower(), tgt)\n\n def test_lstrip(self):\n tgt = [[b'abc ', b''],\n [b'12345', b'MixedCase'],\n [b'123 \\t 345 \\0 ', b'UPPER']]\n assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_))\n assert_array_equal(self.A.lstrip(), tgt)\n\n tgt = [[b' abc', b''],\n [b'2345', b'ixedCase'],\n [b'23 \\t 345 \\x00', b'UPPER']]\n assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)\n\n tgt = [['\\u03a3 ', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345 \\0 ', 'UPPER']]\n assert_(issubclass(self.B.lstrip().dtype.type, np.str_))\n assert_array_equal(self.B.lstrip(), tgt)\n\n def test_partition(self):\n P = self.A.partition([b'3', b'M'])\n tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],\n [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],\n [(b'12', b'3', b' \\t 345 \\0 '), (b'UPPER', b'', b'')]]\n assert_(issubclass(P.dtype.type, np.bytes_))\n assert_array_equal(P, tgt)\n\n def test_replace(self):\n R = self.A.replace([b'3', b'a'],\n [b'##########', b'@'])\n tgt = [[b' abc ', b''],\n [b'12##########45', b'MixedC@se'],\n [b'12########## \\t ##########45 \\x00 ', b'UPPER']]\n assert_(issubclass(R.dtype.type, np.bytes_))\n assert_array_equal(R, tgt)\n # Test special cases that should just return the input array,\n # since replacements are not possible or do nothing.\n S1 = self.A.replace(b'A very long byte string, longer than A', b'')\n assert_array_equal(S1, self.A)\n S2 = self.A.replace(b'', b'')\n assert_array_equal(S2, self.A)\n S3 = self.A.replace(b'3', b'3')\n assert_array_equal(S3, self.A)\n S4 = self.A.replace(b'3', b'', count=0)\n assert_array_equal(S4, self.A)\n\n def test_replace_count_and_size(self):\n a = np.array(['0123456789' * i for i in range(4)]\n ).view(np.char.chararray)\n r1 = a.replace('5', 'ABCDE')\n assert r1.dtype.itemsize == (3*10 + 3*4) * 4\n assert_array_equal(r1, np.array(['01234ABCDE6789' * i\n for i in range(4)]))\n r2 = a.replace('5', 'ABCDE', count=1)\n assert r2.dtype.itemsize == (3*10 + 4) * 4\n r3 = a.replace('5', 'ABCDE', count=0)\n assert r3.dtype.itemsize == a.dtype.itemsize\n assert_array_equal(r3, a)\n # Negative values mean to replace all.\n r4 = a.replace('5', 'ABCDE', count=-1)\n assert r4.dtype.itemsize == (3*10 + 3*4) * 4\n assert_array_equal(r4, r1)\n # We can do count on an element-by-element basis.\n r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1])\n assert r5.dtype.itemsize == (3*10 + 4) * 4\n assert_array_equal(r5, np.array(\n ['01234ABCDE6789' * i for i in range(3)]\n + ['01234ABCDE6789' + '0123456789' * 2]))\n\n def test_replace_broadcasting(self):\n a = np.array('0,0,0').view(np.char.chararray)\n r1 = a.replace('0', '1', count=np.arange(3))\n assert r1.dtype == a.dtype\n assert_array_equal(r1, np.array(['0,0,0', '1,0,0', '1,1,0']))\n r2 = a.replace('0', [['1'], ['2']], count=np.arange(1, 4))\n assert_array_equal(r2, np.array([['1,0,0', '1,1,0', '1,1,1'],\n ['2,0,0', '2,2,0', '2,2,2']]))\n r3 = a.replace(['0', '0,0', 
'0,0,0'], 'X')\n assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X']))\n\n def test_rjust(self):\n assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_))\n\n C = self.A.rjust([10, 20])\n assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])\n\n C = self.A.rjust(20, b'#')\n assert_(np.all(C.startswith(b'#')))\n assert_array_equal(C.endswith(b'#'),\n [[False, True], [False, False], [False, False]])\n\n C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])\n tgt = [[b' FOO', b' FOO'],\n [b' FOO', b' FOO']]\n assert_(issubclass(C.dtype.type, np.bytes_))\n assert_array_equal(C, tgt)\n\n def test_rpartition(self):\n P = self.A.rpartition([b'3', b'M'])\n tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],\n [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],\n [(b'123 \\t ', b'3', b'45 \\0 '), (b'', b'', b'UPPER')]]\n assert_(issubclass(P.dtype.type, np.bytes_))\n assert_array_equal(P, tgt)\n\n def test_rsplit(self):\n A = self.A.rsplit(b'3')\n tgt = [[[b' abc '], [b'']],\n [[b'12', b'45'], [b'MixedCase']],\n [[b'12', b' \\t ', b'45 \\x00 '], [b'UPPER']]]\n assert_(issubclass(A.dtype.type, np.object_))\n assert_equal(A.tolist(), tgt)\n\n def test_rstrip(self):\n assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_))\n\n tgt = [[b' abc', b''],\n [b'12345', b'MixedCase'],\n [b'123 \\t 345', b'UPPER']]\n assert_array_equal(self.A.rstrip(), tgt)\n\n tgt = [[b' abc ', b''],\n [b'1234', b'MixedCase'],\n [b'123 \\t 345 \\x00', b'UPP']\n ]\n assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)\n\n tgt = [[' \\u03a3', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345', 'UPPER']]\n assert_(issubclass(self.B.rstrip().dtype.type, np.str_))\n assert_array_equal(self.B.rstrip(), tgt)\n\n def test_strip(self):\n tgt = [[b'abc', b''],\n [b'12345', b'MixedCase'],\n [b'123 \\t 345', b'UPPER']]\n assert_(issubclass(self.A.strip().dtype.type, np.bytes_))\n assert_array_equal(self.A.strip(), tgt)\n\n tgt = [[b' abc ', b''],\n [b'234', b'ixedCas'],\n [b'23 \\t 345 \\x00', b'UPP']]\n assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)\n\n tgt = [['\\u03a3', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345', 'UPPER']]\n assert_(issubclass(self.B.strip().dtype.type, np.str_))\n assert_array_equal(self.B.strip(), tgt)\n\n def test_split(self):\n A = self.A.split(b'3')\n tgt = [\n [[b' abc '], [b'']],\n [[b'12', b'45'], [b'MixedCase']],\n [[b'12', b' \\t ', b'45 \\x00 '], [b'UPPER']]]\n assert_(issubclass(A.dtype.type, np.object_))\n assert_equal(A.tolist(), tgt)\n\n def test_splitlines(self):\n A = np.char.array(['abc\\nfds\\nwer']).splitlines()\n assert_(issubclass(A.dtype.type, np.object_))\n assert_(A.shape == (1,))\n assert_(len(A[0]) == 3)\n\n def test_swapcase(self):\n tgt = [[b' ABC ', b''],\n [b'12345', b'mIXEDcASE'],\n [b'123 \\t 345 \\0 ', b'upper']]\n assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_))\n assert_array_equal(self.A.swapcase(), tgt)\n\n tgt = [[' \\u03c3 ', ''],\n ['12345', 'mIXEDcASE'],\n ['123 \\t 345 \\0 ', 'upper']]\n assert_(issubclass(self.B.swapcase().dtype.type, np.str_))\n assert_array_equal(self.B.swapcase(), tgt)\n\n def test_title(self):\n tgt = [[b' Abc ', b''],\n [b'12345', b'Mixedcase'],\n [b'123 \\t 345 \\0 ', b'Upper']]\n assert_(issubclass(self.A.title().dtype.type, np.bytes_))\n assert_array_equal(self.A.title(), tgt)\n\n tgt = [[' \\u03a3 ', ''],\n ['12345', 'Mixedcase'],\n ['123 \\t 345 \\0 ', 'Upper']]\n assert_(issubclass(self.B.title().dtype.type, np.str_))\n assert_array_equal(self.B.title(), tgt)\n\n def test_upper(self):\n tgt = [[b' ABC ', 
b''],\n [b'12345', b'MIXEDCASE'],\n [b'123 \\t 345 \\0 ', b'UPPER']]\n assert_(issubclass(self.A.upper().dtype.type, np.bytes_))\n assert_array_equal(self.A.upper(), tgt)\n\n tgt = [[' \\u03a3 ', ''],\n ['12345', 'MIXEDCASE'],\n ['123 \\t 345 \\0 ', 'UPPER']]\n assert_(issubclass(self.B.upper().dtype.type, np.str_))\n assert_array_equal(self.B.upper(), tgt)\n\n def test_isnumeric(self):\n\n def fail():\n self.A.isnumeric()\n\n assert_raises(TypeError, fail)\n assert_(issubclass(self.B.isnumeric().dtype.type, np.bool))\n assert_array_equal(self.B.isnumeric(), [\n [False, False], [True, False], [False, False]])\n\n def test_isdecimal(self):\n\n def fail():\n self.A.isdecimal()\n\n assert_raises(TypeError, fail)\n assert_(issubclass(self.B.isdecimal().dtype.type, np.bool))\n assert_array_equal(self.B.isdecimal(), [\n [False, False], [True, False], [False, False]])\n\n\nclass TestOperations:\n def setup_method(self):\n self.A = np.array([['abc', '123'],\n ['789', 'xyz']]).view(np.char.chararray)\n self.B = np.array([['efg', '456'],\n ['051', 'tuv']]).view(np.char.chararray)\n\n def test_add(self):\n AB = np.array([['abcefg', '123456'],\n ['789051', 'xyztuv']]).view(np.char.chararray)\n assert_array_equal(AB, (self.A + self.B))\n assert_(len((self.A + self.B)[0][0]) == 6)\n\n def test_radd(self):\n QA = np.array([['qabc', 'q123'],\n ['q789', 'qxyz']]).view(np.char.chararray)\n assert_array_equal(QA, ('q' + self.A))\n\n def test_mul(self):\n A = self.A\n for r in (2, 3, 5, 7, 197):\n Ar = np.array([[A[0, 0]*r, A[0, 1]*r],\n [A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray)\n\n assert_array_equal(Ar, (self.A * r))\n\n for ob in [object(), 'qrs']:\n with assert_raises_regex(ValueError,\n 'Can only multiply by integers'):\n A*ob\n\n def test_rmul(self):\n A = self.A\n for r in (2, 3, 5, 7, 197):\n Ar = np.array([[A[0, 0]*r, A[0, 1]*r],\n [A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray)\n assert_array_equal(Ar, (r * self.A))\n\n for ob in [object(), 'qrs']:\n with assert_raises_regex(ValueError,\n 'Can only multiply by integers'):\n ob * A\n\n def test_mod(self):\n \"\"\"Ticket #856\"\"\"\n F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.char.chararray)\n C = np.array([[3, 7], [19, 1]], dtype=np.int64)\n FC = np.array([['3', '7.000000'],\n ['19', 'np.int64(1)']]).view(np.char.chararray)\n assert_array_equal(FC, F % C)\n\n A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.char.chararray)\n A1 = np.array([['1.000', '1'],\n ['1', repr(np.array(1)[()])]]).view(np.char.chararray)\n assert_array_equal(A1, (A % 1))\n\n A2 = np.array([['1.000', '2'],\n ['3', repr(np.array(4)[()])]]).view(np.char.chararray)\n assert_array_equal(A2, (A % [[1, 2], [3, 4]]))\n\n def test_rmod(self):\n assert_((\"%s\" % self.A) == str(self.A))\n assert_((\"%r\" % self.A) == repr(self.A))\n\n for ob in [42, object()]:\n with assert_raises_regex(\n TypeError, \"unsupported operand type.* and 'chararray'\"):\n ob % self.A\n\n def test_slice(self):\n \"\"\"Regression test for https://github.com/numpy/numpy/issues/5982\"\"\"\n\n arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],\n dtype='S4').view(np.char.chararray)\n sl1 = arr[:]\n assert_array_equal(sl1, arr)\n assert_(sl1.base is arr)\n assert_(sl1.base.base is arr.base)\n\n sl2 = arr[:, :]\n assert_array_equal(sl2, arr)\n assert_(sl2.base is arr)\n assert_(sl2.base.base is arr.base)\n\n assert_(arr[0, 0] == b'abc')\n\n\nclass TestMethodsEmptyArray:\n def setup_method(self):\n self.U = np.array([], dtype='U')\n self.S = np.array([], dtype='S')\n\n def test_encode(self):\n 
res = np.char.encode(self.U)\n assert_array_equal(res, [])\n assert_(res.dtype.char == 'S')\n\n def test_decode(self):\n res = np.char.decode(self.S)\n assert_array_equal(res, [])\n assert_(res.dtype.char == 'U')\n\n def test_decode_with_reshape(self):\n res = np.char.decode(self.S.reshape((1, 0, 1)))\n assert_(res.shape == (1, 0, 1))\n\n\nclass TestMethodsScalarValues:\n def test_mod(self):\n A = np.array([[' abc ', ''],\n ['12345', 'MixedCase'],\n ['123 \\t 345 \\0 ', 'UPPER']], dtype='S')\n tgt = [[b'123 abc ', b'123'],\n [b'12312345', b'123MixedCase'],\n [b'123123 \\t 345 \\0 ', b'123UPPER']]\n assert_array_equal(np.char.mod(b\"123%s\", A), tgt)\n\n def test_decode(self):\n bytestring = b'\\x81\\xc1\\x81\\xc1\\x81\\xc1'\n assert_equal(np.char.decode(bytestring, encoding='cp037'),\n 'aAaAaA')\n\n def test_encode(self):\n unicode = 'aAaAaA'\n assert_equal(np.char.encode(unicode, encoding='cp037'),\n b'\\x81\\xc1\\x81\\xc1\\x81\\xc1')\n\n def test_expandtabs(self):\n s = \"\\tone level of indentation\\n\\t\\ttwo levels of indentation\"\n assert_equal(\n np.char.expandtabs(s, tabsize=2),\n \" one level of indentation\\n two levels of indentation\"\n )\n\n def test_join(self):\n seps = np.array(['-', '_'])\n assert_array_equal(np.char.join(seps, 'hello'),\n ['h-e-l-l-o', 'h_e_l_l_o'])\n\n def test_partition(self):\n assert_equal(np.char.partition('This string', ' '),\n ['This', ' ', 'string'])\n\n def test_rpartition(self):\n assert_equal(np.char.rpartition('This string here', ' '),\n ['This string', ' ', 'here'])\n\n def test_replace(self):\n assert_equal(np.char.replace('Python is good', 'good', 'great'),\n 'Python is great')\n\n\ndef test_empty_indexing():\n \"\"\"Regression test for ticket 1948.\"\"\"\n # Check that indexing a chararray with an empty list/array returns an\n # empty chararray instead of a chararray with a single empty string in it.\n s = np.char.chararray((4,))\n assert_(s[[]].size == 0)\n", "output": ["test_empty_indexing", "TestInformation", "TestMethods", "TestOperations", "TestMethodsEmptyArray", "TestChar", "TestMethodsScalarValues", "TestBasic", "TestComparisons", "TestWhitespace", "TestComparisonsMixed2", "TestVecString", "TestComparisonsMixed1"], "metadata": {"file_path": "numpy-main/numpy/_core/tests/test_defchararray.py", "file_length": 11609, "symbol_dict": [{"symbol": "test_empty_indexing", "type": "mannual_defined_function", "byte_location": 29840, "location": 11522}, {"symbol": "TestComparisons", "type": "mannual_defined_class", "byte_location": 4768, "location": 1785}, {"symbol": "TestComparisonsMixed2", "type": "mannual_defined_class", "byte_location": 6446, "location": 2349}, {"symbol": "TestWhitespace", "type": "mannual_defined_class", "byte_location": 3969, "location": 1490}, {"symbol": "TestInformation", "type": "mannual_defined_class", "byte_location": 6722, "location": 2453}, {"symbol": "TestChar", "type": "mannual_defined_class", "byte_location": 4531, "location": 1696}, {"symbol": "TestMethods", "type": "mannual_defined_class", "byte_location": 12123, "location": 4534}, {"symbol": "TestVecString", "type": "mannual_defined_class", "byte_location": 2850, "location": 1112}, {"symbol": "TestOperations", "type": "mannual_defined_class", "byte_location": 24538, "location": 9550}, {"symbol": "TestBasic", "type": "mannual_defined_class", "byte_location": 304, "location": 100}, {"symbol": "TestMethodsEmptyArray", "type": "mannual_defined_class", "byte_location": 27685, "location": 10736}, {"symbol": "TestComparisonsMixed1", "type": "mannual_defined_class", 
"byte_location": 6168, "location": 2240}, {"symbol": "TestMethodsScalarValues", "type": "mannual_defined_class", "byte_location": 28243, "location": 10941}]}} {"input": "\"\"\"\nCopyright 1999 -- 2011 Pearu Peterson all rights reserved.\nCopyright 2011 -- present NumPy Developers.\nPermission to use, modify, and distribute this software is given under the\nterms of the NumPy License.\n\nNO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.\n\"\"\"\nfrom . import __version__\nf2py_version = __version__.version\n\nimport copy\nimport re\nimport os\nfrom .crackfortran import markoutercomma\nfrom . import cb_rules\nfrom ._isocbind import iso_c_binding_map, isoc_c2pycode_map, iso_c2py_map\n\n# The environment provided by auxfuncs.py is needed for some calls to eval.\n# As the needed functions cannot be determined by static inspection of the\n# code, it is safest to use import * pending a major refactoring of f2py.\nfrom .auxfuncs import *\n\n__all__ = [\n 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign',\n 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map',\n 'cb_sign2map', 'cb_routsign2map', 'common_sign2map', 'process_f2cmap_dict'\n]\n\n\ndepargs = []\nlcb_map = {}\nlcb2_map = {}\n# forced casting: mainly caused by the fact that Python or Numeric\n# C/APIs do not support the corresponding C types.\nc2py_map = {'double': 'float',\n 'float': 'float', # forced casting\n 'long_double': 'float', # forced casting\n 'char': 'int', # forced casting\n 'signed_char': 'int', # forced casting\n 'unsigned_char': 'int', # forced casting\n 'short': 'int', # forced casting\n 'unsigned_short': 'int', # forced casting\n 'int': 'int', # forced casting\n 'long': 'int',\n 'long_long': 'long',\n 'unsigned': 'int', # forced casting\n 'complex_float': 'complex', # forced casting\n 'complex_double': 'complex',\n 'complex_long_double': 'complex', # forced casting\n 'string': 'string',\n 'character': 'bytes',\n }\n\nc2capi_map = {'double': 'NPY_DOUBLE',\n 'float': 'NPY_FLOAT',\n 'long_double': 'NPY_LONGDOUBLE',\n 'char': 'NPY_BYTE',\n 'unsigned_char': 'NPY_UBYTE',\n 'signed_char': 'NPY_BYTE',\n 'short': 'NPY_SHORT',\n 'unsigned_short': 'NPY_USHORT',\n 'int': 'NPY_INT',\n 'unsigned': 'NPY_UINT',\n 'long': 'NPY_LONG',\n 'unsigned_long': 'NPY_ULONG',\n 'long_long': 'NPY_LONGLONG',\n 'unsigned_long_long': 'NPY_ULONGLONG',\n 'complex_float': 'NPY_CFLOAT',\n 'complex_double': 'NPY_CDOUBLE',\n 'complex_long_double': 'NPY_CDOUBLE',\n 'string': 'NPY_STRING',\n 'character': 'NPY_STRING'}\n\nc2pycode_map = {'double': 'd',\n 'float': 'f',\n 'long_double': 'g',\n 'char': 'b',\n 'unsigned_char': 'B',\n 'signed_char': 'b',\n 'short': 'h',\n 'unsigned_short': 'H',\n 'int': 'i',\n 'unsigned': 'I',\n 'long': 'l',\n 'unsigned_long': 'L',\n 'long_long': 'q',\n 'unsigned_long_long': 'Q',\n 'complex_float': 'F',\n 'complex_double': 'D',\n 'complex_long_double': 'G',\n 'string': 'S',\n 'character': 'c'}\n\n# https://docs.python.org/3/c-api/arg.html#building-values\nc2buildvalue_map = {'double': 'd',\n 'float': 'f',\n 'char': 'b',\n 'signed_char': 'b',\n 'short': 'h',\n 'int': 'i',\n 'long': 'l',\n 'long_long': 'L',\n 'complex_float': 'N',\n 'complex_double': 'N',\n 'complex_long_double': 'N',\n 'string': 'y',\n 'character': 'c'}\n\nf2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double',\n '12': 'long_double', '16': 'long_double'},\n 'integer': {'': 'int', '1': 'signed_char', '2': 'short',\n '4': 'int', '8': 'long_long',\n '-1': 'unsigned_char', '-2': 'unsigned_short',\n '-4': 'unsigned', '-8': 
'unsigned_long_long'},\n 'complex': {'': 'complex_float', '8': 'complex_float',\n '16': 'complex_double', '24': 'complex_long_double',\n '32': 'complex_long_double'},\n 'complexkind': {'': 'complex_float', '4': 'complex_float',\n '8': 'complex_double', '12': 'complex_long_double',\n '16': 'complex_long_double'},\n 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int',\n '8': 'long_long'},\n 'double complex': {'': 'complex_double'},\n 'double precision': {'': 'double'},\n 'byte': {'': 'char'},\n }\n\n# Add ISO_C handling\nc2pycode_map.update(isoc_c2pycode_map)\nc2py_map.update(iso_c2py_map)\nf2cmap_all, _ = process_f2cmap_dict(f2cmap_all, iso_c_binding_map, c2py_map)\n# End ISO_C handling\nf2cmap_default = copy.deepcopy(f2cmap_all)\n\nf2cmap_mapped = []\n\ndef load_f2cmap_file(f2cmap_file):\n global f2cmap_all, f2cmap_mapped\n\n f2cmap_all = copy.deepcopy(f2cmap_default)\n\n if f2cmap_file is None:\n # Default value\n f2cmap_file = '.f2py_f2cmap'\n if not os.path.isfile(f2cmap_file):\n return\n\n # User defined additions to f2cmap_all.\n # f2cmap_file must contain a dictionary of dictionaries, only. For\n # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is\n # interpreted as C 'float'. This feature is useful for F90/95 users if\n # they use PARAMETERS in type specifications.\n try:\n outmess('Reading f2cmap from {!r} ...\\n'.format(f2cmap_file))\n with open(f2cmap_file) as f:\n d = eval(f.read().lower(), {}, {})\n f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True)\n outmess('Successfully applied user defined f2cmap changes\\n')\n except Exception as msg:\n errmess('Failed to apply user defined f2cmap changes: %s. Skipping.\\n' % (msg))\n\n\ncformat_map = {'double': '%g',\n 'float': '%g',\n 'long_double': '%Lg',\n 'char': '%d',\n 'signed_char': '%d',\n 'unsigned_char': '%hhu',\n 'short': '%hd',\n 'unsigned_short': '%hu',\n 'int': '%d',\n 'unsigned': '%u',\n 'long': '%ld',\n 'unsigned_long': '%lu',\n 'long_long': '%ld',\n 'complex_float': '(%g,%g)',\n 'complex_double': '(%g,%g)',\n 'complex_long_double': '(%Lg,%Lg)',\n 'string': '\\\\\"%s\\\\\"',\n 'character': \"'%c'\",\n }\n\n# Auxiliary functions\n\n\ndef getctype(var):\n \"\"\"\n Determines C type\n \"\"\"\n ctype = 'void'\n if isfunction(var):\n if 'result' in var:\n a = var['result']\n else:\n a = var['name']\n if a in var['vars']:\n return getctype(var['vars'][a])\n else:\n errmess('getctype: function %s has no return value?!\\n' % a)\n elif issubroutine(var):\n return ctype\n elif ischaracter_or_characterarray(var):\n return 'character'\n elif isstring_or_stringarray(var):\n return 'string'\n elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:\n typespec = var['typespec'].lower()\n f2cmap = f2cmap_all[typespec]\n ctype = f2cmap[''] # default type\n if 'kindselector' in var:\n if '*' in var['kindselector']:\n try:\n ctype = f2cmap[var['kindselector']['*']]\n except KeyError:\n errmess('getctype: \"%s %s %s\" not supported.\\n' %\n (var['typespec'], '*', var['kindselector']['*']))\n elif 'kind' in var['kindselector']:\n if typespec + 'kind' in f2cmap_all:\n f2cmap = f2cmap_all[typespec + 'kind']\n try:\n ctype = f2cmap[var['kindselector']['kind']]\n except KeyError:\n if typespec in f2cmap_all:\n f2cmap = f2cmap_all[typespec]\n try:\n ctype = f2cmap[str(var['kindselector']['kind'])]\n except KeyError:\n errmess('getctype: \"%s(kind=%s)\" is mapped to C \"%s\" (to override define dict(%s = dict(%s=\"\")) in %s/.f2py_f2cmap file).\\n'\n % (typespec, 
var['kindselector']['kind'], ctype,\n typespec, var['kindselector']['kind'], os.getcwd()))\n else:\n if not isexternal(var):\n errmess('getctype: No C-type found in \"%s\", assuming void.\\n' % var)\n return ctype\n\n\ndef f2cexpr(expr):\n \"\"\"Rewrite Fortran expression as f2py supported C expression.\n\n Due to the lack of a proper expression parser in f2py, this\n function uses a heuristic approach that assumes that Fortran\n arithmetic expressions are valid C arithmetic expressions when\n mapping Fortran function calls to the corresponding C function/CPP\n macros calls.\n\n \"\"\"\n # TODO: support Fortran `len` function with optional kind parameter\n expr = re.sub(r'\\blen\\b', 'f2py_slen', expr)\n return expr\n\n\ndef getstrlength(var):\n if isstringfunction(var):\n if 'result' in var:\n a = var['result']\n else:\n a = var['name']\n if a in var['vars']:\n return getstrlength(var['vars'][a])\n else:\n errmess('getstrlength: function %s has no return value?!\\n' % a)\n if not isstring(var):\n errmess(\n 'getstrlength: expected a signature of a string but got: %s\\n' % (repr(var)))\n len = '1'\n if 'charselector' in var:\n a = var['charselector']\n if '*' in a:\n len = a['*']\n elif 'len' in a:\n len = f2cexpr(a['len'])\n if re.match(r'\\(\\s*(\\*|:)\\s*\\)', len) or re.match(r'(\\*|:)', len):\n if isintent_hide(var):\n errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\\n' % (\n repr(var)))\n len = '-1'\n return len\n\n\ndef getarrdims(a, var, verbose=0):\n ret = {}\n if isstring(var) and not isarray(var):\n ret['size'] = getstrlength(var)\n ret['rank'] = '0'\n ret['dims'] = ''\n elif isscalar(var):\n ret['size'] = '1'\n ret['rank'] = '0'\n ret['dims'] = ''\n elif isarray(var):\n dim = copy.copy(var['dimension'])\n ret['size'] = '*'.join(dim)\n try:\n ret['size'] = repr(eval(ret['size']))\n except Exception:\n pass\n ret['dims'] = ','.join(dim)\n ret['rank'] = repr(len(dim))\n ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1]\n for i in range(len(dim)): # solve dim for dependencies\n v = []\n if dim[i] in depargs:\n v = [dim[i]]\n else:\n for va in depargs:\n if re.match(r'.*?\\b%s\\b.*' % va, dim[i]):\n v.append(va)\n for va in v:\n if depargs.index(va) > depargs.index(a):\n dim[i] = '*'\n break\n ret['setdims'], i = '', -1\n for d in dim:\n i = i + 1\n if d not in ['*', ':', '(*)', '(:)']:\n ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % (\n ret['setdims'], i, d)\n if ret['setdims']:\n ret['setdims'] = ret['setdims'][:-1]\n ret['cbsetdims'], i = '', -1\n for d in var['dimension']:\n i = i + 1\n if d not in ['*', ':', '(*)', '(:)']:\n ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (\n ret['cbsetdims'], i, d)\n elif isintent_in(var):\n outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\\n'\n % (d))\n ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (\n ret['cbsetdims'], i, 0)\n elif verbose:\n errmess(\n 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\\n' % (repr(a), repr(d)))\n if ret['cbsetdims']:\n ret['cbsetdims'] = ret['cbsetdims'][:-1]\n# if not isintent_c(var):\n# var['dimension'].reverse()\n return ret\n\n\ndef getpydocsign(a, var):\n global lcb_map\n if isfunction(var):\n if 'result' in var:\n af = var['result']\n else:\n af = var['name']\n if af in var['vars']:\n return getpydocsign(af, var['vars'][af])\n else:\n errmess('getctype: function %s has no return value?!\\n' % af)\n return '', ''\n sig, sigout = a, a\n opt = ''\n if isintent_in(var):\n opt = 'input'\n elif 
isintent_inout(var):\n opt = 'in/output'\n out_a = a\n if isintent_out(var):\n for k in var['intent']:\n if k[:4] == 'out=':\n out_a = k[4:]\n break\n init = ''\n ctype = getctype(var)\n\n if hasinitvalue(var):\n init, showinit = getinit(a, var)\n init = ', optional\\\\n Default: %s' % showinit\n if isscalar(var):\n if isintent_inout(var):\n sig = '%s : %s rank-0 array(%s,\\'%s\\')%s' % (a, opt, c2py_map[ctype],\n c2pycode_map[ctype], init)\n else:\n sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init)\n sigout = '%s : %s' % (out_a, c2py_map[ctype])\n elif isstring(var):\n if isintent_inout(var):\n sig = '%s : %s rank-0 array(string(len=%s),\\'c\\')%s' % (\n a, opt, getstrlength(var), init)\n else:\n sig = '%s : %s string(len=%s)%s' % (\n a, opt, getstrlength(var), init)\n sigout = '%s : string(len=%s)' % (out_a, getstrlength(var))\n elif isarray(var):\n dim = var['dimension']\n rank = repr(len(dim))\n sig = '%s : %s rank-%s array(\\'%s\\') with bounds (%s)%s' % (a, opt, rank,\n c2pycode_map[\n ctype],\n ','.join(dim), init)\n if a == out_a:\n sigout = '%s : rank-%s array(\\'%s\\') with bounds (%s)'\\\n % (a, rank, c2pycode_map[ctype], ','.join(dim))\n else:\n sigout = '%s : rank-%s array(\\'%s\\') with bounds (%s) and %s storage'\\\n % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a)\n elif isexternal(var):\n ua = ''\n if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:\n ua = lcb2_map[lcb_map[a]]['argname']\n if not ua == a:\n ua = ' => %s' % ua\n else:\n ua = ''\n sig = '%s : call-back function%s' % (a, ua)\n sigout = sig\n else:\n errmess(\n 'getpydocsign: Could not resolve docsignature for \"%s\".\\n' % a)\n return sig, sigout\n\n\ndef getarrdocsign(a, var):\n ctype = getctype(var)\n if isstring(var) and (not isarray(var)):\n sig = '%s : rank-0 array(string(len=%s),\\'c\\')' % (a,\n getstrlength(var))\n elif isscalar(var):\n sig = '%s : rank-0 array(%s,\\'%s\\')' % (a, c2py_map[ctype],\n c2pycode_map[ctype],)\n elif isarray(var):\n dim = var['dimension']\n rank = repr(len(dim))\n sig = '%s : rank-%s array(\\'%s\\') with bounds (%s)' % (a, rank,\n c2pycode_map[\n ctype],\n ','.join(dim))\n return sig\n\n\ndef getinit(a, var):\n if isstring(var):\n init, showinit = '\"\"', \"''\"\n else:\n init, showinit = '', ''\n if hasinitvalue(var):\n init = var['=']\n showinit = init\n if iscomplex(var) or iscomplexarray(var):\n ret = {}\n\n try:\n v = var[\"=\"]\n if ',' in v:\n ret['init.r'], ret['init.i'] = markoutercomma(\n v[1:-1]).split('@,@')\n else:\n v = eval(v, {}, {})\n ret['init.r'], ret['init.i'] = str(v.real), str(v.imag)\n except Exception:\n raise ValueError(\n 'getinit: expected complex number `(r,i)\\' but got `%s\\' as initial value of %r.' 
% (init, a))\n if isarray(var):\n init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % (\n ret['init.r'], ret['init.i'])\n elif isstring(var):\n if not init:\n init, showinit = '\"\"', \"''\"\n if init[0] == \"'\":\n init = '\"%s\"' % (init[1:-1].replace('\"', '\\\\\"'))\n if init[0] == '\"':\n showinit = \"'%s'\" % (init[1:-1])\n return init, showinit\n\n\ndef get_elsize(var):\n if isstring(var) or isstringarray(var):\n elsize = getstrlength(var)\n # override with user-specified length when available:\n elsize = var['charselector'].get('f2py_len', elsize)\n return elsize\n if ischaracter(var) or ischaracterarray(var):\n return '1'\n # for numerical types, PyArray_New* functions ignore specified\n # elsize, so we just return 1 and let elsize be determined at\n # runtime, see fortranobject.c\n return '1'\n\n\ndef sign2map(a, var):\n \"\"\"\n varname,ctype,atype\n init,init.r,init.i,pytype\n vardebuginfo,vardebugshowvalue,varshowvalue\n varrformat\n\n intent\n \"\"\"\n out_a = a\n if isintent_out(var):\n for k in var['intent']:\n if k[:4] == 'out=':\n out_a = k[4:]\n break\n ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)}\n intent_flags = []\n for f, s in isintent_dict.items():\n if f(var):\n intent_flags.append('F2PY_%s' % s)\n if intent_flags:\n # TODO: Evaluate intent_flags here.\n ret['intent'] = '|'.join(intent_flags)\n else:\n ret['intent'] = 'F2PY_INTENT_IN'\n if isarray(var):\n ret['varrformat'] = 'N'\n elif ret['ctype'] in c2buildvalue_map:\n ret['varrformat'] = c2buildvalue_map[ret['ctype']]\n else:\n ret['varrformat'] = 'O'\n ret['init'], ret['showinit'] = getinit(a, var)\n if hasinitvalue(var) and iscomplex(var) and not isarray(var):\n ret['init.r'], ret['init.i'] = markoutercomma(\n ret['init'][1:-1]).split('@,@')\n if isexternal(var):\n ret['cbnamekey'] = a\n if a in lcb_map:\n ret['cbname'] = lcb_map[a]\n ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs']\n ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs']\n ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr']\n ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr']\n else:\n ret['cbname'] = a\n errmess('sign2map: Confused: external %s is not in lcb_map%s.\\n' % (\n a, list(lcb_map.keys())))\n if isstring(var):\n ret['length'] = getstrlength(var)\n if isarray(var):\n ret = dictappend(ret, getarrdims(a, var))\n dim = copy.copy(var['dimension'])\n if ret['ctype'] in c2capi_map:\n ret['atype'] = c2capi_map[ret['ctype']]\n ret['elsize'] = get_elsize(var)\n # Debug info\n if debugcapi(var):\n il = [isintent_in, 'input', isintent_out, 'output',\n isintent_inout, 'inoutput', isrequired, 'required',\n isoptional, 'optional', isintent_hide, 'hidden',\n iscomplex, 'complex scalar',\n l_and(isscalar, l_not(iscomplex)), 'scalar',\n isstring, 'string', isarray, 'array',\n iscomplexarray, 'complex array', isstringarray, 'string array',\n iscomplexfunction, 'complex function',\n l_and(isfunction, l_not(iscomplexfunction)), 'function',\n isexternal, 'callback',\n isintent_callback, 'callback',\n isintent_aux, 'auxiliary',\n ]\n rl = []\n for i in range(0, len(il), 2):\n if il[i](var):\n rl.append(il[i + 1])\n if isstring(var):\n rl.append('slen(%s)=%s' % (a, ret['length']))\n if isarray(var):\n ddim = ','.join(\n map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim))\n rl.append('dims(%s)' % ddim)\n if isexternal(var):\n ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % (\n a, ret['cbname'], ','.join(rl))\n else:\n ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % (\n ret['ctype'], a, ret['showinit'], ','.join(rl))\n if 
isscalar(var):\n if ret['ctype'] in cformat_map:\n ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % (\n a, cformat_map[ret['ctype']])\n if isstring(var):\n ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\\\\"%%s\\\\\"' % (\n a, a)\n if isexternal(var):\n ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a)\n if ret['ctype'] in cformat_map:\n ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']])\n ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])\n if isstring(var):\n ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\\\\"%%s\\\\\"' % (a, a)\n ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)\n if hasnote(var):\n ret['note'] = var['note']\n return ret\n\n\ndef routsign2map(rout):\n \"\"\"\n name,NAME,begintitle,endtitle\n rname,ctype,rformat\n routdebugshowvalue\n \"\"\"\n global lcb_map\n name = rout['name']\n fname = getfortranname(rout)\n ret = {'name': name,\n 'texname': name.replace('_', '\\\\_'),\n 'name_lower': name.lower(),\n 'NAME': name.upper(),\n 'begintitle': gentitle(name),\n 'endtitle': gentitle('end of %s' % name),\n 'fortranname': fname,\n 'FORTRANNAME': fname.upper(),\n 'callstatement': getcallstatement(rout) or '',\n 'usercode': getusercode(rout) or '',\n 'usercode1': getusercode1(rout) or '',\n }\n if '_' in fname:\n ret['F_FUNC'] = 'F_FUNC_US'\n else:\n ret['F_FUNC'] = 'F_FUNC'\n if '_' in name:\n ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US'\n else:\n ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC'\n lcb_map = {}\n if 'use' in rout:\n for u in rout['use'].keys():\n if u in cb_rules.cb_map:\n for un in cb_rules.cb_map[u]:\n ln = un[0]\n if 'map' in rout['use'][u]:\n for k in rout['use'][u]['map'].keys():\n if rout['use'][u]['map'][k] == un[0]:\n ln = k\n break\n lcb_map[ln] = un[1]\n elif 'externals' in rout and rout['externals']:\n errmess('routsign2map: Confused: function %s has externals %s but no \"use\" statement.\\n' % (\n ret['name'], repr(rout['externals'])))\n ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or ''\n if isfunction(rout):\n if 'result' in rout:\n a = rout['result']\n else:\n a = rout['name']\n ret['rname'] = a\n ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout)\n ret['ctype'] = getctype(rout['vars'][a])\n if hasresultnote(rout):\n ret['resultnote'] = rout['vars'][a]['note']\n rout['vars'][a]['note'] = ['See elsewhere.']\n if ret['ctype'] in c2buildvalue_map:\n ret['rformat'] = c2buildvalue_map[ret['ctype']]\n else:\n ret['rformat'] = 'O'\n errmess('routsign2map: no c2buildvalue key for type %s\\n' %\n (repr(ret['ctype'])))\n if debugcapi(rout):\n if ret['ctype'] in cformat_map:\n ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % (\n a, cformat_map[ret['ctype']])\n if isstringfunction(rout):\n ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\\\\"%%s\\\\\"' % (\n a, a)\n if isstringfunction(rout):\n ret['rlength'] = getstrlength(rout['vars'][a])\n if ret['rlength'] == '-1':\n errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\\n' % (\n repr(rout['name'])))\n ret['rlength'] = '10'\n if hasnote(rout):\n ret['note'] = rout['note']\n rout['note'] = ['See elsewhere.']\n return ret\n\n\ndef modsign2map(m):\n \"\"\"\n modulename\n \"\"\"\n if ismodule(m):\n ret = {'f90modulename': m['name'],\n 'F90MODULENAME': m['name'].upper(),\n 'texf90modulename': m['name'].replace('_', '\\\\_')}\n else:\n ret = {'modulename': m['name'],\n 'MODULENAME': m['name'].upper(),\n 'texmodulename': m['name'].replace('_', '\\\\_')}\n 
ret['restdoc'] = getrestdoc(m) or []\n if hasnote(m):\n ret['note'] = m['note']\n ret['usercode'] = getusercode(m) or ''\n ret['usercode1'] = getusercode1(m) or ''\n if m['body']:\n ret['interface_usercode'] = getusercode(m['body'][0]) or ''\n else:\n ret['interface_usercode'] = ''\n ret['pymethoddef'] = getpymethoddef(m) or ''\n if 'coutput' in m:\n ret['coutput'] = m['coutput']\n if 'f2py_wrapper_output' in m:\n ret['f2py_wrapper_output'] = m['f2py_wrapper_output']\n return ret\n\n\ndef cb_sign2map(a, var, index=None):\n ret = {'varname': a}\n ret['varname_i'] = ret['varname']\n ret['ctype'] = getctype(var)\n if ret['ctype'] in c2capi_map:\n ret['atype'] = c2capi_map[ret['ctype']]\n ret['elsize'] = get_elsize(var)\n if ret['ctype'] in cformat_map:\n ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])\n if isarray(var):\n ret = dictappend(ret, getarrdims(a, var))\n ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)\n if hasnote(var):\n ret['note'] = var['note']\n var['note'] = ['See elsewhere.']\n return ret\n\n\ndef cb_routsign2map(rout, um):\n \"\"\"\n name,begintitle,endtitle,argname\n ctype,rctype,maxnofargs,nofoptargs,returncptr\n \"\"\"\n ret = {'name': 'cb_%s_in_%s' % (rout['name'], um),\n 'returncptr': ''}\n if isintent_callback(rout):\n if '_' in rout['name']:\n F_FUNC = 'F_FUNC_US'\n else:\n F_FUNC = 'F_FUNC'\n ret['callbackname'] = '%s(%s,%s)' \\\n % (F_FUNC,\n rout['name'].lower(),\n rout['name'].upper(),\n )\n ret['static'] = 'extern'\n else:\n ret['callbackname'] = ret['name']\n ret['static'] = 'static'\n ret['argname'] = rout['name']\n ret['begintitle'] = gentitle(ret['name'])\n ret['endtitle'] = gentitle('end of %s' % ret['name'])\n ret['ctype'] = getctype(rout)\n ret['rctype'] = 'void'\n if ret['ctype'] == 'string':\n ret['rctype'] = 'void'\n else:\n ret['rctype'] = ret['ctype']\n if ret['rctype'] != 'void':\n if iscomplexfunction(rout):\n ret['returncptr'] = \"\"\"\n#ifdef F2PY_CB_RETURNCOMPLEX\nreturn_value=\n#endif\n\"\"\"\n else:\n ret['returncptr'] = 'return_value='\n if ret['ctype'] in cformat_map:\n ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])\n if isstringfunction(rout):\n ret['strlength'] = getstrlength(rout)\n if isfunction(rout):\n if 'result' in rout:\n a = rout['result']\n else:\n a = rout['name']\n if hasnote(rout['vars'][a]):\n ret['note'] = rout['vars'][a]['note']\n rout['vars'][a]['note'] = ['See elsewhere.']\n ret['rname'] = a\n ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout)\n if iscomplexfunction(rout):\n ret['rctype'] = \"\"\"\n#ifdef F2PY_CB_RETURNCOMPLEX\n#ctype#\n#else\nvoid\n#endif\n\"\"\"\n else:\n if hasnote(rout):\n ret['note'] = rout['note']\n rout['note'] = ['See elsewhere.']\n nofargs = 0\n nofoptargs = 0\n if 'args' in rout and 'vars' in rout:\n for a in rout['args']:\n var = rout['vars'][a]\n if l_or(isintent_in, isintent_inout)(var):\n nofargs = nofargs + 1\n if isoptional(var):\n nofoptargs = nofoptargs + 1\n ret['maxnofargs'] = repr(nofargs)\n ret['nofoptargs'] = repr(nofoptargs)\n if hasnote(rout) and isfunction(rout) and 'result' in rout:\n ret['routnote'] = rout['note']\n rout['note'] = ['See elsewhere.']\n return ret\n\n\ndef common_sign2map(a, var): # obsolute\n ret = {'varname': a, 'ctype': getctype(var)}\n if isstringarray(var):\n ret['ctype'] = 'char'\n if ret['ctype'] in c2capi_map:\n ret['atype'] = c2capi_map[ret['ctype']]\n ret['elsize'] = get_elsize(var)\n if ret['ctype'] in cformat_map:\n ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])\n if isarray(var):\n ret = 
dictappend(ret, getarrdims(a, var))\n elif isstring(var):\n ret['size'] = getstrlength(var)\n ret['rank'] = '1'\n ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)\n if hasnote(var):\n ret['note'] = var['note']\n var['note'] = ['See elsewhere.']\n # for strings this returns 0-rank but actually is 1-rank\n ret['arrdocstr'] = getarrdocsign(a, var)\n return ret\n", "output": ["getinit", "common_sign2map", "getpydocsign", "getstrlength", "cb_sign2map", "getctype", "getarrdocsign", "routsign2map", "sign2map", "cb_routsign2map", "modsign2map", "f2cexpr", "get_elsize", "getarrdims", "load_f2cmap_file"], "metadata": {"file_path": "numpy-main/numpy/f2py/capi_maps.py", "file_length": 10015, "symbol_dict": [{"symbol": "cb_sign2map", "type": "mannual_defined_function", "byte_location": 26544, "location": 8607}, {"symbol": "getarrdocsign", "type": "mannual_defined_function", "byte_location": 15778, "location": 5019}, {"symbol": "getctype", "type": "mannual_defined_function", "byte_location": 7275, "location": 2247}, {"symbol": "get_elsize", "type": "mannual_defined_function", "byte_location": 17795, "location": 5631}, {"symbol": "getpydocsign", "type": "mannual_defined_function", "byte_location": 13018, "location": 4081}, {"symbol": "cb_routsign2map", "type": "mannual_defined_function", "byte_location": 27153, "location": 8840}, {"symbol": "load_f2cmap_file", "type": "mannual_defined_function", "byte_location": 5556, "location": 1669}, {"symbol": "sign2map", "type": "mannual_defined_function", "byte_location": 18294, "location": 5780}, {"symbol": "modsign2map", "type": "mannual_defined_function", "byte_location": 25613, "location": 8268}, {"symbol": "common_sign2map", "type": "mannual_defined_function", "byte_location": 29746, "location": 9716}, {"symbol": "getstrlength", "type": "mannual_defined_function", "byte_location": 9857, "location": 3012}, {"symbol": "getinit", "type": "mannual_defined_function", "byte_location": 16581, "location": 5246}, {"symbol": "f2cexpr", "type": "mannual_defined_function", "byte_location": 9338, "location": 2872}, {"symbol": "routsign2map", "type": "mannual_defined_function", "byte_location": 22440, "location": 7237}, {"symbol": "getarrdims", "type": "mannual_defined_function", "byte_location": 10765, "location": 3315}]}} {"input": "import functools\nimport sys\nimport math\nimport warnings\n\nimport numpy as np\nfrom .._utils import set_module\nimport numpy._core.numeric as _nx\nfrom numpy._core.numeric import ScalarType, array\nfrom numpy._core.numerictypes import issubdtype\n\nimport numpy.matrixlib as matrixlib\nfrom numpy._core.multiarray import ravel_multi_index, unravel_index\nfrom numpy._core import overrides, linspace\nfrom numpy.lib.stride_tricks import as_strided\nfrom numpy.lib._function_base_impl import diff\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n__all__ = [\n 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',\n 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',\n 'diag_indices', 'diag_indices_from'\n]\n\n\ndef _ix__dispatcher(*args):\n return args\n\n\n@array_function_dispatch(_ix__dispatcher)\ndef ix_(*args):\n \"\"\"\n Construct an open mesh from multiple sequences.\n\n This function takes N 1-D sequences and returns N outputs with N\n dimensions each, such that the shape is 1 in all but one dimension\n and the dimension with the non-unit shape value cycles through all\n N dimensions.\n\n Using `ix_` one can quickly construct index arrays 
that will index\n the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array\n ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.\n\n Parameters\n ----------\n args : 1-D sequences\n Each sequence should be of integer or boolean type.\n Boolean sequences will be interpreted as boolean masks for the\n corresponding dimension (equivalent to passing in\n ``np.nonzero(boolean_sequence)``).\n\n Returns\n -------\n out : tuple of ndarrays\n N arrays with N dimensions each, with N the number of input\n sequences. Together these arrays form an open mesh.\n\n See Also\n --------\n ogrid, mgrid, meshgrid\n\n Examples\n --------\n >>> a = np.arange(10).reshape(2, 5)\n >>> a\n array([[0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9]])\n >>> ixgrid = np.ix_([0, 1], [2, 4])\n >>> ixgrid\n (array([[0],\n [1]]), array([[2, 4]]))\n >>> ixgrid[0].shape, ixgrid[1].shape\n ((2, 1), (1, 2))\n >>> a[ixgrid]\n array([[2, 4],\n [7, 9]])\n\n >>> ixgrid = np.ix_([True, True], [2, 4])\n >>> a[ixgrid]\n array([[2, 4],\n [7, 9]])\n >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])\n >>> a[ixgrid]\n array([[2, 4],\n [7, 9]])\n\n \"\"\"\n out = []\n nd = len(args)\n for k, new in enumerate(args):\n if not isinstance(new, _nx.ndarray):\n new = np.asarray(new)\n if new.size == 0:\n # Explicitly type empty arrays to avoid float default\n new = new.astype(_nx.intp)\n if new.ndim != 1:\n raise ValueError(\"Cross index must be 1 dimensional\")\n if issubdtype(new.dtype, _nx.bool):\n new, = new.nonzero()\n new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))\n out.append(new)\n return tuple(out)\n\n\nclass nd_grid:\n \"\"\"\n Construct a multi-dimensional \"meshgrid\".\n\n ``grid = nd_grid()`` creates an instance which will return a mesh-grid\n when indexed. The dimension and number of the output arrays are equal\n to the number of indexing dimensions. If the step length is not a\n complex number, then the stop is not inclusive.\n\n However, if the step length is a **complex number** (e.g. 5j), then the\n integer part of its magnitude is interpreted as specifying the\n number of points to create between the start and stop values, where\n the stop value **is inclusive**.\n\n If instantiated with an argument of ``sparse=True``, the mesh-grid is\n open (or not fleshed out) so that only one-dimension of each returned\n argument is greater than 1.\n\n Parameters\n ----------\n sparse : bool, optional\n Whether the grid is sparse or not. 
Default is False.\n\n Notes\n -----\n Two instances of `nd_grid` are made available in the NumPy namespace,\n `mgrid` and `ogrid`, approximately defined as::\n\n mgrid = nd_grid(sparse=False)\n ogrid = nd_grid(sparse=True)\n\n Users should use these pre-defined instances instead of using `nd_grid`\n directly.\n \"\"\"\n\n def __init__(self, sparse=False):\n self.sparse = sparse\n\n def __getitem__(self, key):\n try:\n size = []\n # Mimic the behavior of `np.arange` and use a data type\n # which is at least as large as `np.int_`\n num_list = [0]\n for k in range(len(key)):\n step = key[k].step\n start = key[k].start\n stop = key[k].stop\n if start is None:\n start = 0\n if step is None:\n step = 1\n if isinstance(step, (_nx.complexfloating, complex)):\n step = abs(step)\n size.append(int(step))\n else:\n size.append(\n int(math.ceil((stop - start) / (step*1.0))))\n num_list += [start, stop, step]\n typ = _nx.result_type(*num_list)\n if self.sparse:\n nn = [_nx.arange(_x, dtype=_t)\n for _x, _t in zip(size, (typ,)*len(size))]\n else:\n nn = _nx.indices(size, typ)\n for k, kk in enumerate(key):\n step = kk.step\n start = kk.start\n if start is None:\n start = 0\n if step is None:\n step = 1\n if isinstance(step, (_nx.complexfloating, complex)):\n step = int(abs(step))\n if step != 1:\n step = (kk.stop - start) / float(step - 1)\n nn[k] = (nn[k]*step+start)\n if self.sparse:\n slobj = [_nx.newaxis]*len(size)\n for k in range(len(size)):\n slobj[k] = slice(None, None)\n nn[k] = nn[k][tuple(slobj)]\n slobj[k] = _nx.newaxis\n return tuple(nn) # ogrid -> tuple of arrays\n return nn # mgrid -> ndarray\n except (IndexError, TypeError):\n step = key.step\n stop = key.stop\n start = key.start\n if start is None:\n start = 0\n if isinstance(step, (_nx.complexfloating, complex)):\n # Prevent the (potential) creation of integer arrays\n step_float = abs(step)\n step = length = int(step_float)\n if step != 1:\n step = (key.stop-start)/float(step-1)\n typ = _nx.result_type(start, stop, step_float)\n return _nx.arange(0, length, 1, dtype=typ)*step + start\n else:\n return _nx.arange(start, stop, step)\n\n\nclass MGridClass(nd_grid):\n \"\"\"\n An instance which returns a dense multi-dimensional \"meshgrid\".\n\n An instance which returns a dense (or fleshed out) mesh-grid\n when indexed, so that each returned argument has the same shape.\n The dimensions and number of the output arrays are equal to the\n number of indexing dimensions. If the step length is not a complex\n number, then the stop is not inclusive.\n\n However, if the step length is a **complex number** (e.g. 5j), then\n the integer part of its magnitude is interpreted as specifying the\n number of points to create between the start and stop values, where\n the stop value **is inclusive**.\n\n Returns\n -------\n mesh-grid : ndarray\n A single array, containing a set of `ndarray`\\\\ s all of the same\n dimensions. stacked along the first axis.\n\n See Also\n --------\n ogrid : like `mgrid` but returns open (not fleshed out) mesh grids\n meshgrid: return coordinate matrices from coordinate vectors\n r_ : array concatenator\n :ref:`how-to-partition`\n\n Examples\n --------\n >>> np.mgrid[0:5, 0:5]\n array([[[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]],\n [[0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4]]])\n >>> np.mgrid[-1:1:5j]\n array([-1. , -0.5, 0. , 0.5, 1. 
])\n\n >>> np.mgrid[0:4].shape\n (4,)\n >>> np.mgrid[0:4, 0:5].shape\n (2, 4, 5)\n >>> np.mgrid[0:4, 0:5, 0:6].shape\n (3, 4, 5, 6)\n\n \"\"\"\n\n def __init__(self):\n super().__init__(sparse=False)\n\n\nmgrid = MGridClass()\n\n\nclass OGridClass(nd_grid):\n \"\"\"\n An instance which returns an open multi-dimensional \"meshgrid\".\n\n An instance which returns an open (i.e. not fleshed out) mesh-grid\n when indexed, so that only one dimension of each returned array is\n greater than 1. The dimension and number of the output arrays are\n equal to the number of indexing dimensions. If the step length is\n not a complex number, then the stop is not inclusive.\n\n However, if the step length is a **complex number** (e.g. 5j), then\n the integer part of its magnitude is interpreted as specifying the\n number of points to create between the start and stop values, where\n the stop value **is inclusive**.\n\n Returns\n -------\n mesh-grid : ndarray or tuple of ndarrays\n If the input is a single slice, returns an array.\n If the input is multiple slices, returns a tuple of arrays, with\n only one dimension not equal to 1.\n\n See Also\n --------\n mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids\n meshgrid: return coordinate matrices from coordinate vectors\n r_ : array concatenator\n :ref:`how-to-partition`\n\n Examples\n --------\n >>> from numpy import ogrid\n >>> ogrid[-1:1:5j]\n array([-1. , -0.5, 0. , 0.5, 1. ])\n >>> ogrid[0:5, 0:5]\n (array([[0],\n [1],\n [2],\n [3],\n [4]]),\n array([[0, 1, 2, 3, 4]]))\n\n \"\"\"\n\n def __init__(self):\n super().__init__(sparse=True)\n\n\nogrid = OGridClass()\n\n\nclass AxisConcatenator:\n \"\"\"\n Translates slice objects to concatenation along an axis.\n\n For detailed documentation on usage, see `r_`.\n \"\"\"\n # allow ma.mr_ to override this\n concatenate = staticmethod(_nx.concatenate)\n makemat = staticmethod(matrixlib.matrix)\n\n def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):\n self.axis = axis\n self.matrix = matrix\n self.trans1d = trans1d\n self.ndmin = ndmin\n\n def __getitem__(self, key):\n # handle matrix builder syntax\n if isinstance(key, str):\n frame = sys._getframe().f_back\n mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)\n return mymat\n\n if not isinstance(key, tuple):\n key = (key,)\n\n # copy attributes, since they can be overridden in the first argument\n trans1d = self.trans1d\n ndmin = self.ndmin\n matrix = self.matrix\n axis = self.axis\n\n objs = []\n # dtypes or scalars for weak scalar handling in result_type\n result_type_objs = []\n\n for k, item in enumerate(key):\n scalar = False\n if isinstance(item, slice):\n step = item.step\n start = item.start\n stop = item.stop\n if start is None:\n start = 0\n if step is None:\n step = 1\n if isinstance(step, (_nx.complexfloating, complex)):\n size = int(abs(step))\n newobj = linspace(start, stop, num=size)\n else:\n newobj = _nx.arange(start, stop, step)\n if ndmin > 1:\n newobj = array(newobj, copy=False, ndmin=ndmin)\n if trans1d != -1:\n newobj = newobj.swapaxes(-1, trans1d)\n elif isinstance(item, str):\n if k != 0:\n raise ValueError(\"special directives must be the \"\n \"first entry.\")\n if item in ('r', 'c'):\n matrix = True\n col = (item == 'c')\n continue\n if ',' in item:\n vec = item.split(',')\n try:\n axis, ndmin = [int(x) for x in vec[:2]]\n if len(vec) == 3:\n trans1d = int(vec[2])\n continue\n except Exception as e:\n raise ValueError(\n \"unknown special directive {!r}\".format(item)\n ) from e\n try:\n axis = 
int(item)\n continue\n except (ValueError, TypeError) as e:\n raise ValueError(\"unknown special directive\") from e\n elif type(item) in ScalarType:\n scalar = True\n newobj = item\n else:\n item_ndim = np.ndim(item)\n newobj = array(item, copy=False, subok=True, ndmin=ndmin)\n if trans1d != -1 and item_ndim < ndmin:\n k2 = ndmin - item_ndim\n k1 = trans1d\n if k1 < 0:\n k1 += k2 + 1\n defaxes = list(range(ndmin))\n axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]\n newobj = newobj.transpose(axes)\n\n objs.append(newobj)\n if scalar:\n result_type_objs.append(item)\n else:\n result_type_objs.append(newobj.dtype)\n\n # Ensure that scalars won't up-cast unless warranted, for 0, drops\n # through to error in concatenate.\n if len(result_type_objs) != 0:\n final_dtype = _nx.result_type(*result_type_objs)\n # concatenate could do cast, but that can be overridden:\n objs = [array(obj, copy=False, subok=True,\n ndmin=ndmin, dtype=final_dtype) for obj in objs]\n\n res = self.concatenate(tuple(objs), axis=axis)\n\n if matrix:\n oldndim = res.ndim\n res = self.makemat(res)\n if oldndim == 1 and col:\n res = res.T\n return res\n\n def __len__(self):\n return 0\n\n# separate classes are used here instead of just making r_ = concatentor(0),\n# etc. because otherwise we couldn't get the doc string to come out right\n# in help(r_)\n\n\nclass RClass(AxisConcatenator):\n \"\"\"\n Translates slice objects to concatenation along the first axis.\n\n This is a simple way to build up arrays quickly. There are two use cases.\n\n 1. If the index expression contains comma separated arrays, then stack\n them along their first axis.\n 2. If the index expression contains slice notation or scalars then create\n a 1-D array with a range indicated by the slice notation.\n\n If slice notation is used, the syntax ``start:stop:step`` is equivalent\n to ``np.arange(start, stop, step)`` inside of the brackets. However, if\n ``step`` is an imaginary number (i.e. 100j) then its integer portion is\n interpreted as a number-of-points desired and the start and stop are\n inclusive. In other words ``start:stop:stepj`` is interpreted as\n ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.\n After expansion of slice notation, all comma separated sequences are\n concatenated together.\n\n Optional character strings placed as the first element of the index\n expression can be used to change the output. The strings 'r' or 'c' result\n in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)\n matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1\n (column) matrix is produced. If the result is 2-D then both provide the\n same matrix result.\n\n A string integer specifies which axis to stack multiple comma separated\n arrays along. A string of two comma-separated integers allows indication\n of the minimum number of dimensions to force each entry into as the\n second integer (the axis to concatenate along is still the first integer).\n\n A string with three comma-separated integers allows specification of the\n axis to concatenate along, the minimum number of dimensions to force the\n entries to, and which axis should contain the start of the arrays which\n are less than the specified number of dimensions. In other words the third\n integer allows you to specify where the 1's should be placed in the shape\n of the arrays that have their shapes upgraded. By default, they are placed\n in the front of the shape tuple. 
The third argument allows you to specify\n where the start of the array should be instead. Thus, a third argument of\n '0' would place the 1's at the end of the array shape. Negative integers\n specify where in the new shape tuple the last dimension of upgraded arrays\n should be placed, so the default is '-1'.\n\n Parameters\n ----------\n Not a function, so takes no parameters\n\n\n Returns\n -------\n A concatenated ndarray or matrix.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n c_ : Translates slice objects to concatenation along the second axis.\n\n Examples\n --------\n >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]\n array([1, 2, 3, ..., 4, 5, 6])\n >>> np.r_[-1:1:6j, [0]*3, 5, 6]\n array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])\n\n String integers specify the axis to concatenate along or the minimum\n number of dimensions to force entries into.\n\n >>> a = np.array([[0, 1, 2], [3, 4, 5]])\n >>> np.r_['-1', a, a] # concatenate along last axis\n array([[0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5]])\n >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> np.r_['0,2,0', [1,2,3], [4,5,6]]\n array([[1],\n [2],\n [3],\n [4],\n [5],\n [6]])\n >>> np.r_['1,2,0', [1,2,3], [4,5,6]]\n array([[1, 4],\n [2, 5],\n [3, 6]])\n\n Using 'r' or 'c' as a first string argument creates a matrix.\n\n >>> np.r_['r',[1,2,3], [4,5,6]]\n matrix([[1, 2, 3, 4, 5, 6]])\n\n \"\"\"\n\n def __init__(self):\n AxisConcatenator.__init__(self, 0)\n\n\nr_ = RClass()\n\n\nclass CClass(AxisConcatenator):\n \"\"\"\n Translates slice objects to concatenation along the second axis.\n\n This is short-hand for ``np.r_['-1,2,0', index expression]``, which is\n useful because of its common occurrence. In particular, arrays will be\n stacked along their last axis after being upgraded to at least 2-D with\n 1's post-pended to the shape (column vectors made out of 1-D arrays).\n\n See Also\n --------\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n r_ : For more detailed documentation.\n\n Examples\n --------\n >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]\n array([[1, 4],\n [2, 5],\n [3, 6]])\n >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]\n array([[1, 2, 3, ..., 4, 5, 6]])\n\n \"\"\"\n\n def __init__(self):\n AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)\n\n\nc_ = CClass()\n\n\n@set_module('numpy')\nclass ndenumerate:\n \"\"\"\n Multidimensional index iterator.\n\n Return an iterator yielding pairs of array coordinates and values.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n\n See Also\n --------\n ndindex, flatiter\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> for index, x in np.ndenumerate(a):\n ... print(index, x)\n (0, 0) 1\n (0, 1) 2\n (1, 0) 3\n (1, 1) 4\n\n \"\"\"\n\n def __init__(self, arr):\n self.iter = np.asarray(arr).flat\n\n def __next__(self):\n \"\"\"\n Standard iterator method, returns the index tuple and array value.\n\n Returns\n -------\n coords : tuple of ints\n The indices of the current iteration.\n val : scalar\n The array element of the current iteration.\n\n \"\"\"\n return self.iter.coords, next(self.iter)\n\n def __iter__(self):\n return self\n\n\n@set_module('numpy')\nclass ndindex:\n \"\"\"\n An N-dimensional iterator object to index arrays.\n\n Given the shape of an array, an `ndindex` instance iterates over\n the N-dimensional index of the array. 
At each iteration a tuple\n of indices is returned, the last dimension is iterated over first.\n\n Parameters\n ----------\n shape : ints, or a single tuple of ints\n The size of each dimension of the array can be passed as\n individual parameters or as the elements of a tuple.\n\n See Also\n --------\n ndenumerate, flatiter\n\n Examples\n --------\n Dimensions as individual arguments\n\n >>> for index in np.ndindex(3, 2, 1):\n ... print(index)\n (0, 0, 0)\n (0, 1, 0)\n (1, 0, 0)\n (1, 1, 0)\n (2, 0, 0)\n (2, 1, 0)\n\n Same dimensions - but in a tuple ``(3, 2, 1)``\n\n >>> for index in np.ndindex((3, 2, 1)):\n ... print(index)\n (0, 0, 0)\n (0, 1, 0)\n (1, 0, 0)\n (1, 1, 0)\n (2, 0, 0)\n (2, 1, 0)\n\n \"\"\"\n\n def __init__(self, *shape):\n if len(shape) == 1 and isinstance(shape[0], tuple):\n shape = shape[0]\n x = as_strided(_nx.zeros(1), shape=shape,\n strides=_nx.zeros_like(shape))\n self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],\n order='C')\n\n def __iter__(self):\n return self\n\n def ndincr(self):\n \"\"\"\n Increment the multi-dimensional index by one.\n\n This method is for backward compatibility only: do not use.\n\n .. deprecated:: 1.20.0\n This method has been advised against since numpy 1.8.0, but only\n started emitting DeprecationWarning as of this version.\n \"\"\"\n # NumPy 1.20.0, 2020-09-08\n warnings.warn(\n \"`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead\",\n DeprecationWarning, stacklevel=2)\n next(self)\n\n def __next__(self):\n \"\"\"\n Standard iterator method, updates the index and returns the index\n tuple.\n\n Returns\n -------\n val : tuple of ints\n Returns a tuple containing the indices of the current\n iteration.\n\n \"\"\"\n next(self._it)\n return self._it.multi_index\n\n\n# You can do all this with slice() plus a few special objects,\n# but there's a lot to remember. This version is simpler because\n# it uses the standard array indexing syntax.\n#\n# Written by Konrad Hinsen \n# last revision: 1999-7-23\n#\n# Cosmetic changes by T. Oliphant 2001\n#\n#\n\nclass IndexExpression:\n \"\"\"\n A nicer way to build up index tuples for arrays.\n\n .. note::\n Use one of the two predefined instances ``index_exp`` or `s_`\n rather than directly using `IndexExpression`.\n\n For any index combination, including slicing and axis insertion,\n ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any\n array `a`. 
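# --- Hedged usage sketch (not part of the class above): s_ and index_exp differ
# only in whether a bare index is wrapped in a tuple; the tuple form is handy
# when an index expression has to be extended with further indices.
import numpy as np

a = np.arange(10)
sl = np.s_[2::2]                      # slice(2, None, 2)
assert a[sl].tolist() == [2, 4, 6, 8]
idx = np.index_exp[2::2] + (None,)    # tuple form composes with extra indices
assert a[idx].shape == (4, 1)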
However, ``np.index_exp[indices]`` can be used anywhere\n in Python code and returns a tuple of slice objects that can be\n used in the construction of complex index expressions.\n\n Parameters\n ----------\n maketuple : bool\n If True, always returns a tuple.\n\n See Also\n --------\n s_ : Predefined instance without tuple conversion:\n `s_ = IndexExpression(maketuple=False)`.\n The ``index_exp`` is another predefined instance that\n always returns a tuple:\n `index_exp = IndexExpression(maketuple=True)`.\n\n Notes\n -----\n You can do all this with :class:`slice` plus a few special objects,\n but there's a lot to remember and this version is simpler because\n it uses the standard array indexing syntax.\n\n Examples\n --------\n >>> np.s_[2::2]\n slice(2, None, 2)\n >>> np.index_exp[2::2]\n (slice(2, None, 2),)\n\n >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]\n array([2, 4])\n\n \"\"\"\n\n def __init__(self, maketuple):\n self.maketuple = maketuple\n\n def __getitem__(self, item):\n if self.maketuple and not isinstance(item, tuple):\n return (item,)\n else:\n return item\n\n\nindex_exp = IndexExpression(maketuple=True)\ns_ = IndexExpression(maketuple=False)\n\n# End contribution from Konrad.\n\n\n# The following functions complement those in twodim_base, but are\n# applicable to N-dimensions.\n\n\ndef _fill_diagonal_dispatcher(a, val, wrap=None):\n return (a,)\n\n\n@array_function_dispatch(_fill_diagonal_dispatcher)\ndef fill_diagonal(a, val, wrap=False):\n \"\"\"Fill the main diagonal of the given array of any dimensionality.\n\n For an array `a` with ``a.ndim >= 2``, the diagonal is the list of\n values ``a[i, ..., i]`` with indices ``i`` all identical. This function\n modifies the input array in-place without returning a value.\n\n Parameters\n ----------\n a : array, at least 2-D.\n Array whose diagonal is to be filled in-place.\n val : scalar or array_like\n Value(s) to write on the diagonal. If `val` is scalar, the value is\n written along the diagonal. If array-like, the flattened `val` is\n written along the diagonal, repeating if necessary to fill all\n diagonal entries.\n\n wrap : bool\n For tall matrices in NumPy version up to 1.6.2, the\n diagonal \"wrapped\" after N columns. You can have this behavior\n with this option. This affects only tall matrices.\n\n See also\n --------\n diag_indices, diag_indices_from\n\n Notes\n -----\n .. 
versionadded:: 1.4.0\n\n This functionality can be obtained via `diag_indices`, but internally\n this version uses a much faster implementation that never constructs the\n indices and uses simple slicing.\n\n Examples\n --------\n >>> a = np.zeros((3, 3), int)\n >>> np.fill_diagonal(a, 5)\n >>> a\n array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5]])\n\n The same function can operate on a 4-D array:\n\n >>> a = np.zeros((3, 3, 3, 3), int)\n >>> np.fill_diagonal(a, 4)\n\n We only show a few blocks for clarity:\n\n >>> a[0, 0]\n array([[4, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n >>> a[1, 1]\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 0]])\n >>> a[2, 2]\n array([[0, 0, 0],\n [0, 0, 0],\n [0, 0, 4]])\n\n The wrap option affects only tall matrices:\n\n >>> # tall matrices no wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [0, 0, 0]])\n\n >>> # tall matrices wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [4, 0, 0]])\n\n >>> # wide matrices\n >>> a = np.zeros((3, 5), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0, 0, 0],\n [0, 4, 0, 0, 0],\n [0, 0, 4, 0, 0]])\n\n The anti-diagonal can be filled by reversing the order of elements\n using either `numpy.flipud` or `numpy.fliplr`.\n\n >>> a = np.zeros((3, 3), int);\n >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip\n >>> a\n array([[0, 0, 1],\n [0, 2, 0],\n [3, 0, 0]])\n >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip\n >>> a\n array([[0, 0, 3],\n [0, 2, 0],\n [1, 0, 0]])\n\n Note that the order in which the diagonal is filled varies depending\n on the flip function.\n \"\"\"\n if a.ndim < 2:\n raise ValueError(\"array must be at least 2-d\")\n end = None\n if a.ndim == 2:\n # Explicit, fast formula for the common case. For 2-d arrays, we\n # accept rectangular ones.\n step = a.shape[1] + 1\n # This is needed to don't have tall matrix have the diagonal wrap.\n if not wrap:\n end = a.shape[1] * a.shape[1]\n else:\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n if not np.all(diff(a.shape) == 0):\n raise ValueError(\"All dimensions of input must be of equal length\")\n step = 1 + (np.cumprod(a.shape[:-1])).sum()\n\n # Write the value out into the diagonal.\n a.flat[:end:step] = val\n\n\n@set_module('numpy')\ndef diag_indices(n, ndim=2):\n \"\"\"\n Return the indices to access the main diagonal of an array.\n\n This returns a tuple of indices that can be used to access the main\n diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape\n (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for\n ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``\n for ``i = [0..n-1]``.\n\n Parameters\n ----------\n n : int\n The size, along each dimension, of the arrays for which the returned\n indices can be used.\n\n ndim : int, optional\n The number of dimensions.\n\n See Also\n --------\n diag_indices_from\n\n Notes\n -----\n .. 
versionadded:: 1.4.0\n\n Examples\n --------\n Create a set of indices to access the diagonal of a (4, 4) array:\n\n >>> di = np.diag_indices(4)\n >>> di\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> a[di] = 100\n >>> a\n array([[100, 1, 2, 3],\n [ 4, 100, 6, 7],\n [ 8, 9, 100, 11],\n [ 12, 13, 14, 100]])\n\n Now, we create indices to manipulate a 3-D array:\n\n >>> d3 = np.diag_indices(2, 3)\n >>> d3\n (array([0, 1]), array([0, 1]), array([0, 1]))\n\n And use it to set the diagonal of an array of zeros to 1:\n\n >>> a = np.zeros((2, 2, 2), dtype=int)\n >>> a[d3] = 1\n >>> a\n array([[[1, 0],\n [0, 0]],\n [[0, 0],\n [0, 1]]])\n\n \"\"\"\n idx = np.arange(n)\n return (idx,) * ndim\n\n\ndef _diag_indices_from(arr):\n return (arr,)\n\n\n@array_function_dispatch(_diag_indices_from)\ndef diag_indices_from(arr):\n \"\"\"\n Return the indices to access the main diagonal of an n-dimensional array.\n\n See `diag_indices` for full details.\n\n Parameters\n ----------\n arr : array, at least 2-D\n\n See Also\n --------\n diag_indices\n\n Notes\n -----\n .. versionadded:: 1.4.0\n\n Examples\n --------\n \n Create a 4 by 4 array.\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n \n Get the indices of the diagonal elements.\n\n >>> di = np.diag_indices_from(a)\n >>> di\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n\n >>> a[di]\n array([ 0, 5, 10, 15])\n\n This is simply syntactic sugar for diag_indices.\n\n >>> np.diag_indices(a.shape[0])\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n\n \"\"\"\n\n if not arr.ndim >= 2:\n raise ValueError(\"input array must be at least 2-d\")\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n if not np.all(diff(arr.shape) == 0):\n raise ValueError(\"All dimensions of input must be of equal length\")\n\n return diag_indices(arr.shape[0], arr.ndim)\n", "output": ["_fill_diagonal_dispatcher", "_diag_indices_from", "_ix__dispatcher", "RClass", "ndindex", "IndexExpression", "OGridClass", "ndenumerate", "MGridClass", "AxisConcatenator", "nd_grid", "CClass"], "metadata": {"file_path": "numpy-main/numpy/lib/_index_tricks_impl.py", "file_length": 10479, "symbol_dict": [{"symbol": "_ix__dispatcher", "type": "mannual_defined_function", "byte_location": 788, "location": 262}, {"symbol": "_diag_indices_from", "type": "mannual_defined_function", "byte_location": 30509, "location": 9951}, {"symbol": "_fill_diagonal_dispatcher", "type": "mannual_defined_function", "byte_location": 24885, "location": 7795}, {"symbol": "MGridClass", "type": "mannual_defined_class", "byte_location": 6952, "location": 2137}, {"symbol": "AxisConcatenator", "type": "mannual_defined_class", "byte_location": 10197, "location": 3245}, {"symbol": "RClass", "type": "mannual_defined_class", "byte_location": 14727, "location": 4504}, {"symbol": "ndenumerate", "type": "mannual_defined_class", "byte_location": 19593, "location": 6098}, {"symbol": "ndindex", "type": "mannual_defined_class", "byte_location": 20548, "location": 6403}, {"symbol": "nd_grid", "type": "mannual_defined_class", "byte_location": 3062, "location": 1051}, {"symbol": "OGridClass", "type": "mannual_defined_class", "byte_location": 8672, "location": 2780}, {"symbol": "CClass", "type": "mannual_defined_class", "byte_location": 18676, "location": 5755}, {"symbol": 
"IndexExpression", "type": "mannual_defined_class", "byte_location": 23102, "location": 7245}]}} {"input": "import pickle\nfrom functools import partial\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_equal, assert_, assert_array_equal\nfrom numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64)\n\n@pytest.fixture(scope='module',\n params=(np.bool, np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64))\ndef dtype(request):\n return request.param\n\n\ndef params_0(f):\n val = f()\n assert_(np.isscalar(val))\n val = f(10)\n assert_(val.shape == (10,))\n val = f((10, 10))\n assert_(val.shape == (10, 10))\n val = f((10, 10, 10))\n assert_(val.shape == (10, 10, 10))\n val = f(size=(5, 5))\n assert_(val.shape == (5, 5))\n\n\ndef params_1(f, bounded=False):\n a = 5.0\n b = np.arange(2.0, 12.0)\n c = np.arange(2.0, 102.0).reshape((10, 10))\n d = np.arange(2.0, 1002.0).reshape((10, 10, 10))\n e = np.array([2.0, 3.0])\n g = np.arange(2.0, 12.0).reshape((1, 10, 1))\n if bounded:\n a = 0.5\n b = b / (1.5 * b.max())\n c = c / (1.5 * c.max())\n d = d / (1.5 * d.max())\n e = e / (1.5 * e.max())\n g = g / (1.5 * g.max())\n\n # Scalar\n f(a)\n # Scalar - size\n f(a, size=(10, 10))\n # 1d\n f(b)\n # 2d\n f(c)\n # 3d\n f(d)\n # 1d size\n f(b, size=10)\n # 2d - size - broadcast\n f(e, size=(10, 2))\n # 3d - size\n f(g, size=(10, 10, 10))\n\n\ndef comp_state(state1, state2):\n identical = True\n if isinstance(state1, dict):\n for key in state1:\n identical &= comp_state(state1[key], state2[key])\n elif type(state1) != type(state2):\n identical &= type(state1) == type(state2)\n else:\n if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance(\n state2, (list, tuple, np.ndarray))):\n for s1, s2 in zip(state1, state2):\n identical &= comp_state(s1, s2)\n else:\n identical &= state1 == state2\n return identical\n\n\ndef warmup(rg, n=None):\n if n is None:\n n = 11 + np.random.randint(0, 20)\n rg.standard_normal(n)\n rg.standard_normal(n)\n rg.standard_normal(n, dtype=np.float32)\n rg.standard_normal(n, dtype=np.float32)\n rg.integers(0, 2 ** 24, n, dtype=np.uint64)\n rg.integers(0, 2 ** 48, n, dtype=np.uint64)\n rg.standard_gamma(11.0, n)\n rg.standard_gamma(11.0, n, dtype=np.float32)\n rg.random(n, dtype=np.float64)\n rg.random(n, dtype=np.float32)\n\n\nclass RNG:\n @classmethod\n def setup_class(cls):\n # Overridden in test classes. 
Place holder to silence IDE noise\n cls.bit_generator = PCG64\n cls.advance = None\n cls.seed = [12345]\n cls.rg = Generator(cls.bit_generator(*cls.seed))\n cls.initial_state = cls.rg.bit_generator.state\n cls.seed_vector_bits = 64\n cls._extra_setup()\n\n @classmethod\n def _extra_setup(cls):\n cls.vec_1d = np.arange(2.0, 102.0)\n cls.vec_2d = np.arange(2.0, 102.0)[None, :]\n cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))\n cls.seed_error = TypeError\n\n def _reset_state(self):\n self.rg.bit_generator.state = self.initial_state\n\n def test_init(self):\n rg = Generator(self.bit_generator())\n state = rg.bit_generator.state\n rg.standard_normal(1)\n rg.standard_normal(1)\n rg.bit_generator.state = state\n new_state = rg.bit_generator.state\n assert_(comp_state(state, new_state))\n\n def test_advance(self):\n state = self.rg.bit_generator.state\n if hasattr(self.rg.bit_generator, 'advance'):\n self.rg.bit_generator.advance(self.advance)\n assert_(not comp_state(state, self.rg.bit_generator.state))\n else:\n bitgen_name = self.rg.bit_generator.__class__.__name__\n pytest.skip(f'Advance is not supported by {bitgen_name}')\n\n def test_jump(self):\n state = self.rg.bit_generator.state\n if hasattr(self.rg.bit_generator, 'jumped'):\n bit_gen2 = self.rg.bit_generator.jumped()\n jumped_state = bit_gen2.state\n assert_(not comp_state(state, jumped_state))\n self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)\n self.rg.bit_generator.state = state\n bit_gen3 = self.rg.bit_generator.jumped()\n rejumped_state = bit_gen3.state\n assert_(comp_state(jumped_state, rejumped_state))\n else:\n bitgen_name = self.rg.bit_generator.__class__.__name__\n if bitgen_name not in ('SFC64',):\n raise AttributeError(f'no \"jumped\" in {bitgen_name}')\n pytest.skip(f'Jump is not supported by {bitgen_name}')\n\n def test_uniform(self):\n r = self.rg.uniform(-1.0, 0.0, size=10)\n assert_(len(r) == 10)\n assert_((r > -1).all())\n assert_((r <= 0).all())\n\n def test_uniform_array(self):\n r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)\n assert_(len(r) == 10)\n assert_((r > -1).all())\n assert_((r <= 0).all())\n r = self.rg.uniform(np.array([-1.0] * 10),\n np.array([0.0] * 10), size=10)\n assert_(len(r) == 10)\n assert_((r > -1).all())\n assert_((r <= 0).all())\n r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)\n assert_(len(r) == 10)\n assert_((r > -1).all())\n assert_((r <= 0).all())\n\n def test_random(self):\n assert_(len(self.rg.random(10)) == 10)\n params_0(self.rg.random)\n\n def test_standard_normal_zig(self):\n assert_(len(self.rg.standard_normal(10)) == 10)\n\n def test_standard_normal(self):\n assert_(len(self.rg.standard_normal(10)) == 10)\n params_0(self.rg.standard_normal)\n\n def test_standard_gamma(self):\n assert_(len(self.rg.standard_gamma(10, 10)) == 10)\n assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)\n params_1(self.rg.standard_gamma)\n\n def test_standard_exponential(self):\n assert_(len(self.rg.standard_exponential(10)) == 10)\n params_0(self.rg.standard_exponential)\n\n def test_standard_exponential_float(self):\n randoms = self.rg.standard_exponential(10, dtype='float32')\n assert_(len(randoms) == 10)\n assert randoms.dtype == np.float32\n params_0(partial(self.rg.standard_exponential, dtype='float32'))\n\n def test_standard_exponential_float_log(self):\n randoms = self.rg.standard_exponential(10, dtype='float32',\n method='inv')\n assert_(len(randoms) == 10)\n assert randoms.dtype == np.float32\n params_0(partial(self.rg.standard_exponential, 
dtype='float32',\n method='inv'))\n\n def test_standard_cauchy(self):\n assert_(len(self.rg.standard_cauchy(10)) == 10)\n params_0(self.rg.standard_cauchy)\n\n def test_standard_t(self):\n assert_(len(self.rg.standard_t(10, 10)) == 10)\n params_1(self.rg.standard_t)\n\n def test_binomial(self):\n assert_(self.rg.binomial(10, .5) >= 0)\n assert_(self.rg.binomial(1000, .5) >= 0)\n\n def test_reset_state(self):\n state = self.rg.bit_generator.state\n int_1 = self.rg.integers(2**31)\n self.rg.bit_generator.state = state\n int_2 = self.rg.integers(2**31)\n assert_(int_1 == int_2)\n\n def test_entropy_init(self):\n rg = Generator(self.bit_generator())\n rg2 = Generator(self.bit_generator())\n assert_(not comp_state(rg.bit_generator.state,\n rg2.bit_generator.state))\n\n def test_seed(self):\n rg = Generator(self.bit_generator(*self.seed))\n rg2 = Generator(self.bit_generator(*self.seed))\n rg.random()\n rg2.random()\n assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))\n\n def test_reset_state_gauss(self):\n rg = Generator(self.bit_generator(*self.seed))\n rg.standard_normal()\n state = rg.bit_generator.state\n n1 = rg.standard_normal(size=10)\n rg2 = Generator(self.bit_generator())\n rg2.bit_generator.state = state\n n2 = rg2.standard_normal(size=10)\n assert_array_equal(n1, n2)\n\n def test_reset_state_uint32(self):\n rg = Generator(self.bit_generator(*self.seed))\n rg.integers(0, 2 ** 24, 120, dtype=np.uint32)\n state = rg.bit_generator.state\n n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)\n rg2 = Generator(self.bit_generator())\n rg2.bit_generator.state = state\n n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)\n assert_array_equal(n1, n2)\n\n def test_reset_state_float(self):\n rg = Generator(self.bit_generator(*self.seed))\n rg.random(dtype='float32')\n state = rg.bit_generator.state\n n1 = rg.random(size=10, dtype='float32')\n rg2 = Generator(self.bit_generator())\n rg2.bit_generator.state = state\n n2 = rg2.random(size=10, dtype='float32')\n assert_((n1 == n2).all())\n\n def test_shuffle(self):\n original = np.arange(200, 0, -1)\n permuted = self.rg.permutation(original)\n assert_((original != permuted).any())\n\n def test_permutation(self):\n original = np.arange(200, 0, -1)\n permuted = self.rg.permutation(original)\n assert_((original != permuted).any())\n\n def test_beta(self):\n vals = self.rg.beta(2.0, 2.0, 10)\n assert_(len(vals) == 10)\n vals = self.rg.beta(np.array([2.0] * 10), 2.0)\n assert_(len(vals) == 10)\n vals = self.rg.beta(2.0, np.array([2.0] * 10))\n assert_(len(vals) == 10)\n vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))\n assert_(len(vals) == 10)\n vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))\n assert_(vals.shape == (10, 10))\n\n def test_bytes(self):\n vals = self.rg.bytes(10)\n assert_(len(vals) == 10)\n\n def test_chisquare(self):\n vals = self.rg.chisquare(2.0, 10)\n assert_(len(vals) == 10)\n params_1(self.rg.chisquare)\n\n def test_exponential(self):\n vals = self.rg.exponential(2.0, 10)\n assert_(len(vals) == 10)\n params_1(self.rg.exponential)\n\n def test_f(self):\n vals = self.rg.f(3, 1000, 10)\n assert_(len(vals) == 10)\n\n def test_gamma(self):\n vals = self.rg.gamma(3, 2, 10)\n assert_(len(vals) == 10)\n\n def test_geometric(self):\n vals = self.rg.geometric(0.5, 10)\n assert_(len(vals) == 10)\n params_1(self.rg.exponential, bounded=True)\n\n def test_gumbel(self):\n vals = self.rg.gumbel(2.0, 2.0, 10)\n assert_(len(vals) == 10)\n\n def test_laplace(self):\n vals = self.rg.laplace(2.0, 2.0, 
10)\n assert_(len(vals) == 10)\n\n def test_logitic(self):\n vals = self.rg.logistic(2.0, 2.0, 10)\n assert_(len(vals) == 10)\n\n def test_logseries(self):\n vals = self.rg.logseries(0.5, 10)\n assert_(len(vals) == 10)\n\n def test_negative_binomial(self):\n vals = self.rg.negative_binomial(10, 0.2, 10)\n assert_(len(vals) == 10)\n\n def test_noncentral_chisquare(self):\n vals = self.rg.noncentral_chisquare(10, 2, 10)\n assert_(len(vals) == 10)\n\n def test_noncentral_f(self):\n vals = self.rg.noncentral_f(3, 1000, 2, 10)\n assert_(len(vals) == 10)\n vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)\n assert_(len(vals) == 10)\n vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)\n assert_(len(vals) == 10)\n vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))\n assert_(len(vals) == 10)\n\n def test_normal(self):\n vals = self.rg.normal(10, 0.2, 10)\n assert_(len(vals) == 10)\n\n def test_pareto(self):\n vals = self.rg.pareto(3.0, 10)\n assert_(len(vals) == 10)\n\n def test_poisson(self):\n vals = self.rg.poisson(10, 10)\n assert_(len(vals) == 10)\n vals = self.rg.poisson(np.array([10] * 10))\n assert_(len(vals) == 10)\n params_1(self.rg.poisson)\n\n def test_power(self):\n vals = self.rg.power(0.2, 10)\n assert_(len(vals) == 10)\n\n def test_integers(self):\n vals = self.rg.integers(10, 20, 10)\n assert_(len(vals) == 10)\n\n def test_rayleigh(self):\n vals = self.rg.rayleigh(0.2, 10)\n assert_(len(vals) == 10)\n params_1(self.rg.rayleigh, bounded=True)\n\n def test_vonmises(self):\n vals = self.rg.vonmises(10, 0.2, 10)\n assert_(len(vals) == 10)\n\n def test_wald(self):\n vals = self.rg.wald(1.0, 1.0, 10)\n assert_(len(vals) == 10)\n\n def test_weibull(self):\n vals = self.rg.weibull(1.0, 10)\n assert_(len(vals) == 10)\n\n def test_zipf(self):\n vals = self.rg.zipf(10, 10)\n assert_(len(vals) == 10)\n vals = self.rg.zipf(self.vec_1d)\n assert_(len(vals) == 100)\n vals = self.rg.zipf(self.vec_2d)\n assert_(vals.shape == (1, 100))\n vals = self.rg.zipf(self.mat)\n assert_(vals.shape == (100, 100))\n\n def test_hypergeometric(self):\n vals = self.rg.hypergeometric(25, 25, 20)\n assert_(np.isscalar(vals))\n vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)\n assert_(vals.shape == (10,))\n\n def test_triangular(self):\n vals = self.rg.triangular(-5, 0, 5)\n assert_(np.isscalar(vals))\n vals = self.rg.triangular(-5, np.array([0] * 10), 5)\n assert_(vals.shape == (10,))\n\n def test_multivariate_normal(self):\n mean = [0, 0]\n cov = [[1, 0], [0, 100]] # diagonal covariance\n x = self.rg.multivariate_normal(mean, cov, 5000)\n assert_(x.shape == (5000, 2))\n x_zig = self.rg.multivariate_normal(mean, cov, 5000)\n assert_(x.shape == (5000, 2))\n x_inv = self.rg.multivariate_normal(mean, cov, 5000)\n assert_(x.shape == (5000, 2))\n assert_((x_zig != x_inv).any())\n\n def test_multinomial(self):\n vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])\n assert_(vals.shape == (2,))\n vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)\n assert_(vals.shape == (10, 2))\n\n def test_dirichlet(self):\n s = self.rg.dirichlet((10, 5, 3), 20)\n assert_(s.shape == (20, 3))\n\n def test_pickle(self):\n pick = pickle.dumps(self.rg)\n unpick = pickle.loads(pick)\n assert_((type(self.rg) == type(unpick)))\n assert_(comp_state(self.rg.bit_generator.state,\n unpick.bit_generator.state))\n\n pick = pickle.dumps(self.rg)\n unpick = pickle.loads(pick)\n assert_((type(self.rg) == type(unpick)))\n assert_(comp_state(self.rg.bit_generator.state,\n unpick.bit_generator.state))\n\n def 
test_seed_array(self):\n if self.seed_vector_bits is None:\n bitgen_name = self.bit_generator.__name__\n pytest.skip(f'Vector seeding is not supported by {bitgen_name}')\n\n if self.seed_vector_bits == 32:\n dtype = np.uint32\n else:\n dtype = np.uint64\n seed = np.array([1], dtype=dtype)\n bg = self.bit_generator(seed)\n state1 = bg.state\n bg = self.bit_generator(1)\n state2 = bg.state\n assert_(comp_state(state1, state2))\n\n seed = np.arange(4, dtype=dtype)\n bg = self.bit_generator(seed)\n state1 = bg.state\n bg = self.bit_generator(seed[0])\n state2 = bg.state\n assert_(not comp_state(state1, state2))\n\n seed = np.arange(1500, dtype=dtype)\n bg = self.bit_generator(seed)\n state1 = bg.state\n bg = self.bit_generator(seed[0])\n state2 = bg.state\n assert_(not comp_state(state1, state2))\n\n seed = 2 ** np.mod(np.arange(1500, dtype=dtype),\n self.seed_vector_bits - 1) + 1\n bg = self.bit_generator(seed)\n state1 = bg.state\n bg = self.bit_generator(seed[0])\n state2 = bg.state\n assert_(not comp_state(state1, state2))\n\n def test_uniform_float(self):\n rg = Generator(self.bit_generator(12345))\n warmup(rg)\n state = rg.bit_generator.state\n r1 = rg.random(11, dtype=np.float32)\n rg2 = Generator(self.bit_generator())\n warmup(rg2)\n rg2.bit_generator.state = state\n r2 = rg2.random(11, dtype=np.float32)\n assert_array_equal(r1, r2)\n assert_equal(r1.dtype, np.float32)\n assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))\n\n def test_gamma_floats(self):\n rg = Generator(self.bit_generator())\n warmup(rg)\n state = rg.bit_generator.state\n r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)\n rg2 = Generator(self.bit_generator())\n warmup(rg2)\n rg2.bit_generator.state = state\n r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)\n assert_array_equal(r1, r2)\n assert_equal(r1.dtype, np.float32)\n assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))\n\n def test_normal_floats(self):\n rg = Generator(self.bit_generator())\n warmup(rg)\n state = rg.bit_generator.state\n r1 = rg.standard_normal(11, dtype=np.float32)\n rg2 = Generator(self.bit_generator())\n warmup(rg2)\n rg2.bit_generator.state = state\n r2 = rg2.standard_normal(11, dtype=np.float32)\n assert_array_equal(r1, r2)\n assert_equal(r1.dtype, np.float32)\n assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))\n\n def test_normal_zig_floats(self):\n rg = Generator(self.bit_generator())\n warmup(rg)\n state = rg.bit_generator.state\n r1 = rg.standard_normal(11, dtype=np.float32)\n rg2 = Generator(self.bit_generator())\n warmup(rg2)\n rg2.bit_generator.state = state\n r2 = rg2.standard_normal(11, dtype=np.float32)\n assert_array_equal(r1, r2)\n assert_equal(r1.dtype, np.float32)\n assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))\n\n def test_output_fill(self):\n rg = self.rg\n state = rg.bit_generator.state\n size = (31, 7, 97)\n existing = np.empty(size)\n rg.bit_generator.state = state\n rg.standard_normal(out=existing)\n rg.bit_generator.state = state\n direct = rg.standard_normal(size=size)\n assert_equal(direct, existing)\n\n sized = np.empty(size)\n rg.bit_generator.state = state\n rg.standard_normal(out=sized, size=sized.shape)\n\n existing = np.empty(size, dtype=np.float32)\n rg.bit_generator.state = state\n rg.standard_normal(out=existing, dtype=np.float32)\n rg.bit_generator.state = state\n direct = rg.standard_normal(size=size, dtype=np.float32)\n assert_equal(direct, existing)\n\n def test_output_filling_uniform(self):\n rg = self.rg\n state = 
rg.bit_generator.state\n size = (31, 7, 97)\n existing = np.empty(size)\n rg.bit_generator.state = state\n rg.random(out=existing)\n rg.bit_generator.state = state\n direct = rg.random(size=size)\n assert_equal(direct, existing)\n\n existing = np.empty(size, dtype=np.float32)\n rg.bit_generator.state = state\n rg.random(out=existing, dtype=np.float32)\n rg.bit_generator.state = state\n direct = rg.random(size=size, dtype=np.float32)\n assert_equal(direct, existing)\n\n def test_output_filling_exponential(self):\n rg = self.rg\n state = rg.bit_generator.state\n size = (31, 7, 97)\n existing = np.empty(size)\n rg.bit_generator.state = state\n rg.standard_exponential(out=existing)\n rg.bit_generator.state = state\n direct = rg.standard_exponential(size=size)\n assert_equal(direct, existing)\n\n existing = np.empty(size, dtype=np.float32)\n rg.bit_generator.state = state\n rg.standard_exponential(out=existing, dtype=np.float32)\n rg.bit_generator.state = state\n direct = rg.standard_exponential(size=size, dtype=np.float32)\n assert_equal(direct, existing)\n\n def test_output_filling_gamma(self):\n rg = self.rg\n state = rg.bit_generator.state\n size = (31, 7, 97)\n existing = np.zeros(size)\n rg.bit_generator.state = state\n rg.standard_gamma(1.0, out=existing)\n rg.bit_generator.state = state\n direct = rg.standard_gamma(1.0, size=size)\n assert_equal(direct, existing)\n\n existing = np.zeros(size, dtype=np.float32)\n rg.bit_generator.state = state\n rg.standard_gamma(1.0, out=existing, dtype=np.float32)\n rg.bit_generator.state = state\n direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)\n assert_equal(direct, existing)\n\n def test_output_filling_gamma_broadcast(self):\n rg = self.rg\n state = rg.bit_generator.state\n size = (31, 7, 97)\n mu = np.arange(97.0) + 1.0\n existing = np.zeros(size)\n rg.bit_generator.state = state\n rg.standard_gamma(mu, out=existing)\n rg.bit_generator.state = state\n direct = rg.standard_gamma(mu, size=size)\n assert_equal(direct, existing)\n\n existing = np.zeros(size, dtype=np.float32)\n rg.bit_generator.state = state\n rg.standard_gamma(mu, out=existing, dtype=np.float32)\n rg.bit_generator.state = state\n direct = rg.standard_gamma(mu, size=size, dtype=np.float32)\n assert_equal(direct, existing)\n\n def test_output_fill_error(self):\n rg = self.rg\n size = (31, 7, 97)\n existing = np.empty(size)\n with pytest.raises(TypeError):\n rg.standard_normal(out=existing, dtype=np.float32)\n with pytest.raises(ValueError):\n rg.standard_normal(out=existing[::3])\n existing = np.empty(size, dtype=np.float32)\n with pytest.raises(TypeError):\n rg.standard_normal(out=existing, dtype=np.float64)\n\n existing = np.zeros(size, dtype=np.float32)\n with pytest.raises(TypeError):\n rg.standard_gamma(1.0, out=existing, dtype=np.float64)\n with pytest.raises(ValueError):\n rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)\n existing = np.zeros(size, dtype=np.float64)\n with pytest.raises(TypeError):\n rg.standard_gamma(1.0, out=existing, dtype=np.float32)\n with pytest.raises(ValueError):\n rg.standard_gamma(1.0, out=existing[::3])\n\n def test_integers_broadcast(self, dtype):\n if dtype == np.bool:\n upper = 2\n lower = 0\n else:\n info = np.iinfo(dtype)\n upper = int(info.max) + 1\n lower = info.min\n self._reset_state()\n a = self.rg.integers(lower, [upper] * 10, dtype=dtype)\n self._reset_state()\n b = self.rg.integers([lower] * 10, upper, dtype=dtype)\n assert_equal(a, b)\n self._reset_state()\n c = self.rg.integers(lower, upper, size=10, 
dtype=dtype)\n assert_equal(a, c)\n self._reset_state()\n d = self.rg.integers(np.array(\n [lower] * 10), np.array([upper], dtype=object), size=10,\n dtype=dtype)\n assert_equal(a, d)\n self._reset_state()\n e = self.rg.integers(\n np.array([lower] * 10), np.array([upper] * 10), size=10,\n dtype=dtype)\n assert_equal(a, e)\n\n self._reset_state()\n a = self.rg.integers(0, upper, size=10, dtype=dtype)\n self._reset_state()\n b = self.rg.integers([upper] * 10, dtype=dtype)\n assert_equal(a, b)\n\n def test_integers_numpy(self, dtype):\n high = np.array([1])\n low = np.array([0])\n\n out = self.rg.integers(low, high, dtype=dtype)\n assert out.shape == (1,)\n\n out = self.rg.integers(low[0], high, dtype=dtype)\n assert out.shape == (1,)\n\n out = self.rg.integers(low, high[0], dtype=dtype)\n assert out.shape == (1,)\n\n def test_integers_broadcast_errors(self, dtype):\n if dtype == np.bool:\n upper = 2\n lower = 0\n else:\n info = np.iinfo(dtype)\n upper = int(info.max) + 1\n lower = info.min\n with pytest.raises(ValueError):\n self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)\n with pytest.raises(ValueError):\n self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)\n with pytest.raises(ValueError):\n self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)\n with pytest.raises(ValueError):\n self.rg.integers([0], [0], dtype=dtype)\n\n\nclass TestMT19937(RNG):\n @classmethod\n def setup_class(cls):\n cls.bit_generator = MT19937\n cls.advance = None\n cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]\n cls.rg = Generator(cls.bit_generator(*cls.seed))\n cls.initial_state = cls.rg.bit_generator.state\n cls.seed_vector_bits = 32\n cls._extra_setup()\n cls.seed_error = ValueError\n\n def test_numpy_state(self):\n nprg = np.random.RandomState()\n nprg.standard_normal(99)\n state = nprg.get_state()\n self.rg.bit_generator.state = state\n state2 = self.rg.bit_generator.state\n assert_((state[1] == state2['state']['key']).all())\n assert_((state[2] == state2['state']['pos']))\n\n\nclass TestPhilox(RNG):\n @classmethod\n def setup_class(cls):\n cls.bit_generator = Philox\n cls.advance = 2**63 + 2**31 + 2**15 + 1\n cls.seed = [12345]\n cls.rg = Generator(cls.bit_generator(*cls.seed))\n cls.initial_state = cls.rg.bit_generator.state\n cls.seed_vector_bits = 64\n cls._extra_setup()\n\n\nclass TestSFC64(RNG):\n @classmethod\n def setup_class(cls):\n cls.bit_generator = SFC64\n cls.advance = None\n cls.seed = [12345]\n cls.rg = Generator(cls.bit_generator(*cls.seed))\n cls.initial_state = cls.rg.bit_generator.state\n cls.seed_vector_bits = 192\n cls._extra_setup()\n\n\nclass TestPCG64(RNG):\n @classmethod\n def setup_class(cls):\n cls.bit_generator = PCG64\n cls.advance = 2**63 + 2**31 + 2**15 + 1\n cls.seed = [12345]\n cls.rg = Generator(cls.bit_generator(*cls.seed))\n cls.initial_state = cls.rg.bit_generator.state\n cls.seed_vector_bits = 64\n cls._extra_setup()\n\n\nclass TestPCG64DXSM(RNG):\n @classmethod\n def setup_class(cls):\n cls.bit_generator = PCG64DXSM\n cls.advance = 2**63 + 2**31 + 2**15 + 1\n cls.seed = [12345]\n cls.rg = Generator(cls.bit_generator(*cls.seed))\n cls.initial_state = cls.rg.bit_generator.state\n cls.seed_vector_bits = 64\n cls._extra_setup()\n\n\nclass TestDefaultRNG(RNG):\n @classmethod\n def setup_class(cls):\n # This will duplicate some tests that directly instantiate a fresh\n # Generator(), but that's okay.\n cls.bit_generator = PCG64\n cls.advance = 2**63 + 2**31 + 2**15 + 1\n cls.seed = [12345]\n cls.rg = np.random.default_rng(*cls.seed)\n cls.initial_state = 
cls.rg.bit_generator.state\n cls.seed_vector_bits = 64\n cls._extra_setup()\n\n def test_default_is_pcg64(self):\n # In order to change the default BitGenerator, we'll go through\n # a deprecation cycle to move to a different function.\n assert_(isinstance(self.rg.bit_generator, PCG64))\n\n def test_seed(self):\n np.random.default_rng()\n np.random.default_rng(None)\n np.random.default_rng(12345)\n np.random.default_rng(0)\n np.random.default_rng(43660444402423911716352051725018508569)\n np.random.default_rng([43660444402423911716352051725018508569,\n 279705150948142787361475340226491943209])\n with pytest.raises(ValueError):\n np.random.default_rng(-1)\n with pytest.raises(ValueError):\n np.random.default_rng([12345, -1])\n", "output": ["comp_state", "params_0", "warmup", "params_1", "TestPhilox", "TestDefaultRNG", "RNG", "TestPCG64DXSM", "TestPCG64", "TestMT19937", "TestSFC64"], "metadata": {"file_path": "numpy-main/numpy/random/tests/test_smoke.py", "file_length": 10714, "symbol_dict": [{"symbol": "warmup", "type": "mannual_defined_function", "byte_location": 2005, "location": 864}, {"symbol": "comp_state", "type": "mannual_defined_function", "byte_location": 1430, "location": 687}, {"symbol": "params_0", "type": "mannual_defined_function", "byte_location": 445, "location": 161}, {"symbol": "params_1", "type": "mannual_defined_function", "byte_location": 736, "location": 304}, {"symbol": "TestMT19937", "type": "mannual_defined_class", "byte_location": 24826, "location": 9384}, {"symbol": "TestSFC64", "type": "mannual_defined_class", "byte_location": 25911, "location": 9796}, {"symbol": "TestPCG64", "type": "mannual_defined_class", "byte_location": 26240, "location": 9922}, {"symbol": "TestPCG64DXSM", "type": "mannual_defined_class", "byte_location": 26589, "location": 10066}, {"symbol": "RNG", "type": "mannual_defined_class", "byte_location": 2477, "location": 1083}, {"symbol": "TestDefaultRNG", "type": "mannual_defined_class", "byte_location": 26946, "location": 10216}, {"symbol": "TestPhilox", "type": "mannual_defined_class", "byte_location": 25560, "location": 9655}]}} {"input": "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Upgrader for Python scripts according to an API change specification.\"\"\"\n\nimport ast\nimport collections\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport traceback\n\nimport pasta\n\n\n# Some regular expressions we will need for parsing\nFIND_OPEN = re.compile(r\"^\\s*(\\[).*$\")\nFIND_STRING_CHARS = re.compile(r\"['\\\"]\")\n\n\nINFO = \"INFO\"\nWARNING = \"WARNING\"\nERROR = \"ERROR\"\n\n\nImportRename = collections.namedtuple(\n \"ImportRename\", [\"new_name\", \"excluded_prefixes\"])\n\n\ndef full_name_node(name, ctx=ast.Load()):\n \"\"\"Make an Attribute or Name node for name.\n\n Translate a qualified name into nested Attribute nodes (and a Name node).\n\n Args:\n name: The name to translate to a node.\n ctx: What context this name is used in. Defaults to Load()\n\n Returns:\n A Name or Attribute node.\n \"\"\"\n names = name.split(\".\")\n names.reverse()\n node = ast.Name(id=names.pop(), ctx=ast.Load())\n while names:\n node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())\n\n # Change outermost ctx to the one given to us (inner ones should be Load).\n node.ctx = ctx\n return node\n\n\ndef get_arg_value(node, arg_name, arg_pos=None):\n \"\"\"Get the value of an argument from a ast.Call node.\n\n This function goes through the positional and keyword arguments to check\n whether a given argument was used, and if so, returns its value (the node\n representing its value).\n\n This cannot introspect *args or **args, but it safely handles *args in\n Python3.5+.\n\n Args:\n node: The ast.Call node to extract arg values from.\n arg_name: The name of the argument to extract.\n arg_pos: The position of the argument (in case it's passed as a positional\n argument).\n\n Returns:\n A tuple (arg_present, arg_value) containing a boolean indicating whether\n the argument is present, and its value in case it is.\n \"\"\"\n # Check keyword args\n if arg_name is not None:\n for kw in node.keywords:\n if kw.arg == arg_name:\n return (True, kw.value)\n\n # Check positional args\n if arg_pos is not None:\n idx = 0\n for arg in node.args:\n if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):\n continue # Can't parse Starred\n if idx == arg_pos:\n return (True, arg)\n idx += 1\n\n return (False, None)\n\n\ndef uses_star_args_in_call(node):\n \"\"\"Check if an ast.Call node uses arbitrary-length positional *args.\n\n This function works with the AST call node format of Python3.5+\n as well as the different AST format of earlier versions of Python.\n\n Args:\n node: The ast.Call node to check arg values for.\n\n Returns:\n True if the node uses starred variadic positional args or keyword args.\n False if it does not.\n \"\"\"\n if sys.version_info[:2] >= (3, 5):\n # Check for an *args usage in python 3.5+\n for arg in node.args:\n if isinstance(arg, ast.Starred):\n return True\n else:\n if 
node.starargs:\n return True\n return False\n\n\ndef uses_star_kwargs_in_call(node):\n \"\"\"Check if an ast.Call node uses arbitrary-length **kwargs.\n\n This function works with the AST call node format of Python3.5+\n as well as the different AST format of earlier versions of Python.\n\n Args:\n node: The ast.Call node to check arg values for.\n\n Returns:\n True if the node uses starred variadic positional args or keyword args.\n False if it does not.\n \"\"\"\n if sys.version_info[:2] >= (3, 5):\n # Check for a **kwarg usage in python 3.5+\n for keyword in node.keywords:\n if keyword.arg is None:\n return True\n else:\n if node.kwargs:\n return True\n return False\n\n\ndef uses_star_args_or_kwargs_in_call(node):\n \"\"\"Check if an ast.Call node uses arbitrary-length *args or **kwargs.\n\n This function works with the AST call node format of Python3.5+\n as well as the different AST format of earlier versions of Python.\n\n Args:\n node: The ast.Call node to check arg values for.\n\n Returns:\n True if the node uses starred variadic positional args or keyword args.\n False if it does not.\n \"\"\"\n return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)\n\n\ndef excluded_from_module_rename(module, import_rename_spec):\n \"\"\"Check if this module import should not be renamed.\n\n Args:\n module: (string) module name.\n import_rename_spec: ImportRename instance.\n\n Returns:\n True if this import should not be renamed according to the\n import_rename_spec.\n \"\"\"\n for excluded_prefix in import_rename_spec.excluded_prefixes:\n if module.startswith(excluded_prefix):\n return True\n return False\n\n\nclass APIChangeSpec:\n \"\"\"This class defines the transformations that need to happen.\n\n This class must provide the following fields:\n\n * `function_keyword_renames`: maps function names to a map of old -> new\n argument names\n * `symbol_renames`: maps function names to new function names\n * `change_to_function`: a set of function names that have changed (for\n notifications)\n * `function_reorders`: maps functions whose argument order has changed to the\n list of arguments in the new order\n * `function_warnings`: maps full names of functions to warnings that will be\n printed out if the function is used. (e.g. tf.nn.convolution())\n * `function_transformers`: maps function names to custom handlers\n * `module_deprecations`: maps module names to warnings that will be printed\n if the module is still used after all other transformations have run\n * `import_renames`: maps import name (must be a short name without '.')\n to ImportRename instance.\n\n For an example, see `TFAPIChangeSpec`.\n \"\"\"\n\n def preprocess(self, root_node): # pylint: disable=unused-argument\n \"\"\"Preprocess a parse tree. 
Return a preprocessed node, logs and errors.\"\"\"\n return root_node, [], []\n\n def clear_preprocessing(self):\n \"\"\"Restore this APIChangeSpec to before it preprocessed a file.\n\n This is needed if preprocessing a file changed any rewriting rules.\n \"\"\"\n pass\n\n\nclass NoUpdateSpec(APIChangeSpec):\n \"\"\"A specification of an API change which doesn't change anything.\"\"\"\n\n def __init__(self):\n self.function_handle = {}\n self.function_reorders = {}\n self.function_keyword_renames = {}\n self.symbol_renames = {}\n self.function_warnings = {}\n self.change_to_function = {}\n self.module_deprecations = {}\n self.function_transformers = {}\n self.import_renames = {}\n\n\nclass _PastaEditVisitor(ast.NodeVisitor):\n \"\"\"AST Visitor that processes function calls.\n\n Updates function calls from old API version to new API version using a given\n change spec.\n \"\"\"\n\n def __init__(self, api_change_spec):\n self._api_change_spec = api_change_spec\n self._log = [] # Holds 4-tuples: severity, line, col, msg.\n self._stack = [] # Allow easy access to parents.\n\n # Overridden to maintain a stack of nodes to allow for parent access\n def visit(self, node):\n self._stack.append(node)\n super(_PastaEditVisitor, self).visit(node)\n self._stack.pop()\n\n @property\n def errors(self):\n return [log for log in self._log if log[0] == ERROR]\n\n @property\n def warnings(self):\n return [log for log in self._log if log[0] == WARNING]\n\n @property\n def warnings_and_errors(self):\n return [log for log in self._log if log[0] in (WARNING, ERROR)]\n\n @property\n def info(self):\n return [log for log in self._log if log[0] == INFO]\n\n @property\n def log(self):\n return self._log\n\n def add_log(self, severity, lineno, col, msg):\n self._log.append((severity, lineno, col, msg))\n print(\"%s line %d:%d: %s\" % (severity, lineno, col, msg))\n\n def add_logs(self, logs):\n \"\"\"Record a log and print it.\n\n The log should be a tuple `(severity, lineno, col_offset, msg)`, which will\n be printed and recorded. It is part of the log available in the `self.log`\n property.\n\n Args:\n logs: The logs to add. 
Must be a list of tuples\n `(severity, lineno, col_offset, msg)`.\n \"\"\"\n self._log.extend(logs)\n for log in logs:\n print(\"%s line %d:%d: %s\" % log)\n\n def _get_applicable_entries(self, transformer_field, full_name, name):\n \"\"\"Get all list entries indexed by name that apply to full_name or name.\"\"\"\n # Transformers are indexed to full name, name, or no name\n # as a performance optimization.\n function_transformers = getattr(self._api_change_spec,\n transformer_field, {})\n\n glob_name = \"*.\" + name if name else None\n transformers = []\n if full_name in function_transformers:\n transformers.append(function_transformers[full_name])\n if glob_name in function_transformers:\n transformers.append(function_transformers[glob_name])\n if \"*\" in function_transformers:\n transformers.append(function_transformers[\"*\"])\n return transformers\n\n def _get_applicable_dict(self, transformer_field, full_name, name):\n \"\"\"Get all dict entries indexed by name that apply to full_name or name.\"\"\"\n # Transformers are indexed to full name, name, or no name\n # as a performance optimization.\n function_transformers = getattr(self._api_change_spec,\n transformer_field, {})\n\n glob_name = \"*.\" + name if name else None\n transformers = function_transformers.get(\"*\", {}).copy()\n transformers.update(function_transformers.get(glob_name, {}))\n transformers.update(function_transformers.get(full_name, {}))\n return transformers\n\n def _get_full_name(self, node):\n \"\"\"Traverse an Attribute node to generate a full name, e.g., \"tf.foo.bar\".\n\n This is the inverse of `full_name_node`.\n\n Args:\n node: A Node of type Attribute.\n\n Returns:\n a '.'-delimited full-name or None if node was not Attribute or Name.\n i.e. `foo()+b).bar` returns None, while `a.b.c` would return \"a.b.c\".\n \"\"\"\n curr = node\n items = []\n while not isinstance(curr, ast.Name):\n if not isinstance(curr, ast.Attribute):\n return None\n items.append(curr.attr)\n curr = curr.value\n items.append(curr.id)\n return \".\".join(reversed(items))\n\n def _maybe_add_warning(self, node, full_name):\n \"\"\"Adds an error to be printed about full_name at node.\"\"\"\n function_warnings = self._api_change_spec.function_warnings\n if full_name in function_warnings:\n level, message = function_warnings[full_name]\n message = message.replace(\"\", full_name)\n self.add_log(level, node.lineno, node.col_offset,\n \"%s requires manual check. %s\" % (full_name, message))\n return True\n else:\n return False\n\n def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):\n \"\"\"Adds a warning if full_name is a deprecated module.\"\"\"\n warnings = self._api_change_spec.module_deprecations\n if full_name in warnings:\n level, message = warnings[full_name]\n message = message.replace(\"\", whole_name)\n self.add_log(level, node.lineno, node.col_offset,\n \"Using member %s in deprecated module %s. %s\" % (whole_name,\n full_name,\n message))\n return True\n else:\n return False\n\n def _maybe_add_call_warning(self, node, full_name, name):\n \"\"\"Print a warning when specific functions are called with selected args.\n\n The function _print_warning_for_function matches the full name of the called\n function, e.g., tf.foo.bar(). This function matches the function name that\n is called, as long as the function is an attribute. 
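# --- Hedged, self-contained sketch of the attribute-chain traversal idea used
# by _get_full_name above (stdlib ast only; `full_name_of` is a local helper
# name, not part of this module):
import ast

def full_name_of(node):
    parts = []
    while isinstance(node, ast.Attribute):
        parts.append(node.attr)
        node = node.value
    if not isinstance(node, ast.Name):
        return None                       # e.g. (foo() + b).bar has no dotted full name
    parts.append(node.id)
    return ".".join(reversed(parts))

call = ast.parse("tf.nn.convolution(x)").body[0].value   # an ast.Call node
assert full_name_of(call.func) == "tf.nn.convolution"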
For example,\n `tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.\n\n Args:\n node: ast.Call object\n full_name: The precomputed full name of the callable, if one exists, None\n otherwise.\n name: The precomputed name of the callable, if one exists, None otherwise.\n\n Returns:\n Whether an error was recorded.\n \"\"\"\n # Only look for *.-warnings here, the other will be handled by the Attribute\n # visitor. Also, do not warn for bare functions, only if the call func is\n # an attribute.\n warned = False\n if isinstance(node.func, ast.Attribute):\n warned = self._maybe_add_warning(node, \"*.\" + name)\n\n # All arg warnings are handled here, since only we have the args\n arg_warnings = self._get_applicable_dict(\"function_arg_warnings\",\n full_name, name)\n\n variadic_args = uses_star_args_or_kwargs_in_call(node)\n\n for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):\n present, _ = get_arg_value(node, kwarg, arg) or variadic_args\n if present:\n warned = True\n warning_message = warning.replace(\"\", full_name or name)\n template = \"%s called with %s argument, requires manual check: %s\"\n if variadic_args:\n template = (\"%s called with *args or **kwargs that may include %s, \"\n \"requires manual check: %s\")\n self.add_log(level, node.lineno, node.col_offset,\n template % (full_name or name, kwarg, warning_message))\n\n return warned\n\n def _maybe_rename(self, parent, node, full_name):\n \"\"\"Replace node (Attribute or Name) with a node representing full_name.\"\"\"\n new_name = self._api_change_spec.symbol_renames.get(full_name, None)\n if new_name:\n self.add_log(INFO, node.lineno, node.col_offset,\n \"Renamed %r to %r\" % (full_name, new_name))\n new_node = full_name_node(new_name, node.ctx)\n ast.copy_location(new_node, node)\n pasta.ast_utils.replace_child(parent, node, new_node)\n return True\n else:\n return False\n\n def _maybe_change_to_function_call(self, parent, node, full_name):\n \"\"\"Wraps node (typically, an Attribute or Expr) in a Call.\"\"\"\n if full_name in self._api_change_spec.change_to_function:\n if not isinstance(parent, ast.Call):\n # ast.Call's constructor is really picky about how many arguments it\n # wants, and also, it changed between Py2 and Py3.\n new_node = ast.Call(node, [], [])\n pasta.ast_utils.replace_child(parent, node, new_node)\n ast.copy_location(new_node, node)\n self.add_log(INFO, node.lineno, node.col_offset,\n \"Changed %r to a function call\" % full_name)\n return True\n return False\n\n def _maybe_add_arg_names(self, node, full_name):\n \"\"\"Make args into keyword args if function called full_name requires it.\"\"\"\n function_reorders = self._api_change_spec.function_reorders\n\n if full_name in function_reorders:\n if uses_star_args_in_call(node):\n self.add_log(WARNING, node.lineno, node.col_offset,\n \"(Manual check required) upgrading %s may require \"\n \"re-ordering the call arguments, but it was passed \"\n \"variable-length positional *args. 
The upgrade \"\n \"script cannot handle these automatically.\" % full_name)\n\n reordered = function_reorders[full_name]\n new_args = []\n new_keywords = []\n idx = 0\n for arg in node.args:\n if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):\n continue # Can't move Starred to keywords\n keyword_arg = reordered[idx]\n if keyword_arg:\n new_keywords.append(ast.keyword(arg=keyword_arg, value=arg))\n else:\n new_args.append(arg)\n idx += 1\n\n if new_keywords:\n self.add_log(INFO, node.lineno, node.col_offset,\n \"Added keywords to args of function %r\" % full_name)\n node.args = new_args\n node.keywords = new_keywords + (node.keywords or [])\n return True\n return False\n\n def _maybe_modify_args(self, node, full_name, name):\n \"\"\"Rename keyword args if the function called full_name requires it.\"\"\"\n renamed_keywords = self._get_applicable_dict(\"function_keyword_renames\",\n full_name, name)\n\n if not renamed_keywords:\n return False\n\n if uses_star_kwargs_in_call(node):\n self.add_log(WARNING, node.lineno, node.col_offset,\n \"(Manual check required) upgrading %s may require \"\n \"renaming or removing call arguments, but it was passed \"\n \"variable-length *args or **kwargs. The upgrade \"\n \"script cannot handle these automatically.\" %\n (full_name or name))\n modified = False\n new_keywords = []\n for keyword in node.keywords:\n argkey = keyword.arg\n if argkey in renamed_keywords:\n modified = True\n if renamed_keywords[argkey] is None:\n lineno = getattr(keyword, \"lineno\", node.lineno)\n col_offset = getattr(keyword, \"col_offset\", node.col_offset)\n self.add_log(INFO, lineno, col_offset,\n \"Removed argument %s for function %s\" % (\n argkey, full_name or name))\n else:\n keyword.arg = renamed_keywords[argkey]\n lineno = getattr(keyword, \"lineno\", node.lineno)\n col_offset = getattr(keyword, \"col_offset\", node.col_offset)\n self.add_log(INFO, lineno, col_offset,\n \"Renamed keyword argument for %s from %s to %s\" % (\n full_name, argkey, renamed_keywords[argkey]))\n new_keywords.append(keyword)\n else:\n new_keywords.append(keyword)\n\n if modified:\n node.keywords = new_keywords\n return modified\n\n def visit_Call(self, node): # pylint: disable=invalid-name\n \"\"\"Handle visiting a call node in the AST.\n\n Args:\n node: Current Node\n \"\"\"\n assert self._stack[-1] is node\n\n # Get the name for this call, so we can index stuff with it.\n full_name = self._get_full_name(node.func)\n if full_name:\n name = full_name.split(\".\")[-1]\n elif isinstance(node.func, ast.Name):\n name = node.func.id\n elif isinstance(node.func, ast.Attribute):\n name = node.func.attr\n else:\n name = None\n\n # Call standard transformers for this node.\n # Make sure warnings come first, since args or names triggering warnings\n # may be removed by the other transformations.\n self._maybe_add_call_warning(node, full_name, name)\n # Make all args into kwargs\n self._maybe_add_arg_names(node, full_name)\n # Argument name changes or deletions\n self._maybe_modify_args(node, full_name, name)\n\n # Call transformers. These have the ability to modify the node, and if they\n # do, will return the new node they created (or the same node if they just\n # changed it). 
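# --- Hedged end-to-end sketch (MySpec and the source line are illustrative, not
# from this module): a change spec whose function_keyword_renames entry renames
# a keyword argument, run through the visitor defined here on a pasta parse tree.
class MySpec(NoUpdateSpec):
    def __init__(self):
        NoUpdateSpec.__init__(self)
        self.function_keyword_renames = {"tf.foo": {"old_arg": "new_arg"}}

tree = pasta.parse("tf.foo(old_arg=1)\n")
_PastaEditVisitor(MySpec()).visit(tree)
print(pasta.dump(tree))   # expected to print: tf.foo(new_arg=1)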
The are given the parent, but we will take care of\n # integrating their changes into the parent if they return a new node.\n #\n # These are matched on the old name, since renaming is performed by the\n # Attribute visitor, which happens later.\n transformers = self._get_applicable_entries(\"function_transformers\",\n full_name, name)\n\n parent = self._stack[-2]\n\n if transformers:\n if uses_star_args_or_kwargs_in_call(node):\n self.add_log(WARNING, node.lineno, node.col_offset,\n \"(Manual check required) upgrading %s may require \"\n \"modifying call arguments, but it was passed \"\n \"variable-length *args or **kwargs. The upgrade \"\n \"script cannot handle these automatically.\" %\n (full_name or name))\n\n for transformer in transformers:\n logs = []\n new_node = transformer(parent, node, full_name, name, logs)\n self.add_logs(logs)\n if new_node and new_node is not node:\n pasta.ast_utils.replace_child(parent, node, new_node)\n node = new_node\n self._stack[-1] = node\n\n self.generic_visit(node)\n\n def visit_Attribute(self, node): # pylint: disable=invalid-name\n \"\"\"Handle bare Attributes i.e. [tf.foo, tf.bar].\"\"\"\n assert self._stack[-1] is node\n\n full_name = self._get_full_name(node)\n if full_name:\n parent = self._stack[-2]\n\n # Make sure the warning comes first, otherwise the name may have changed\n self._maybe_add_warning(node, full_name)\n\n # Once we did a modification, node is invalid and not worth inspecting\n # further. Also, we only perform modifications for simple nodes, so\n # There'd be no point in descending further.\n if self._maybe_rename(parent, node, full_name):\n return\n if self._maybe_change_to_function_call(parent, node, full_name):\n return\n\n # The isinstance check is enough -- a bare Attribute is never root.\n i = 2\n while isinstance(self._stack[-i], ast.Attribute):\n i += 1\n whole_name = pasta.dump(self._stack[-(i-1)])\n\n self._maybe_add_module_deprecation_warning(node, full_name, whole_name)\n\n self.generic_visit(node)\n\n def visit_Import(self, node): # pylint: disable=invalid-name\n \"\"\"Handle visiting an import node in the AST.\n\n Args:\n node: Current Node\n \"\"\"\n new_aliases = []\n import_updated = False\n import_renames = getattr(self._api_change_spec, \"import_renames\", {})\n max_submodule_depth = getattr(self._api_change_spec, \"max_submodule_depth\",\n 1)\n inserts_after_imports = getattr(self._api_change_spec,\n \"inserts_after_imports\", {})\n\n # This loop processes imports in the format\n # import foo as f, bar as b\n for import_alias in node.names:\n all_import_components = import_alias.name.split(\".\")\n # Look for rename, starting with longest import levels.\n found_update = False\n for i in reversed(list(range(1, max_submodule_depth + 1))):\n import_component = all_import_components[0]\n for j in range(1, min(i, len(all_import_components))):\n import_component += \".\" + all_import_components[j]\n import_rename_spec = import_renames.get(import_component, None)\n\n if not import_rename_spec or excluded_from_module_rename(\n import_alias.name, import_rename_spec):\n continue\n\n new_name = (\n import_rename_spec.new_name +\n import_alias.name[len(import_component):])\n\n # If current import is\n # import foo\n # then new import should preserve imported name:\n # import new_foo as foo\n # This happens when module has just one component.\n new_asname = import_alias.asname\n if not new_asname and \".\" not in import_alias.name:\n new_asname = import_alias.name\n\n new_alias = ast.alias(name=new_name, 
asname=new_asname)\n new_aliases.append(new_alias)\n import_updated = True\n found_update = True\n\n # Insert any followup lines that should happen after this import.\n full_import = (import_alias.name, import_alias.asname)\n insert_offset = 1\n for line_to_insert in inserts_after_imports.get(full_import, []):\n assert self._stack[-1] is node\n parent = self._stack[-2]\n\n new_line_node = pasta.parse(line_to_insert)\n ast.copy_location(new_line_node, node)\n parent.body.insert(\n parent.body.index(node) + insert_offset, new_line_node)\n insert_offset += 1\n\n # Insert a newline after the import if necessary\n old_suffix = pasta.base.formatting.get(node, \"suffix\")\n if old_suffix is None:\n old_suffix = os.linesep\n if os.linesep not in old_suffix:\n pasta.base.formatting.set(node, \"suffix\", old_suffix + os.linesep)\n\n # Apply indentation to new node.\n pasta.base.formatting.set(new_line_node, \"prefix\",\n pasta.base.formatting.get(node, \"prefix\"))\n pasta.base.formatting.set(new_line_node, \"suffix\", os.linesep)\n self.add_log(\n INFO, node.lineno, node.col_offset,\n \"Adding `%s` after import of %s\" %\n (new_line_node, import_alias.name))\n # Find one match, break\n if found_update:\n break\n # No rename is found for all levels\n if not found_update:\n new_aliases.append(import_alias) # no change needed\n\n # Replace the node if at least one import needs to be updated.\n if import_updated:\n assert self._stack[-1] is node\n parent = self._stack[-2]\n\n new_node = ast.Import(new_aliases)\n ast.copy_location(new_node, node)\n pasta.ast_utils.replace_child(parent, node, new_node)\n self.add_log(\n INFO, node.lineno, node.col_offset,\n \"Changed import from %r to %r.\" %\n (pasta.dump(node), pasta.dump(new_node)))\n\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node): # pylint: disable=invalid-name\n \"\"\"Handle visiting an import-from node in the AST.\n\n Args:\n node: Current Node\n \"\"\"\n if not node.module:\n self.generic_visit(node)\n return\n\n from_import = node.module\n\n # Look for rename based on first component of from-import.\n # i.e. based on foo in foo.bar.\n from_import_first_component = from_import.split(\".\")[0]\n import_renames = getattr(self._api_change_spec, \"import_renames\", {})\n import_rename_spec = import_renames.get(from_import_first_component, None)\n if not import_rename_spec:\n self.generic_visit(node)\n return\n\n # Split module aliases into the ones that require import update\n # and those that don't. For e.g. 
if we want to rename \"a\" to \"b\"\n # unless we import \"a.c\" in the following:\n # from a import c, d\n # we want to update import for \"d\" but not for \"c\".\n updated_aliases = []\n same_aliases = []\n for import_alias in node.names:\n full_module_name = \"%s.%s\" % (from_import, import_alias.name)\n if excluded_from_module_rename(full_module_name, import_rename_spec):\n same_aliases.append(import_alias)\n else:\n updated_aliases.append(import_alias)\n\n if not updated_aliases:\n self.generic_visit(node)\n return\n\n assert self._stack[-1] is node\n parent = self._stack[-2]\n\n # Replace first component of from-import with new name.\n new_from_import = (\n import_rename_spec.new_name +\n from_import[len(from_import_first_component):])\n updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)\n ast.copy_location(updated_node, node)\n pasta.ast_utils.replace_child(parent, node, updated_node)\n\n # If some imports had to stay the same, add another import for them.\n additional_import_log = \"\"\n if same_aliases:\n same_node = ast.ImportFrom(from_import, same_aliases, node.level,\n col_offset=node.col_offset, lineno=node.lineno)\n ast.copy_location(same_node, node)\n parent.body.insert(parent.body.index(updated_node), same_node)\n # Apply indentation to new node.\n pasta.base.formatting.set(\n same_node, \"prefix\",\n pasta.base.formatting.get(updated_node, \"prefix\"))\n additional_import_log = \" and %r\" % pasta.dump(same_node)\n\n self.add_log(\n INFO, node.lineno, node.col_offset,\n \"Changed import from %r to %r%s.\" %\n (pasta.dump(node),\n pasta.dump(updated_node),\n additional_import_log))\n\n self.generic_visit(node)\n\n\nclass AnalysisResult:\n \"\"\"This class represents an analysis result and how it should be logged.\n\n This class must provide the following fields:\n\n * `log_level`: The log level to which this detection should be logged\n * `log_message`: The message that should be logged for this detection\n\n For an example, see `VersionedTFImport`.\n \"\"\"\n\n\nclass APIAnalysisSpec:\n \"\"\"This class defines how `AnalysisResult`s should be generated.\n\n It specifies how to map imports and symbols to `AnalysisResult`s.\n\n This class must provide the following fields:\n\n * `symbols_to_detect`: maps function names to `AnalysisResult`s\n * `imports_to_detect`: maps imports represented as (full module name, alias)\n tuples to `AnalysisResult`s\n notifications)\n\n For an example, see `TFAPIImportAnalysisSpec`.\n \"\"\"\n\n\nclass PastaAnalyzeVisitor(_PastaEditVisitor):\n \"\"\"AST Visitor that looks for specific API usage without editing anything.\n\n This is used before any rewriting is done to detect if any symbols are used\n that require changing imports or disabling rewriting altogether.\n \"\"\"\n\n def __init__(self, api_analysis_spec):\n super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())\n self._api_analysis_spec = api_analysis_spec\n self._results = [] # Holds AnalysisResult objects\n\n @property\n def results(self):\n return self._results\n\n def add_result(self, analysis_result):\n self._results.append(analysis_result)\n\n def visit_Attribute(self, node): # pylint: disable=invalid-name\n \"\"\"Handle bare Attributes i.e. 
[tf.foo, tf.bar].\"\"\"\n full_name = self._get_full_name(node)\n if full_name:\n detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None)\n if detection:\n self.add_result(detection)\n self.add_log(\n detection.log_level, node.lineno, node.col_offset,\n detection.log_message)\n\n self.generic_visit(node)\n\n def visit_Import(self, node): # pylint: disable=invalid-name\n \"\"\"Handle visiting an import node in the AST.\n\n Args:\n node: Current Node\n \"\"\"\n for import_alias in node.names:\n # Detect based on full import name and alias)\n full_import = (import_alias.name, import_alias.asname)\n detection = (self._api_analysis_spec\n .imports_to_detect.get(full_import, None))\n if detection:\n self.add_result(detection)\n self.add_log(\n detection.log_level, node.lineno, node.col_offset,\n detection.log_message)\n\n self.generic_visit(node)\n\n def visit_ImportFrom(self, node): # pylint: disable=invalid-name\n \"\"\"Handle visiting an import-from node in the AST.\n\n Args:\n node: Current Node\n \"\"\"\n if not node.module:\n self.generic_visit(node)\n return\n\n from_import = node.module\n\n for import_alias in node.names:\n # Detect based on full import name(to & as)\n full_module_name = \"%s.%s\" % (from_import, import_alias.name)\n full_import = (full_module_name, import_alias.asname)\n detection = (self._api_analysis_spec\n .imports_to_detect.get(full_import, None))\n if detection:\n self.add_result(detection)\n self.add_log(\n detection.log_level, node.lineno, node.col_offset,\n detection.log_message)\n\n self.generic_visit(node)\n\n\nclass ASTCodeUpgrader:\n \"\"\"Handles upgrading a set of Python files using a given API change spec.\"\"\"\n\n def __init__(self, api_change_spec):\n if not isinstance(api_change_spec, APIChangeSpec):\n raise TypeError(\"Must pass APIChangeSpec to ASTCodeUpgrader, got %s\" %\n type(api_change_spec))\n self._api_change_spec = api_change_spec\n\n def process_file(self,\n in_filename,\n out_filename,\n no_change_to_outfile_on_error=False):\n \"\"\"Process the given python file for incompatible changes.\n\n Args:\n in_filename: filename to parse\n out_filename: output file to write to\n no_change_to_outfile_on_error: not modify the output file on errors\n Returns:\n A tuple representing number of files processed, log of actions, errors\n \"\"\"\n\n # Write to a temporary file, just in case we are doing an implace modify.\n # pylint: disable=g-backslash-continuation\n with open(in_filename, \"r\") as in_file, \\\n tempfile.NamedTemporaryFile(\"w\", delete=False) as temp_file:\n ret = self.process_opened_file(in_filename, in_file, out_filename,\n temp_file)\n # pylint: enable=g-backslash-continuation\n\n if no_change_to_outfile_on_error and ret[0] == 0:\n os.remove(temp_file.name)\n else:\n shutil.move(temp_file.name, out_filename)\n return ret\n\n def format_log(self, log, in_filename):\n log_string = \"%d:%d: %s: %s\" % (log[1], log[2], log[0], log[3])\n if in_filename:\n return in_filename + \":\" + log_string\n else:\n return log_string\n\n def update_string_pasta(self, text, in_filename):\n \"\"\"Updates a file using pasta.\"\"\"\n try:\n t = pasta.parse(text)\n except (SyntaxError, ValueError, TypeError):\n log = [\"ERROR: Failed to parse.\\n\" + traceback.format_exc()]\n return 0, \"\", log, []\n\n t, preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t)\n\n visitor = _PastaEditVisitor(self._api_change_spec)\n visitor.visit(t)\n\n self._api_change_spec.clear_preprocessing()\n\n logs = [self.format_log(log, None) 
for log in (preprocess_logs +\n visitor.log)]\n errors = [self.format_log(error, in_filename)\n for error in (preprocess_errors +\n visitor.warnings_and_errors)]\n return 1, pasta.dump(t), logs, errors\n\n def _format_log(self, log, in_filename, out_filename):\n text = \"-\" * 80 + \"\\n\"\n text += \"Processing file %r\\n outputting to %r\\n\" % (in_filename,\n out_filename)\n text += \"-\" * 80 + \"\\n\\n\"\n text += \"\\n\".join(log) + \"\\n\"\n text += \"-\" * 80 + \"\\n\\n\"\n return text\n\n def process_opened_file(self, in_filename, in_file, out_filename, out_file):\n \"\"\"Process the given python file for incompatible changes.\n\n This function is split out to facilitate StringIO testing from\n tf_upgrade_test.py.\n\n Args:\n in_filename: filename to parse\n in_file: opened file (or StringIO)\n out_filename: output file to write to\n out_file: opened file (or StringIO)\n Returns:\n A tuple representing number of files processed, log of actions, errors\n \"\"\"\n lines = in_file.readlines()\n processed_file, new_file_content, log, process_errors = (\n self.update_string_pasta(\"\".join(lines), in_filename))\n\n if out_file and processed_file:\n out_file.write(new_file_content)\n\n return (processed_file,\n self._format_log(log, in_filename, out_filename),\n process_errors)\n\n def process_tree(self, root_directory, output_root_directory,\n copy_other_files):\n \"\"\"Processes upgrades on an entire tree of python files in place.\n\n Note that only Python files. If you have custom code in other languages,\n you will need to manually upgrade those.\n\n Args:\n root_directory: Directory to walk and process.\n output_root_directory: Directory to use as base.\n copy_other_files: Copy files that are not touched by this converter.\n\n Returns:\n A tuple of files processed, the report string for all files, and a dict\n mapping filenames to errors encountered in that file.\n \"\"\"\n\n if output_root_directory == root_directory:\n return self.process_tree_inplace(root_directory)\n\n # make sure output directory doesn't exist\n if output_root_directory and os.path.exists(output_root_directory):\n print(\"Output directory %r must not already exist.\" %\n (output_root_directory))\n sys.exit(1)\n\n # make sure output directory does not overlap with root_directory\n norm_root = os.path.split(os.path.normpath(root_directory))\n norm_output = os.path.split(os.path.normpath(output_root_directory))\n if norm_root == norm_output:\n print(\"Output directory %r same as input directory %r\" %\n (root_directory, output_root_directory))\n sys.exit(1)\n\n # Collect list of files to process (we do this to correctly handle if the\n # user puts the output directory in some sub directory of the input dir)\n files_to_process = []\n files_to_copy = []\n for dir_name, _, file_list in os.walk(root_directory):\n py_files = [f for f in file_list if f.endswith(\".py\")]\n copy_files = [f for f in file_list if not f.endswith(\".py\")]\n for filename in py_files:\n fullpath = os.path.join(dir_name, filename)\n fullpath_output = os.path.join(output_root_directory,\n os.path.relpath(fullpath,\n root_directory))\n files_to_process.append((fullpath, fullpath_output))\n if copy_other_files:\n for filename in copy_files:\n fullpath = os.path.join(dir_name, filename)\n fullpath_output = os.path.join(output_root_directory,\n os.path.relpath(\n fullpath, root_directory))\n files_to_copy.append((fullpath, fullpath_output))\n\n file_count = 0\n tree_errors = {}\n report = \"\"\n report += (\"=\" * 80) + \"\\n\"\n report += \"Input 
tree: %r\\n\" % root_directory\n report += (\"=\" * 80) + \"\\n\"\n\n for input_path, output_path in files_to_process:\n output_directory = os.path.dirname(output_path)\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory)\n\n if os.path.islink(input_path):\n link_target = os.readlink(input_path)\n link_target_output = os.path.join(\n output_root_directory, os.path.relpath(link_target, root_directory))\n if (link_target, link_target_output) in files_to_process:\n # Create a link to the new location of the target file\n os.symlink(link_target_output, output_path)\n else:\n report += \"Copying symlink %s without modifying its target %s\" % (\n input_path, link_target)\n os.symlink(link_target, output_path)\n continue\n\n file_count += 1\n _, l_report, l_errors = self.process_file(input_path, output_path)\n tree_errors[input_path] = l_errors\n report += l_report\n\n for input_path, output_path in files_to_copy:\n output_directory = os.path.dirname(output_path)\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory)\n shutil.copy(input_path, output_path)\n return file_count, report, tree_errors\n\n def process_tree_inplace(self, root_directory):\n \"\"\"Process a directory of python files in place.\"\"\"\n files_to_process = []\n for dir_name, _, file_list in os.walk(root_directory):\n py_files = [\n os.path.join(dir_name, f) for f in file_list if f.endswith(\".py\")\n ]\n files_to_process += py_files\n\n file_count = 0\n tree_errors = {}\n report = \"\"\n report += (\"=\" * 80) + \"\\n\"\n report += \"Input tree: %r\\n\" % root_directory\n report += (\"=\" * 80) + \"\\n\"\n\n for path in files_to_process:\n if os.path.islink(path):\n report += \"Skipping symlink %s.\\n\" % path\n continue\n file_count += 1\n _, l_report, l_errors = self.process_file(path, path)\n tree_errors[path] = l_errors\n report += l_report\n\n return file_count, report, tree_errors\n", "output": ["get_arg_value", "full_name_node", "uses_star_kwargs_in_call", "excluded_from_module_rename", "uses_star_args_or_kwargs_in_call", "uses_star_args_in_call", "ASTCodeUpgrader", "APIChangeSpec", "_PastaEditVisitor", "AnalysisResult", "PastaAnalyzeVisitor", "NoUpdateSpec", "APIAnalysisSpec"], "metadata": {"file_path": "tensorflow-master/tensorflow/tools/compatibility/ast_edits.py", "file_length": 11965, "symbol_dict": [{"symbol": "uses_star_kwargs_in_call", "type": "mannual_defined_function", "byte_location": 3622, "location": 1104}, {"symbol": "full_name_node", "type": "mannual_defined_function", "byte_location": 1172, "location": 343}, {"symbol": "get_arg_value", "type": "mannual_defined_function", "byte_location": 1791, "location": 539}, {"symbol": "excluded_from_module_rename", "type": "mannual_defined_function", "byte_location": 4778, "location": 1462}, {"symbol": "uses_star_args_in_call", "type": "mannual_defined_function", "byte_location": 2966, "location": 899}, {"symbol": "uses_star_args_or_kwargs_in_call", "type": "mannual_defined_function", "byte_location": 4270, "location": 1304}, {"symbol": "NoUpdateSpec", "type": "mannual_defined_class", "byte_location": 6628, "location": 1968}, {"symbol": "_PastaEditVisitor", "type": "mannual_defined_class", "byte_location": 7054, "location": 2099}, {"symbol": "ASTCodeUpgrader", "type": "mannual_defined_class", "byte_location": 31361, "location": 9437}, {"symbol": "APIChangeSpec", "type": "mannual_defined_class", "byte_location": 5231, "location": 1600}, {"symbol": "AnalysisResult", "type": "mannual_defined_class", "byte_location": 28047, 
"location": 8426}, {"symbol": "PastaAnalyzeVisitor", "type": "mannual_defined_class", "byte_location": 28853, "location": 8651}, {"symbol": "APIAnalysisSpec", "type": "mannual_defined_class", "byte_location": 28390, "location": 8513}]}} {"input": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=not-callable\n# pylint: disable=redefined-builtin\n\"\"\"Layers that can merge several inputs into one.\"\"\"\n\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import array_ops_stack\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\n\n\nclass _Merge(Layer):\n \"\"\"Generic merge layer for elementwise merge functions.\n\n Used to implement `Sum`, `Average`, etc.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Intializes a Merge layer.\n\n Args:\n **kwargs: standard layer keyword arguments.\n \"\"\"\n super(_Merge, self).__init__(**kwargs)\n self.supports_masking = True\n\n def _merge_function(self, inputs):\n raise NotImplementedError\n\n def _compute_elemwise_op_output_shape(self, shape1, shape2):\n \"\"\"Computes the shape of the resultant of an elementwise operation.\n\n Args:\n shape1: tuple or None. Shape of the first tensor\n shape2: tuple or None. Shape of the second tensor\n\n Returns:\n expected output shape when an element-wise operation is\n carried out on 2 tensors with shapes shape1 and shape2.\n tuple or None.\n\n Raises:\n ValueError: if shape1 and shape2 are not compatible for\n element-wise operations.\n \"\"\"\n if None in [shape1, shape2]:\n return None\n elif len(shape1) < len(shape2):\n return self._compute_elemwise_op_output_shape(shape2, shape1)\n elif not shape2:\n return shape1\n output_shape = list(shape1[:-len(shape2)])\n for i, j in zip(shape1[-len(shape2):], shape2):\n if i is None or j is None:\n output_shape.append(None)\n elif i == 1:\n output_shape.append(j)\n elif j == 1:\n output_shape.append(i)\n else:\n if i != j:\n raise ValueError(\n 'Operands could not be broadcast '\n 'together with shapes ' + str(shape1) + ' ' + str(shape2))\n output_shape.append(i)\n return tuple(output_shape)\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n # Used purely for shape validation.\n if not isinstance(input_shape[0], tuple):\n raise ValueError('A merge layer should be called on a list of inputs.')\n if len(input_shape) < 2:\n raise ValueError('A merge layer should be called '\n 'on a list of at least 2 inputs. '\n 'Got ' + str(len(input_shape)) + ' inputs.')\n batch_sizes = {s[0] for s in input_shape if s} - {None}\n if len(batch_sizes) > 1:\n raise ValueError(\n 'Can not merge tensors with different '\n 'batch sizes. 
Got tensors with shapes : ' + str(input_shape))\n if input_shape[0] is None:\n output_shape = None\n else:\n output_shape = input_shape[0][1:]\n for i in range(1, len(input_shape)):\n if input_shape[i] is None:\n shape = None\n else:\n shape = input_shape[i][1:]\n output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)\n # If the inputs have different ranks, we have to reshape them\n # to make them broadcastable.\n if None not in input_shape and len(set(map(len, input_shape))) == 1:\n self._reshape_required = False\n else:\n self._reshape_required = True\n\n def call(self, inputs):\n if not isinstance(inputs, (list, tuple)):\n raise ValueError('A merge layer should be called on a list of inputs.')\n if self._reshape_required:\n reshaped_inputs = []\n input_ndims = list(map(backend.ndim, inputs))\n if None not in input_ndims:\n # If ranks of all inputs are available,\n # we simply expand each of them at axis=1\n # until all of them have the same rank.\n max_ndim = max(input_ndims)\n for x in inputs:\n x_ndim = backend.ndim(x)\n for _ in range(max_ndim - x_ndim):\n x = array_ops.expand_dims(x, axis=1)\n reshaped_inputs.append(x)\n return self._merge_function(reshaped_inputs)\n else:\n # Transpose all inputs so that batch size is the last dimension.\n # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)\n transposed = False\n for x in inputs:\n x_ndim = backend.ndim(x)\n if x_ndim is None:\n x_shape = array_ops.shape(x)\n batch_size = x_shape[0]\n new_shape = backend.concatenate(\n [x_shape[1:],\n array_ops.expand_dims(batch_size, axis=-1)])\n x_transposed = array_ops.reshape(\n x,\n array_ops_stack.stack(\n [batch_size, math_ops.reduce_prod(x_shape[1:])], axis=0))\n x_transposed = array_ops.transpose(x_transposed, perm=(1, 0))\n x_transposed = array_ops.reshape(x_transposed, new_shape)\n reshaped_inputs.append(x_transposed)\n transposed = True\n elif x_ndim > 1:\n dims = list(range(1, x_ndim)) + [0]\n reshaped_inputs.append(array_ops.transpose(x, perm=dims))\n transposed = True\n else:\n # We don't transpose inputs if they are 1D vectors or scalars.\n reshaped_inputs.append(x)\n y = self._merge_function(reshaped_inputs)\n y_ndim = backend.ndim(y)\n if transposed:\n # If inputs have been transposed, we have to transpose the output too.\n if y_ndim is None:\n y_shape = array_ops.shape(y)\n y_ndim = array_ops.shape(y_shape)[0]\n batch_size = y_shape[y_ndim - 1]\n new_shape = backend.concatenate([\n array_ops.expand_dims(batch_size, axis=-1), y_shape[:y_ndim - 1]\n ])\n y = array_ops.reshape(y, (-1, batch_size))\n y = array_ops.transpose(y, perm=(1, 0))\n y = array_ops.reshape(y, new_shape)\n elif y_ndim > 1:\n dims = [y_ndim - 1] + list(range(y_ndim - 1))\n y = array_ops.transpose(y, perm=dims)\n return y\n else:\n return self._merge_function(inputs)\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if input_shape[0] is None:\n output_shape = None\n else:\n output_shape = input_shape[0][1:]\n for i in range(1, len(input_shape)):\n if input_shape[i] is None:\n shape = None\n else:\n shape = input_shape[i][1:]\n output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)\n batch_sizes = {s[0] for s in input_shape if s is not None} - {None}\n if len(batch_sizes) == 1:\n output_shape = (list(batch_sizes)[0],) + output_shape\n else:\n output_shape = (None,) + output_shape\n return output_shape\n\n def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, (tuple, list)):\n 
raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, (tuple, list)):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if all(m is None for m in mask):\n return None\n masks = [array_ops.expand_dims(m, axis=0) for m in mask if m is not None]\n return backend.all(\n backend.concatenate(masks, axis=0), axis=0, keepdims=False)\n\n\nclass Add(_Merge):\n \"\"\"Layer that adds a list of inputs.\n\n It takes as input a list of tensors,\n all of the same shape, and returns\n a single tensor (also of the same shape).\n\n Examples:\n\n >>> input_shape = (2, 3, 4)\n >>> x1 = tf.random.normal(input_shape)\n >>> x2 = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Add()([x1, x2])\n >>> print(y.shape)\n (2, 3, 4)\n\n Used in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> # equivalent to `added = tf.keras.layers.add([x1, x2])`\n >>> added = tf.keras.layers.Add()([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(added)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n \"\"\"\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output += inputs[i]\n return output\n\n\nclass Subtract(_Merge):\n \"\"\"Layer that subtracts two inputs.\n\n It takes as input a list of tensors of size 2,\n both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),\n also of the same shape.\n\n Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n # Equivalent to subtracted = keras.layers.subtract([x1, x2])\n subtracted = keras.layers.Subtract()([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n \"\"\"\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n super(Subtract, self).build(input_shape)\n if len(input_shape) != 2:\n raise ValueError('A `Subtract` layer should be called '\n 'on exactly 2 inputs')\n\n def _merge_function(self, inputs):\n if len(inputs) != 2:\n raise ValueError('A `Subtract` layer should be called '\n 'on exactly 2 inputs')\n return inputs[0] - inputs[1]\n\n\nclass Multiply(_Merge):\n \"\"\"Layer that multiplies (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1),\n ... 
np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> multiplied = tf.keras.layers.Multiply()([x1, x2])\n >>> multiplied.shape\n TensorShape([5, 8])\n \"\"\"\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output = output * inputs[i]\n return output\n\n\nclass Average(_Merge):\n \"\"\"Layer that averages a list of inputs element-wise.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n Example:\n\n >>> x1 = np.ones((2, 2))\n >>> x2 = np.zeros((2, 2))\n >>> y = tf.keras.layers.Average()([x1, x2])\n >>> y.numpy().tolist()\n [[0.5, 0.5], [0.5, 0.5]]\n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> avg = tf.keras.layers.Average()([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(avg)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Raises:\n ValueError: If there is a shape mismatch between the inputs and the shapes\n cannot be broadcasted to match.\n \"\"\"\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output += inputs[i]\n return output / len(inputs)\n\n\nclass Maximum(_Merge):\n \"\"\"Layer that computes the maximum (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),\n ... np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> maxed = tf.keras.layers.Maximum()([x1, x2])\n >>> maxed.shape\n TensorShape([5, 8])\n \"\"\"\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output = math_ops.maximum(output, inputs[i])\n return output\n\n\nclass Minimum(_Merge):\n \"\"\"Layer that computes the minimum (element-wise) a list of inputs.\n\n It takes as input a list of tensors, all of the same shape, and returns\n a single tensor (also of the same shape).\n\n >>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1),\n ... 
np.arange(5, 10).reshape(5, 1)])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> minned = tf.keras.layers.Minimum()([x1, x2])\n >>> minned.shape\n TensorShape([5, 8])\n \"\"\"\n\n def _merge_function(self, inputs):\n output = inputs[0]\n for i in range(1, len(inputs)):\n output = math_ops.minimum(output, inputs[i])\n return output\n\n\nclass Concatenate(_Merge):\n \"\"\"Layer that concatenates a list of inputs.\n\n It takes as input a list of tensors, all of the same shape except\n for the concatenation axis, and returns a single tensor that is the\n concatenation of all inputs.\n\n >>> x = np.arange(20).reshape(2, 2, 5)\n >>> print(x)\n [[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> y = np.arange(20, 30).reshape(2, 1, 5)\n >>> print(y)\n [[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n >>> tf.keras.layers.Concatenate(axis=1)([x, y])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> concatted = tf.keras.layers.Concatenate()([x1, x2])\n >>> concatted.shape\n TensorShape([5, 16])\n\n \"\"\"\n\n def __init__(self, axis=-1, **kwargs):\n \"\"\"Instantiates a Concatenate layer.\n\n >>> x = np.arange(20).reshape(2, 2, 5)\n >>> print(x)\n [[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> y = np.arange(20, 30).reshape(2, 1, 5)\n >>> print(y)\n [[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n >>> tf.keras.layers.Concatenate(axis=1)([x, y])\n \n\n Args:\n axis: Axis along which to concatenate.\n **kwargs: standard layer keyword arguments.\n \"\"\"\n super(Concatenate, self).__init__(**kwargs)\n self.axis = axis\n self.supports_masking = True\n self._reshape_required = False\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n # Used purely for shape validation.\n if not isinstance(input_shape[0], tuple) or len(input_shape) < 1:\n raise ValueError('A `Concatenate` layer should be called '\n 'on a list of at least 1 input.')\n if all(shape is None for shape in input_shape):\n return\n reduced_inputs_shapes = [list(shape) for shape in input_shape]\n shape_set = set()\n for i in range(len(reduced_inputs_shapes)):\n del reduced_inputs_shapes[i][self.axis]\n shape_set.add(tuple(reduced_inputs_shapes[i]))\n\n if len(shape_set) != 1:\n err_msg = ('A `Concatenate` layer requires inputs with matching shapes '\n 'except for the concat axis. 
Got inputs shapes: %s' %\n input_shape)\n # Make sure all the shapes have same ranks.\n ranks = set(len(shape) for shape in shape_set)\n if len(ranks) != 1:\n raise ValueError(err_msg)\n # Get the only rank for the set.\n (rank,) = ranks\n for axis in range(rank):\n # Skip the Nones in the shape since they are dynamic, also the axis for\n # concat has been removed above.\n unique_dims = set(\n shape[axis] for shape in shape_set if shape[axis] is not None)\n if len(unique_dims) > 1:\n raise ValueError(err_msg)\n\n def _merge_function(self, inputs):\n return backend.concatenate(inputs, axis=self.axis)\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if ((not isinstance(input_shape, (tuple, list))) or\n (not isinstance(input_shape[0], (tuple, list)))):\n # The tf_utils.shape_type_conversion decorator turns tensorshapes\n # into tuples, so we need to verify that `input_shape` is a list/tuple,\n # *and* that the individual elements are themselves shape tuples.\n raise ValueError('A `Concatenate` layer should be called '\n 'on a list of inputs.')\n input_shapes = input_shape\n output_shape = list(input_shapes[0])\n for shape in input_shapes[1:]:\n if output_shape[self.axis] is None or shape[self.axis] is None:\n output_shape[self.axis] = None\n break\n output_shape[self.axis] += shape[self.axis]\n return tuple(output_shape)\n\n def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, (tuple, list)):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, (tuple, list)):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if all(m is None for m in mask):\n return None\n # Make a list of masks while making sure\n # the dimensionality of each mask\n # is the same as the corresponding input.\n masks = []\n for input_i, mask_i in zip(inputs, mask):\n if mask_i is None:\n # Input is unmasked. Append all 1s to masks,\n masks.append(array_ops.ones_like(input_i, dtype='bool'))\n elif backend.ndim(mask_i) < backend.ndim(input_i):\n # Mask is smaller than the input, expand it\n masks.append(array_ops.expand_dims(mask_i, axis=-1))\n else:\n masks.append(mask_i)\n concatenated = backend.concatenate(masks, axis=self.axis)\n return backend.all(concatenated, axis=-1, keepdims=False)\n\n def get_config(self):\n config = {\n 'axis': self.axis,\n }\n base_config = super(Concatenate, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Dot(_Merge):\n \"\"\"Layer that computes a dot product between samples in two tensors.\n\n E.g. 
if applied to a list of two tensors `a` and `b` of shape\n `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`\n where each entry `i` will be the dot product between\n `a[i]` and `b[i]`.\n\n >>> x = np.arange(10).reshape(1, 5, 2)\n >>> print(x)\n [[[0 1]\n [2 3]\n [4 5]\n [6 7]\n [8 9]]]\n >>> y = np.arange(10, 20).reshape(1, 2, 5)\n >>> print(y)\n [[[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])\n \n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2])\n >>> dotted.shape\n TensorShape([5, 1])\n\n\n \"\"\"\n\n def __init__(self, axes, normalize=False, **kwargs):\n \"\"\"Initializes a layer that computes the element-wise dot product.\n\n >>> x = np.arange(10).reshape(1, 5, 2)\n >>> print(x)\n [[[0 1]\n [2 3]\n [4 5]\n [6 7]\n [8 9]]]\n >>> y = np.arange(10, 20).reshape(1, 2, 5)\n >>> print(y)\n [[[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])\n \n\n Args:\n axes: Integer or tuple of integers,\n axis or axes along which to take the dot product. If a tuple, should\n be two integers corresponding to the desired axis from the first input\n and the desired axis from the second input, respectively. Note that the\n size of the two selected axes must match.\n normalize: Whether to L2-normalize samples along the\n dot product axis before taking the dot product.\n If set to True, then the output of the dot product\n is the cosine proximity between the two samples.\n **kwargs: Standard layer keyword arguments.\n \"\"\"\n super(Dot, self).__init__(**kwargs)\n if not isinstance(axes, int):\n if not isinstance(axes, (list, tuple)):\n raise TypeError('Invalid type for `axes` - '\n 'should be a list or an int.')\n if len(axes) != 2:\n raise ValueError('Invalid format for `axes` - '\n 'should contain two elements.')\n if not isinstance(axes[0], int) or not isinstance(axes[1], int):\n raise ValueError('Invalid format for `axes` - '\n 'list elements should be \"int\".')\n self.axes = axes\n self.normalize = normalize\n self.supports_masking = True\n self._reshape_required = False\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n # Used purely for shape validation.\n if not isinstance(input_shape[0], tuple) or len(input_shape) != 2:\n raise ValueError('A `Dot` layer should be called '\n 'on a list of 2 inputs.')\n shape1 = input_shape[0]\n shape2 = input_shape[1]\n if shape1 is None or shape2 is None:\n return\n if isinstance(self.axes, int):\n if self.axes < 0:\n axes = [self.axes % len(shape1), self.axes % len(shape2)]\n else:\n axes = [self.axes] * 2\n else:\n axes = self.axes\n if shape1[axes[0]] != shape2[axes[1]]:\n raise ValueError('Dimension incompatibility '\n '%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +\n 'Layer shapes: %s, %s. 
' % (shape1, shape2) +\n 'Chosen axes: %s, %s' % (axes[0], axes[1]))\n\n def _merge_function(self, inputs):\n base_layer_utils.no_ragged_support(inputs, self.name)\n if len(inputs) != 2:\n raise ValueError('A `Dot` layer should be called on exactly 2 inputs')\n x1 = inputs[0]\n x2 = inputs[1]\n if isinstance(self.axes, int):\n if self.axes < 0:\n axes = [self.axes % backend.ndim(x1), self.axes % backend.ndim(x2)]\n else:\n axes = [self.axes] * 2\n else:\n axes = []\n for i in range(len(self.axes)):\n if self.axes[i] < 0:\n axes.append(self.axes[i] % backend.ndim(inputs[i]))\n else:\n axes.append(self.axes[i])\n if self.normalize:\n x1 = nn.l2_normalize(x1, axis=axes[0])\n x2 = nn.l2_normalize(x2, axis=axes[1])\n output = backend.batch_dot(x1, x2, axes)\n return output\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:\n raise ValueError('A `Dot` layer should be called '\n 'on a list of 2 inputs.')\n shape1 = list(input_shape[0])\n shape2 = list(input_shape[1])\n if isinstance(self.axes, int):\n if self.axes < 0:\n axes = [self.axes % len(shape1), self.axes % len(shape2)]\n else:\n axes = [self.axes] * 2\n else:\n axes = self.axes\n shape1.pop(axes[0])\n shape2.pop(axes[1])\n shape2.pop(0)\n output_shape = shape1 + shape2\n if len(output_shape) == 1:\n output_shape += [1]\n return tuple(output_shape)\n\n def compute_mask(self, inputs, mask=None):\n return None\n\n def get_config(self):\n config = {\n 'axes': self.axes,\n 'normalize': self.normalize,\n }\n base_config = super(Dot, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef add(inputs, **kwargs):\n \"\"\"Functional interface to the `tf.keras.layers.Add` layer.\n\n Args:\n inputs: A list of input tensors (at least 2) with the same shape.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor as the sum of the inputs. 
It has the same shape as the inputs.\n\n Examples:\n\n >>> input_shape = (2, 3, 4)\n >>> x1 = tf.random.normal(input_shape)\n >>> x2 = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.add([x1, x2])\n >>> print(y.shape)\n (2, 3, 4)\n\n Used in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> added = tf.keras.layers.add([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(added)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n \"\"\"\n return Add(**kwargs)(inputs)\n\n\ndef subtract(inputs, **kwargs):\n \"\"\"Functional interface to the `Subtract` layer.\n\n Args:\n inputs: A list of input tensors (exactly 2).\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the difference of the inputs.\n\n Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n subtracted = keras.layers.subtract([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n \"\"\"\n return Subtract(**kwargs)(inputs)\n\n\ndef multiply(inputs, **kwargs):\n \"\"\"Functional interface to the `Multiply` layer.\n\n Example:\n\n >>> x1 = np.arange(3.0)\n >>> x2 = np.arange(3.0)\n >>> tf.keras.layers.multiply([x1, x2])\n \n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)\n >>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8)\n >>> out = tf.keras.layers.Dense(4)(out)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Args:\n inputs: A list of input tensors (at least 2).\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the element-wise product of the inputs.\n \"\"\"\n return Multiply(**kwargs)(inputs)\n\n\ndef average(inputs, **kwargs):\n \"\"\"Functional interface to the `tf.keras.layers.Average` layer.\n\n Example:\n\n >>> x1 = np.ones((2, 2))\n >>> x2 = np.zeros((2, 2))\n >>> y = tf.keras.layers.Average()([x1, x2])\n >>> y.numpy().tolist()\n [[0.5, 0.5], [0.5, 0.5]]\n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> avg = tf.keras.layers.Average()([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(avg)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Args:\n inputs: A list of input tensors (at least 2).\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the average of the inputs.\n\n Raises:\n ValueError: If there is a shape mismatch between the inputs and the shapes\n cannot be broadcasted to match.\n \"\"\"\n return Average(**kwargs)(inputs)\n\n\ndef maximum(inputs, **kwargs):\n \"\"\"Functional interface to compute maximum (element-wise) list of `inputs`.\n\n This is equivalent to the `tf.keras.layers.Maximum` layer.\n\n For example:\n\n ```python\n input1 = 
tf.keras.layers.Input(shape=(16,))\n x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)\n input2 = tf.keras.layers.Input(shape=(32,))\n x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)\n max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)\n out = tf.keras.layers.Dense(4)(max_inp)\n model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n\n Args:\n inputs: A list of input tensors (at least 2) of same shape.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor (of same shape as input tensor) with the element-wise\n maximum of the inputs.\n\n Raises:\n ValueError: If input tensors are of different shape.\n \"\"\"\n return Maximum(**kwargs)(inputs)\n\n\ndef minimum(inputs, **kwargs):\n \"\"\"Functional interface to the `Minimum` layer.\n\n Args:\n inputs: A list of input tensors (at least 2).\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the element-wise minimum of the inputs.\n \"\"\"\n return Minimum(**kwargs)(inputs)\n\n\ndef concatenate(inputs, axis=-1, **kwargs):\n \"\"\"Functional interface to the `Concatenate` layer.\n\n >>> x = np.arange(20).reshape(2, 2, 5)\n >>> print(x)\n [[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> y = np.arange(20, 30).reshape(2, 1, 5)\n >>> print(y)\n [[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n >>> tf.keras.layers.concatenate([x, y],\n ... axis=1)\n \n\n Args:\n inputs: A list of input tensors (at least 2).\n axis: Concatenation axis.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the concatenation of the inputs alongside axis `axis`.\n \"\"\"\n return Concatenate(axis=axis, **kwargs)(inputs)\n\n\ndef dot(inputs, axes, normalize=False, **kwargs):\n \"\"\"Functional interface to the `Dot` layer.\n\n Args:\n inputs: A list of input tensors (at least 2).\n axes: Integer or tuple of integers,\n axis or axes along which to take the dot product.\n normalize: Whether to L2-normalize samples along the\n dot product axis before taking the dot product.\n If set to True, then the output of the dot product\n is the cosine proximity between the two samples.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the dot product of the samples from the inputs.\n \"\"\"\n return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)\n", "output": ["subtract", "dot", "add", "maximum", "minimum", "multiply", "concatenate", "average", "Multiply", "_Merge", "Subtract", "Add", "Dot", "Concatenate", "Minimum", "Average", "Maximum"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/keras/layers/merge.py", "file_length": 11506, "symbol_dict": [{"symbol": "average", "type": "mannual_defined_function", "byte_location": 27716, "location": 10009}, {"symbol": "maximum", "type": "mannual_defined_function", "byte_location": 28739, "location": 10406}, {"symbol": "concatenate", "type": "mannual_defined_function", "byte_location": 30012, "location": 10852}, {"symbol": "multiply", "type": "mannual_defined_function", "byte_location": 26759, "location": 9628}, {"symbol": "dot", "type": "mannual_defined_function", "byte_location": 30964, "location": 11320}, {"symbol": "minimum", "type": "mannual_defined_function", "byte_location": 29709, "location": 10760}, {"symbol": "add", "type": "mannual_defined_function", "byte_location": 25060, "location": 8992}, {"symbol": "subtract", "type": "mannual_defined_function", "byte_location": 26029, "location": 9369}, {"symbol": "Minimum", "type": 
"mannual_defined_class", "byte_location": 13170, "location": 4482}, {"symbol": "Multiply", "type": "mannual_defined_class", "byte_location": 10398, "location": 3386}, {"symbol": "Dot", "type": "mannual_defined_class", "byte_location": 19607, "location": 6955}, {"symbol": "Maximum", "type": "mannual_defined_class", "byte_location": 12319, "location": 4141}, {"symbol": "Subtract", "type": "mannual_defined_class", "byte_location": 9214, "location": 2974}, {"symbol": "Concatenate", "type": "mannual_defined_class", "byte_location": 14023, "location": 4823}, {"symbol": "Average", "type": "mannual_defined_class", "byte_location": 11241, "location": 3726}, {"symbol": "Add", "type": "mannual_defined_class", "byte_location": 8200, "location": 2573}, {"symbol": "_Merge", "type": "mannual_defined_class", "byte_location": 1201, "location": 331}]}} {"input": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Various learning rate decay functions.\"\"\"\n\nimport abc\nimport math\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_conversion\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import cond\nfrom tensorflow.python.ops import control_flow_case\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.util import nest\n\n\nclass LearningRateSchedule(object):\n \"\"\"The learning rate schedule base class.\n\n You can use a learning rate schedule to modulate how the learning rate\n of your optimizer changes over time.\n\n Several built-in learning rate schedules are available, such as\n `tf.keras.optimizers.schedules.ExponentialDecay` or\n `tf.keras.optimizers.schedules.PiecewiseConstantDecay`:\n\n ```python\n lr_schedule = keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate=1e-2,\n decay_steps=10000,\n decay_rate=0.9)\n optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)\n ```\n\n A `LearningRateSchedule` instance can be passed in as the `learning_rate`\n argument of any optimizer.\n\n To implement your own schedule object, you should implement the `__call__`\n method, which takes a `step` argument (scalar integer tensor, the\n current training step count).\n Like for any other Keras object, you can also optionally\n make your object serializable by implementing the `get_config`\n and `from_config` methods.\n\n Example:\n\n ```python\n class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n\n def __init__(self, initial_learning_rate):\n self.initial_learning_rate = initial_learning_rate\n\n def __call__(self, step):\n return self.initial_learning_rate / (step + 1)\n\n optimizer = tf.keras.optimizers.SGD(learning_rate=MyLRSchedule(0.1))\n ```\n \"\"\"\n\n @abc.abstractmethod\n def 
__call__(self, step):\n raise NotImplementedError(\"Learning rate schedule must override __call__\")\n\n @abc.abstractmethod\n def get_config(self):\n raise NotImplementedError(\"Learning rate schedule must override get_config\")\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Instantiates a `LearningRateSchedule` from its config.\n\n Args:\n config: Output of `get_config()`.\n\n Returns:\n A `LearningRateSchedule` instance.\n \"\"\"\n return cls(**config)\n\n\nclass ExponentialDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses an exponential decay schedule.\n\n When training a model, it is often useful to lower the learning rate as\n the training progresses. This schedule applies an exponential decay function\n to an optimizer step, given a provided initial learning rate.\n\n The schedule a 1-arg callable that produces a decayed learning\n rate when passed the current optimizer step. This can be useful for changing\n the learning rate value across different invocations of optimizer functions.\n It is computed as:\n\n ```python\n def decayed_learning_rate(step):\n return initial_learning_rate * decay_rate ^ (step / decay_steps)\n ```\n\n If the argument `staircase` is `True`, then `step / decay_steps` is\n an integer division and the decayed learning rate follows a\n staircase function.\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate.\n Example: When fitting a Keras model, decay every 100000 steps with a base\n of 0.96:\n\n ```python\n initial_learning_rate = 0.1\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate,\n decay_steps=100000,\n decay_rate=0.96,\n staircase=True)\n\n model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n The learning rate schedule is also serializable and deserializable using\n `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_steps,\n decay_rate,\n staircase=False,\n name=None):\n \"\"\"Applies exponential decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a\n Python number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Must be positive. See the decay computation above.\n decay_rate: A scalar `float32` or `float64` `Tensor` or a\n Python number. The decay rate.\n staircase: Boolean. If `True` decay the learning rate at discrete\n intervals\n name: String. Optional name of the operation. 
Defaults to\n 'ExponentialDecay'.\n \"\"\"\n super(ExponentialDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"ExponentialDecay\") as name:\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n decay_steps = math_ops.cast(self.decay_steps, dtype)\n decay_rate = math_ops.cast(self.decay_rate, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n p = global_step_recomp / decay_steps\n if self.staircase:\n p = math_ops.floor(p)\n return math_ops.multiply(\n initial_learning_rate, math_ops.pow(decay_rate, p), name=name)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"decay_rate\": self.decay_rate,\n \"staircase\": self.staircase,\n \"name\": self.name\n }\n\n\nclass PiecewiseConstantDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a piecewise constant decay schedule.\n\n The function returns a 1-arg callable to compute the piecewise constant\n when passed the current optimizer step. This can be useful for changing the\n learning rate value across different invocations of optimizer functions.\n\n Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5\n for the next 10000 steps, and 0.1 for any additional steps.\n\n ```python\n step = tf.Variable(0, trainable=False)\n boundaries = [100000, 110000]\n values = [1.0, 0.5, 0.1]\n learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(\n boundaries, values)\n\n # Later, whenever we perform an optimization step, we pass in the step.\n learning_rate = learning_rate_fn(step)\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate. The learning rate schedule is also serializable and\n deserializable using `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as the boundary tensors.\n\n The output of the 1-arg function that takes the `step`\n is `values[0]` when `step <= boundaries[0]`,\n `values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,\n and values[-1] when `step > boundaries[-1]`.\n \"\"\"\n\n def __init__(\n self,\n boundaries,\n values,\n name=None):\n \"\"\"Piecewise constant from boundaries and interval values.\n\n Args:\n boundaries: A list of `Tensor`s or `int`s or `float`s with strictly\n increasing entries, and with all elements having the same type as the\n optimizer step.\n values: A list of `Tensor`s or `float`s or `int`s that specifies the\n values for the intervals defined by `boundaries`. It should have one\n more element than `boundaries`, and all elements should have the same\n type.\n name: A string. Optional name of the operation. 
Defaults to\n 'PiecewiseConstant'.\n\n Raises:\n ValueError: if the number of elements in the lists do not match.\n \"\"\"\n super(PiecewiseConstantDecay, self).__init__()\n\n if len(boundaries) != len(values) - 1:\n raise ValueError(\n \"The length of boundaries should be 1 less than the length of values\")\n\n self.boundaries = boundaries\n self.values = values\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"PiecewiseConstant\"):\n boundaries = nest.map_structure(\n tensor_conversion.convert_to_tensor_v2_with_dispatch,\n nest.flatten(self.boundaries),\n )\n values = nest.map_structure(\n tensor_conversion.convert_to_tensor_v2_with_dispatch,\n nest.flatten(self.values),\n )\n x_recomp = tensor_conversion.convert_to_tensor_v2_with_dispatch(step)\n for i, b in enumerate(boundaries):\n if b.dtype.base_dtype != x_recomp.dtype.base_dtype:\n # We cast the boundaries to have the same type as the step\n b = math_ops.cast(b, x_recomp.dtype.base_dtype)\n boundaries[i] = b\n pred_fn_pairs = []\n pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))\n pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))\n for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):\n # Need to bind v here; can do this with lambda v=v: ...\n pred = (x_recomp > low) & (x_recomp <= high)\n pred_fn_pairs.append((pred, lambda v=v: v))\n\n # The default isn't needed here because our conditions are mutually\n # exclusive and exhaustive, but tf.case requires it.\n default = lambda: values[0]\n return control_flow_case.case(pred_fn_pairs, default, exclusive=True)\n\n def get_config(self):\n return {\n \"boundaries\": self.boundaries,\n \"values\": self.values,\n \"name\": self.name\n }\n\n\nclass PolynomialDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a polynomial decay schedule.\n\n It is commonly observed that a monotonically decreasing learning rate, whose\n degree of change is carefully chosen, results in a better performing model.\n This schedule applies a polynomial decay function to an optimizer step,\n given a provided `initial_learning_rate`, to reach an `end_learning_rate`\n in the given `decay_steps`.\n\n It requires a `step` value to compute the decayed learning rate. You\n can just pass a TensorFlow variable that you increment at each training\n step.\n\n The schedule is a 1-arg callable that produces a decayed learning rate\n when passed the current optimizer step. This can be useful for changing the\n learning rate value across different invocations of optimizer functions.\n It is computed as:\n\n ```python\n def decayed_learning_rate(step):\n step = min(step, decay_steps)\n return ((initial_learning_rate - end_learning_rate) *\n (1 - step / decay_steps) ^ (power)\n ) + end_learning_rate\n ```\n\n If `cycle` is True then a multiple of `decay_steps` is used, the first one\n that is bigger than `step`.\n\n ```python\n def decayed_learning_rate(step):\n decay_steps = decay_steps * ceil(step / decay_steps)\n return ((initial_learning_rate - end_learning_rate) *\n (1 - step / decay_steps) ^ (power)\n ) + end_learning_rate\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate.\n Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using\n sqrt (i.e. 
power=0.5):\n\n ```python\n ...\n starter_learning_rate = 0.1\n end_learning_rate = 0.01\n decay_steps = 10000\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n starter_learning_rate,\n decay_steps,\n end_learning_rate,\n power=0.5)\n\n model.compile(optimizer=tf.keras.optimizers.SGD(\n learning_rate=learning_rate_fn),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n The learning rate schedule is also serializable and deserializable using\n `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_steps,\n end_learning_rate=0.0001,\n power=1.0,\n cycle=False,\n name=None):\n \"\"\"Applies a polynomial decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a\n Python number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Must be positive. See the decay computation above.\n end_learning_rate: A scalar `float32` or `float64` `Tensor` or a\n Python number. The minimal end learning rate.\n power: A scalar `float32` or `float64` `Tensor` or a\n Python number. The power of the polynomial. Defaults to linear, 1.0.\n cycle: A boolean, whether or not it should cycle beyond decay_steps.\n name: String. Optional name of the operation. Defaults to\n 'PolynomialDecay'.\n \"\"\"\n super(PolynomialDecay, self).__init__()\n\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.end_learning_rate = end_learning_rate\n self.power = power\n self.cycle = cycle\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"PolynomialDecay\") as name:\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)\n power = math_ops.cast(self.power, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)\n if self.cycle:\n # Find the first multiple of decay_steps that is bigger than\n # global_step. 
If global_step is zero set the multiplier to 1\n multiplier = array_ops.where_v2(\n math_ops.equal(global_step_recomp, 0), 1.0,\n math_ops.ceil(global_step_recomp / self.decay_steps))\n decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)\n else:\n # Make sure that the global_step used is not bigger than decay_steps.\n global_step_recomp = math_ops.minimum(global_step_recomp,\n decay_steps_recomp)\n\n p = math_ops.divide(global_step_recomp, decay_steps_recomp)\n return math_ops.add(\n math_ops.multiply(initial_learning_rate - end_learning_rate,\n math_ops.pow(1 - p, power)),\n end_learning_rate,\n name=name)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"end_learning_rate\": self.end_learning_rate,\n \"power\": self.power,\n \"cycle\": self.cycle,\n \"name\": self.name\n }\n\n\nclass InverseTimeDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses an inverse time decay schedule.\n\n When training a model, it is often useful to lower the learning rate as\n the training progresses. This schedule applies the inverse decay function\n to an optimizer step, given a provided initial learning rate.\n It requires a `step` value to compute the decayed learning rate. You can\n just pass a TensorFlow variable that you increment at each training step.\n\n The schedule a 1-arg callable that produces a decayed learning\n rate when passed the current optimizer step. This can be useful for changing\n the learning rate value across different invocations of optimizer functions.\n It is computed as:\n\n ```python\n def decayed_learning_rate(step):\n return initial_learning_rate / (1 + decay_rate * step / decay_step)\n ```\n\n or, if `staircase` is `True`, as:\n\n ```python\n def decayed_learning_rate(step):\n return initial_learning_rate / (1 + decay_rate * floor(step / decay_step))\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate.\n Example: Fit a Keras model when decaying 1/t with a rate of 0.5:\n\n ```python\n ...\n initial_learning_rate = 0.1\n decay_steps = 1.0\n decay_rate = 0.5\n learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(\n initial_learning_rate, decay_steps, decay_rate)\n\n model.compile(optimizer=tf.keras.optimizers.SGD(\n learning_rate=learning_rate_fn),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.fit(data, labels, epochs=5)\n ```\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_steps,\n decay_rate,\n staircase=False,\n name=None):\n \"\"\"Applies inverse time decay to the initial learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a\n Python number. The initial learning rate.\n decay_steps: How often to apply decay.\n decay_rate: A Python number. The decay rate.\n staircase: Whether to apply decay in a discrete staircase, as opposed to\n continuous, fashion.\n name: String. Optional name of the operation. 
Defaults to\n 'InverseTimeDecay'.\n \"\"\"\n super(InverseTimeDecay, self).__init__()\n\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"InverseTimeDecay\") as name:\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n decay_steps = math_ops.cast(self.decay_steps, dtype)\n decay_rate = math_ops.cast(self.decay_rate, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n p = global_step_recomp / decay_steps\n if self.staircase:\n p = math_ops.floor(p)\n const = math_ops.cast(constant_op.constant(1), dtype)\n denom = math_ops.add(const, math_ops.multiply(decay_rate, p))\n return math_ops.divide(initial_learning_rate, denom, name=name)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"decay_rate\": self.decay_rate,\n \"staircase\": self.staircase,\n \"name\": self.name\n }\n\n\nclass CosineDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a cosine decay schedule.\n\n See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),\n SGDR: Stochastic Gradient Descent with Warm Restarts.\n\n When training a model, it is often useful to lower the learning rate as\n the training progresses. This schedule applies a cosine decay function\n to an optimizer step, given a provided initial learning rate.\n It requires a `step` value to compute the decayed learning rate. You can\n just pass a TensorFlow variable that you increment at each training step.\n\n The schedule a 1-arg callable that produces a decayed learning\n rate when passed the current optimizer step. This can be useful for changing\n the learning rate value across different invocations of optimizer functions.\n It is computed as:\n\n ```python\n def decayed_learning_rate(step):\n step = min(step, decay_steps)\n cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))\n decayed = (1 - alpha) * cosine_decay + alpha\n return initial_learning_rate * decayed\n ```\n\n Example usage:\n ```python\n decay_steps = 1000\n lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(\n initial_learning_rate, decay_steps)\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate. The learning rate schedule is also serializable and\n deserializable using `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_steps,\n alpha=0.0,\n name=None):\n \"\"\"Applies cosine decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a\n Python number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to decay over.\n alpha: A scalar `float32` or `float64` Tensor or a Python number.\n Minimum learning rate value as a fraction of initial_learning_rate.\n name: String. Optional name of the operation. 
Defaults to 'CosineDecay'.\n \"\"\"\n super(CosineDecay, self).__init__()\n\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.alpha = alpha\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"CosineDecay\"):\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n decay_steps = math_ops.cast(self.decay_steps, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)\n completed_fraction = global_step_recomp / decay_steps\n cosine_decayed = 0.5 * (1.0 + math_ops.cos(\n constant_op.constant(math.pi) * completed_fraction))\n\n decayed = (1 - self.alpha) * cosine_decayed + self.alpha\n return math_ops.multiply(initial_learning_rate, decayed)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"alpha\": self.alpha,\n \"name\": self.name\n }\n\n\nclass CosineDecayRestarts(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a cosine decay schedule with restarts.\n\n See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),\n SGDR: Stochastic Gradient Descent with Warm Restarts.\n\n When training a model, it is often useful to lower the learning rate as\n the training progresses. This schedule applies a cosine decay function with\n restarts to an optimizer step, given a provided initial learning rate.\n It requires a `step` value to compute the decayed learning rate. You can\n just pass a TensorFlow variable that you increment at each training step.\n\n The schedule a 1-arg callable that produces a decayed learning\n rate when passed the current optimizer step. This can be useful for changing\n the learning rate value across different invocations of optimizer functions.\n\n The learning rate multiplier first decays\n from 1 to `alpha` for `first_decay_steps` steps. Then, a warm\n restart is performed. Each new warm restart runs for `t_mul` times more\n steps and with `m_mul` times smaller initial learning rate.\n\n Example usage:\n ```python\n first_decay_steps = 1000\n lr_decayed_fn = (\n tf.keras.optimizers.schedules.CosineDecayRestarts(\n initial_learning_rate,\n first_decay_steps))\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate. The learning rate schedule is also serializable and\n deserializable using `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n first_decay_steps,\n t_mul=2.0,\n m_mul=1.0,\n alpha=0.0,\n name=None):\n \"\"\"Applies cosine decay with restarts to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\n number. The initial learning rate.\n first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python\n number. 
Number of steps to decay over.\n t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.\n Used to derive the number of iterations in the i-th period\n m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.\n Used to derive the initial learning rate of the i-th period:\n alpha: A scalar `float32` or `float64` Tensor or a Python number.\n Minimum learning rate value as a fraction of the initial_learning_rate.\n name: String. Optional name of the operation. Defaults to 'SGDRDecay'.\n \"\"\"\n super(CosineDecayRestarts, self).__init__()\n\n self.initial_learning_rate = initial_learning_rate\n self.first_decay_steps = first_decay_steps\n self._t_mul = t_mul\n self._m_mul = m_mul\n self.alpha = alpha\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"SGDRDecay\") as name:\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)\n alpha = math_ops.cast(self.alpha, dtype)\n t_mul = math_ops.cast(self._t_mul, dtype)\n m_mul = math_ops.cast(self._m_mul, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n completed_fraction = global_step_recomp / first_decay_steps\n\n def compute_step(completed_fraction, geometric=False):\n \"\"\"Helper for `cond` operation.\"\"\"\n if geometric:\n i_restart = math_ops.floor(\n math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /\n math_ops.log(t_mul))\n\n sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)\n completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart\n\n else:\n i_restart = math_ops.floor(completed_fraction)\n completed_fraction -= i_restart\n\n return i_restart, completed_fraction\n\n i_restart, completed_fraction = cond.cond(\n math_ops.equal(t_mul, 1.0),\n lambda: compute_step(completed_fraction, geometric=False),\n lambda: compute_step(completed_fraction, geometric=True))\n\n m_fac = m_mul**i_restart\n cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(\n constant_op.constant(math.pi) * completed_fraction))\n decayed = (1 - alpha) * cosine_decayed + alpha\n\n return math_ops.multiply(initial_learning_rate, decayed, name=name)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"first_decay_steps\": self.first_decay_steps,\n \"t_mul\": self._t_mul,\n \"m_mul\": self._m_mul,\n \"alpha\": self.alpha,\n \"name\": self.name\n }\n\n\n# Note: this code is still used by V1 APIs.\nclass LinearCosineDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a linear cosine decay schedule.\n\n See [Bello et al., ICML2017] Neural Optimizer Search with RL.\n https://arxiv.org/abs/1709.07417\n\n For the idea of warm starts here controlled by `num_periods`,\n see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent\n with Warm Restarts. https://arxiv.org/abs/1608.03983\n\n Note that linear cosine decay is more aggressive than cosine decay and\n larger initial learning rates can typically be used.\n\n When training a model, it is often recommended to lower the learning rate as\n the training progresses. This schedule applies a linear cosine decay\n function to an optimizer step, given a provided initial learning rate.\n It requires a `step` value to compute the decayed learning rate. 
You can\n just pass a TensorFlow variable that you increment at each training step.\n\n The schedule a 1-arg callable that produces a decayed learning\n rate when passed the current optimizer step. This can be useful for changing\n the learning rate value across different invocations of optimizer functions.\n It is computed as:\n\n ```python\n def decayed_learning_rate(step):\n step = min(step, decay_steps)\n linear_decay = (decay_steps - step) / decay_steps\n cosine_decay = 0.5 * (\n 1 + cos(pi * 2 * num_periods * step / decay_steps))\n decayed = (alpha + linear_decay) * cosine_decay + beta\n return initial_learning_rate * decayed\n ```\n\n Example usage:\n ```python\n decay_steps = 1000\n lr_decayed_fn = (\n tf.keras.experimental.LinearCosineDecay(\n initial_learning_rate, decay_steps))\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate. The learning rate schedule is also serializable and\n deserializable using `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_steps,\n num_periods=0.5,\n alpha=0.0,\n beta=0.001,\n name=None):\n \"\"\"Applies linear cosine decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\n number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to decay over.\n num_periods: Number of periods in the cosine part of the decay.\n See computation above.\n alpha: See computation above.\n beta: See computation above.\n name: String. Optional name of the operation. 
Defaults to\n 'LinearCosineDecay'.\n \"\"\"\n super(LinearCosineDecay, self).__init__()\n\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.num_periods = num_periods\n self.alpha = alpha\n self.beta = beta\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"LinearCosineDecay\") as name:\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n decay_steps = math_ops.cast(self.decay_steps, dtype)\n num_periods = math_ops.cast(self.num_periods, dtype)\n alpha = math_ops.cast(self.alpha, dtype)\n beta = math_ops.cast(self.beta, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)\n linear_decayed = (decay_steps - global_step_recomp) / decay_steps\n completed_fraction = global_step_recomp / decay_steps\n fraction = 2.0 * num_periods * completed_fraction\n cosine_decayed = 0.5 * (\n 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))\n\n linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta\n return math_ops.multiply(initial_learning_rate, linear_cosine_decayed,\n name=name)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"num_periods\": self.num_periods,\n \"alpha\": self.alpha,\n \"beta\": self.beta,\n \"name\": self.name\n }\n\n\n# Note: this code is still used by V1 APIs.\nclass NoisyLinearCosineDecay(LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a noisy linear cosine decay schedule.\n\n See [Bello et al., ICML2017] Neural Optimizer Search with RL.\n https://arxiv.org/abs/1709.07417\n\n For the idea of warm starts here controlled by `num_periods`,\n see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent\n with Warm Restarts. https://arxiv.org/abs/1608.03983\n\n Note that linear cosine decay is more aggressive than cosine decay and\n larger initial learning rates can typically be used.\n\n When training a model, it is often recommended to lower the learning rate as\n the training progresses. This schedule applies a noisy linear cosine decay\n function to an optimizer step, given a provided initial learning rate.\n It requires a `step` value to compute the decayed learning rate. You can\n just pass a TensorFlow variable that you increment at each training step.\n\n The schedule a 1-arg callable that produces a decayed learning\n rate when passed the current optimizer step. This can be useful for changing\n the learning rate value across different invocations of optimizer functions.\n It is computed as:\n\n ```python\n def decayed_learning_rate(step):\n step = min(step, decay_steps)\n linear_decay = (decay_steps - step) / decay_steps)\n cosine_decay = 0.5 * (\n 1 + cos(pi * 2 * num_periods * step / decay_steps))\n decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta\n return initial_learning_rate * decayed\n ```\n where eps_t is 0-centered gaussian noise with variance\n initial_variance / (1 + global_step) ** variance_decay\n\n Example usage:\n ```python\n decay_steps = 1000\n lr_decayed_fn = (\n tf.keras.experimental.NoisyLinearCosineDecay(\n initial_learning_rate, decay_steps))\n ```\n\n You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\n as the learning rate. 
The learning rate schedule is also serializable and\n deserializable using `tf.keras.optimizers.schedules.serialize` and\n `tf.keras.optimizers.schedules.deserialize`.\n\n Returns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`.\n \"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_steps,\n initial_variance=1.0,\n variance_decay=0.55,\n num_periods=0.5,\n alpha=0.0,\n beta=0.001,\n name=None):\n \"\"\"Applies noisy linear cosine decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\n number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to decay over.\n initial_variance: initial variance for the noise. See computation above.\n variance_decay: decay for the noise's variance. See computation above.\n num_periods: Number of periods in the cosine part of the decay.\n See computation above.\n alpha: See computation above.\n beta: See computation above.\n name: String. Optional name of the operation. Defaults to\n 'NoisyLinearCosineDecay'.\n \"\"\"\n super(NoisyLinearCosineDecay, self).__init__()\n\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.initial_variance = initial_variance\n self.variance_decay = variance_decay\n self.num_periods = num_periods\n self.alpha = alpha\n self.beta = beta\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or \"NoisyLinearCosineDecay\") as name:\n initial_learning_rate = (\n tensor_conversion.convert_to_tensor_v2_with_dispatch(\n self.initial_learning_rate, name=\"initial_learning_rate\"\n )\n )\n dtype = initial_learning_rate.dtype\n decay_steps = math_ops.cast(self.decay_steps, dtype)\n initial_variance = math_ops.cast(self.initial_variance, dtype)\n variance_decay = math_ops.cast(self.variance_decay, dtype)\n num_periods = math_ops.cast(self.num_periods, dtype)\n alpha = math_ops.cast(self.alpha, dtype)\n beta = math_ops.cast(self.beta, dtype)\n\n global_step_recomp = math_ops.cast(step, dtype)\n global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)\n linear_decayed = (decay_steps - global_step_recomp) / decay_steps\n variance = initial_variance / (\n math_ops.pow(1.0 + global_step_recomp, variance_decay))\n std = math_ops.sqrt(variance)\n noisy_linear_decayed = (\n linear_decayed + random_ops.random_normal(\n linear_decayed.shape, stddev=std))\n\n completed_fraction = global_step_recomp / decay_steps\n fraction = 2.0 * num_periods * completed_fraction\n cosine_decayed = 0.5 * (\n 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))\n noisy_linear_cosine_decayed = (\n (alpha + noisy_linear_decayed) * cosine_decayed + beta)\n\n return math_ops.multiply(\n initial_learning_rate, noisy_linear_cosine_decayed, name=name)\n\n def get_config(self):\n return {\n \"initial_learning_rate\": self.initial_learning_rate,\n \"decay_steps\": self.decay_steps,\n \"initial_variance\": self.initial_variance,\n \"variance_decay\": self.variance_decay,\n \"num_periods\": self.num_periods,\n \"alpha\": self.alpha,\n \"beta\": self.beta,\n \"name\": self.name\n }\n\n\ndef serialize(learning_rate_schedule):\n return generic_utils.serialize_keras_object(learning_rate_schedule)\n\n\ndef deserialize(config, custom_objects=None):\n return generic_utils.deserialize_keras_object(\n config,\n 
module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"decay\")\n", "output": ["serialize", "deserialize", "LinearCosineDecay", "LearningRateSchedule", "ExponentialDecay", "PolynomialDecay", "CosineDecay", "NoisyLinearCosineDecay", "CosineDecayRestarts", "InverseTimeDecay", "PiecewiseConstantDecay"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py", "file_length": 12264, "symbol_dict": [{"symbol": "serialize", "type": "mannual_defined_function", "byte_location": 38955, "location": 12165}, {"symbol": "deserialize", "type": "mannual_defined_function", "byte_location": 39066, "location": 12200}, {"symbol": "CosineDecay", "type": "mannual_defined_class", "byte_location": 20242, "location": 6209}, {"symbol": "InverseTimeDecay", "type": "mannual_defined_class", "byte_location": 16483, "location": 5047}, {"symbol": "LinearCosineDecay", "type": "mannual_defined_class", "byte_location": 28859, "location": 8954}, {"symbol": "LearningRateSchedule", "type": "mannual_defined_class", "byte_location": 1234, "location": 318}, {"symbol": "PiecewiseConstantDecay", "type": "mannual_defined_class", "byte_location": 6933, "location": 2095}, {"symbol": "ExponentialDecay", "type": "mannual_defined_class", "byte_location": 3152, "location": 922}, {"symbol": "CosineDecayRestarts", "type": "mannual_defined_class", "byte_location": 23794, "location": 7328}, {"symbol": "NoisyLinearCosineDecay", "type": "mannual_defined_class", "byte_location": 33439, "location": 10404}, {"symbol": "PolynomialDecay", "type": "mannual_defined_class", "byte_location": 11041, "location": 3368}]}} {"input": "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras initializers for TF 2.\"\"\"\n# pylint: disable=g-classes-have-attributes\n\nimport math\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_linalg_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import stateless_random_ops\n\n_PARTITION_SHAPE = 'partition_shape'\n_PARTITION_OFFSET = 'partition_offset'\n\n\nclass Initializer(object):\n \"\"\"Initializer base class: all Keras initializers inherit from this class.\n\n Initializers should implement a `__call__` method with the following\n signature:\n\n ```python\n def __call__(self, shape, dtype=None, **kwargs):\n # returns a tensor of shape `shape` and dtype `dtype`\n # containing values drawn from a distribution of your choice.\n ```\n\n Optionally, you an also implement the method `get_config` and the class\n method `from_config` in order to support serialization -- just like with\n any Keras 
object.\n\n Here's a simple example: a random normal initializer.\n\n ```python\n import tensorflow as tf\n\n class ExampleRandomNormal(tf.keras.initializers.Initializer):\n\n def __init__(self, mean, stddev):\n self.mean = mean\n self.stddev = stddev\n\n def __call__(self, shape, dtype=None, **kwargs):\n return tf.random.normal(\n shape, mean=self.mean, stddev=self.stddev, dtype=dtype)\n\n def get_config(self): # To support serialization\n return {\"mean\": self.mean, \"stddev\": self.stddev}\n ```\n\n Note that we don't have to implement `from_config` in the example above since\n the constructor arguments of the class the keys in the config returned by\n `get_config` are the same. In this case, the default `from_config`\n works fine.\n \"\"\"\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError\n\n def get_config(self):\n \"\"\"Returns the configuration of the initializer as a JSON-serializable dict.\n\n Returns:\n A JSON-serializable Python dict.\n \"\"\"\n return {}\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Instantiates an initializer from a configuration dictionary.\n\n Example:\n\n ```python\n initializer = RandomUniform(-1, 1)\n config = initializer.get_config()\n initializer = RandomUniform.from_config(config)\n ```\n\n Args:\n config: A Python dictionary, the output of `get_config`.\n\n Returns:\n A `tf.keras.initializers.Initializer` instance.\n \"\"\"\n config.pop('dtype', None)\n return cls(**config)\n\n\nclass Zeros(Initializer):\n \"\"\"Initializer that generates tensors initialized to 0.\n\n Also available via the shortcut function `tf.keras.initializers.zeros`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Zeros()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Zeros()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n \"\"\"\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _get_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == dtypes.string:\n raise ValueError('Expected numeric or boolean dtype, got %s.' 
% dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return array_ops.zeros(shape, dtype)\n\n\nclass Ones(Initializer):\n \"\"\"Initializer that generates tensors initialized to 1.\n\n Also available via the shortcut function `tf.keras.initializers.ones`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Ones()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Ones()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n \"\"\"\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _get_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == dtypes.string:\n raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return array_ops.ones(shape, dtype)\n\n\nclass Constant(Initializer):\n \"\"\"Initializer that generates tensors with constant values.\n\n Also available via the shortcut function `tf.keras.initializers.constant`.\n\n Only scalar values are allowed.\n The constant value provided must be convertible to the dtype requested\n when calling the initializer.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Constant(3.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Constant(3.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n value: A Python scalar.\n \"\"\"\n\n def __init__(self, value=0):\n self.value = value\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized to `self.value`.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. If not specified,\n `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n \"\"\"\n del kwargs\n return constant_op.constant(\n self.value, dtype=_get_dtype(dtype), shape=shape)\n\n def get_config(self):\n return {'value': self.value}\n\n\nclass RandomUniform(Initializer):\n \"\"\"Initializer that generates tensors with a uniform distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.random_uniform`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n minval: A python scalar or a scalar tensor. Lower bound of the range of\n random values to generate (inclusive).\n maxval: A python scalar or a scalar tensor. Upper bound of the range of\n random values to generate (exclusive).\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n \"\"\"\n\n def __init__(self, minval=-0.05, maxval=0.05, seed=None):\n self.minval = minval\n self.maxval = maxval\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point and integer\n types are supported. If not specified,\n `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _get_dtype(dtype)\n if not dtype.is_floating and not dtype.is_integer:\n raise ValueError('Expected float or integer dtype, got %s.' % dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return self._random_generator.random_uniform(shape, self.minval,\n self.maxval, dtype)\n\n def get_config(self):\n return {\n 'minval': self.minval,\n 'maxval': self.maxval,\n 'seed': self.seed\n }\n\n\nclass RandomNormal(Initializer):\n \"\"\"Initializer that generates tensors with a normal distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.random_normal`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values to\n generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the random\n values to generate.\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n \"\"\"\n\n def __init__(self, mean=0.0, stddev=0.05, seed=None):\n self.mean = mean\n self.stddev = stddev\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized to random normal values.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. 
If not specified, `tf.keras.backend.floatx()` is used, which\n default to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return self._random_generator.random_normal(shape, self.mean, self.stddev,\n dtype)\n\n def get_config(self):\n return {\n 'mean': self.mean,\n 'stddev': self.stddev,\n 'seed': self.seed\n }\n\n\nclass TruncatedNormal(Initializer):\n \"\"\"Initializer that generates a truncated normal distribution.\n\n Also available via the shortcut function\n `tf.keras.initializers.truncated_normal`.\n\n The values generated are similar to values from a\n `tf.keras.initializers.RandomNormal` initializer except that values more\n than two standard deviations from the mean are\n discarded and re-drawn.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate before truncation.\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n \"\"\"\n\n def __init__(self, mean=0.0, stddev=0.05, seed=None):\n self.mean = mean\n self.stddev = stddev\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized to random normal values (truncated).\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. 
If not specified, `tf.keras.backend.floatx()` is used, which\n default to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return self._random_generator.truncated_normal(shape, self.mean,\n self.stddev, dtype)\n\n def get_config(self):\n return {\n 'mean': self.mean,\n 'stddev': self.stddev,\n 'seed': self.seed\n }\n\n\nclass VarianceScaling(Initializer):\n \"\"\"Initializer capable of adapting its scale to the shape of weights tensors.\n\n Also available via the shortcut function\n `tf.keras.initializers.variance_scaling`.\n\n With `distribution=\"truncated_normal\" or \"untruncated_normal\"`, samples are\n drawn from a truncated/untruncated normal distribution with a mean of zero and\n a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`,\n where `n` is:\n\n - number of input units in the weight tensor, if `mode=\"fan_in\"`\n - number of output units, if `mode=\"fan_out\"`\n - average of the numbers of input and output units, if `mode=\"fan_avg\"`\n\n With `distribution=\"uniform\"`, samples are drawn from a uniform distribution\n within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.VarianceScaling(\n ... scale=0.1, mode='fan_in', distribution='uniform')\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.VarianceScaling(\n ... scale=0.1, mode='fan_in', distribution='uniform')\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n scale: Scaling factor (positive float).\n mode: One of \"fan_in\", \"fan_out\", \"fan_avg\".\n distribution: Random distribution to use. One of \"truncated_normal\",\n \"untruncated_normal\" and \"uniform\".\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n \"\"\"\n\n def __init__(self,\n scale=1.0,\n mode='fan_in',\n distribution='truncated_normal',\n seed=None):\n if scale <= 0.:\n raise ValueError('`scale` must be positive float.')\n if mode not in {'fan_in', 'fan_out', 'fan_avg'}:\n raise ValueError('Invalid `mode` argument:', mode)\n distribution = distribution.lower()\n # Compatibility with keras-team/keras.\n if distribution == 'normal':\n distribution = 'truncated_normal'\n if distribution not in {'uniform', 'truncated_normal',\n 'untruncated_normal'}:\n raise ValueError('Invalid `distribution` argument:', distribution)\n self.scale = scale\n self.mode = mode\n self.distribution = distribution\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. 
If not specified, `tf.keras.backend.floatx()` is used, which\n default to `float32` unless you configured it otherwise (via\n `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n scale = self.scale\n fan_in, fan_out = _compute_fans(shape)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n if self.mode == 'fan_in':\n scale /= max(1., fan_in)\n elif self.mode == 'fan_out':\n scale /= max(1., fan_out)\n else:\n scale /= max(1., (fan_in + fan_out) / 2.)\n if self.distribution == 'truncated_normal':\n # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\n stddev = math.sqrt(scale) / .87962566103423978\n return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)\n elif self.distribution == 'untruncated_normal':\n stddev = math.sqrt(scale)\n return self._random_generator.random_normal(shape, 0.0, stddev, dtype)\n else:\n limit = math.sqrt(3.0 * scale)\n return self._random_generator.random_uniform(shape, -limit, limit, dtype)\n\n def get_config(self):\n return {\n 'scale': self.scale,\n 'mode': self.mode,\n 'distribution': self.distribution,\n 'seed': self.seed\n }\n\n\nclass Orthogonal(Initializer):\n \"\"\"Initializer that generates an orthogonal matrix.\n\n Also available via the shortcut function `tf.keras.initializers.orthogonal`.\n\n If the shape of the tensor to initialize is two-dimensional, it is initialized\n with an orthogonal matrix obtained from the QR decomposition of a matrix of\n random numbers drawn from a normal distribution.\n If the matrix has fewer rows than columns then the output will have orthogonal\n rows. Otherwise, the output will have orthogonal columns.\n\n If the shape of the tensor to initialize is more than two-dimensional,\n a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`\n is initialized, where `n` is the length of the shape vector.\n The matrix is subsequently reshaped to give a tensor of the desired shape.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Orthogonal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Orthogonal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n gain: multiplicative factor to apply to the orthogonal matrix\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)\n ([pdf](https://arxiv.org/pdf/1312.6120.pdf))\n \"\"\"\n\n def __init__(self, gain=1.0, seed=None):\n self.gain = gain\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized to an orthogonal matrix.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. 
If not specified, `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n # Check the shape\n if len(shape) < 2:\n raise ValueError('The tensor to initialize must be '\n 'at least two-dimensional')\n # Flatten the input shape with the last dimension remaining\n # its original shape so it works for conv2d\n num_rows = 1\n for dim in shape[:-1]:\n num_rows *= dim\n num_cols = shape[-1]\n flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))\n\n # Generate a random matrix\n a = self._random_generator.random_normal(flat_shape, dtype=dtype)\n # Compute the qr factorization\n q, r = gen_linalg_ops.qr(a, full_matrices=False)\n # Make Q uniform\n d = array_ops.tensor_diag_part(r)\n q *= math_ops.sign(d)\n if num_rows < num_cols:\n q = array_ops.matrix_transpose(q)\n return self.gain * array_ops.reshape(q, shape)\n\n def get_config(self):\n return {'gain': self.gain, 'seed': self.seed}\n\n\nclass Identity(Initializer):\n \"\"\"Initializer that generates the identity matrix.\n\n Also available via the shortcut function `tf.keras.initializers.identity`.\n\n Only usable for generating 2D matrices.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.Identity()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.Identity()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n gain: Multiplicative factor to apply to the identity matrix.\n \"\"\"\n\n def __init__(self, gain=1.0):\n self.gain = gain\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized to a 2D identity matrix.\n\n Args:\n shape: Shape of the tensor. It should have exactly rank 2.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if len(shape) != 2:\n raise ValueError(\n 'Identity matrix initializer can only be used for 2D matrices.')\n initializer = linalg_ops.eye(*shape, dtype=dtype)\n return self.gain * initializer\n\n def get_config(self):\n return {'gain': self.gain}\n\n\nclass GlorotUniform(VarianceScaling):\n \"\"\"The Glorot uniform initializer, also called Xavier uniform initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.glorot_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`, where\n `limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units\n in the weight tensor and `fan_out` is the number of output units).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.GlorotUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.GlorotUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)\n ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))\n \"\"\"\n\n def __init__(self, seed=None):\n super(GlorotUniform, self).__init__(\n scale=1.0,\n mode='fan_avg',\n distribution='uniform',\n seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}\n\n\nclass GlorotNormal(VarianceScaling):\n \"\"\"The Glorot normal initializer, also called Xavier normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.glorot_normal`.\n\n Draws samples from a truncated normal distribution centered on 0 with `stddev\n = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in\n the weight tensor and `fan_out` is the number of output units in the weight\n tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.GlorotNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.GlorotNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)\n ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))\n \"\"\"\n\n def __init__(self, seed=None):\n super(GlorotNormal, self).__init__(\n scale=1.0,\n mode='fan_avg',\n distribution='truncated_normal',\n seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}\n\n\nclass LecunNormal(VarianceScaling):\n \"\"\"Lecun normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.lecun_normal`.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Draws samples from a truncated normal distribution centered on 0 with `stddev\n = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight\n tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.LecunNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.LecunNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
Used to seed the random generator.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al., 2017]\n (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)\n ([pdf]\n (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n \"\"\"\n\n def __init__(self, seed=None):\n super(LecunNormal, self).__init__(\n scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}\n\n\nclass LecunUniform(VarianceScaling):\n \"\"\"Lecun uniform initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.lecun_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`,\n where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the\n weight tensor).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.LecunUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.LecunUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n - Self-Normalizing Neural Networks,\n [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long\n ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))\n - Efficient Backprop,\n [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n \"\"\"\n\n def __init__(self, seed=None):\n super(LecunUniform, self).__init__(\n scale=1., mode='fan_in', distribution='uniform', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}\n\n\nclass HeNormal(VarianceScaling):\n \"\"\"He normal initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.he_normal`.\n\n It draws samples from a truncated normal distribution centered on 0 with\n `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the\n weight tensor.\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.HeNormal()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.HeNormal()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. 
An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n \"\"\"\n\n def __init__(self, seed=None):\n super(HeNormal, self).__init__(\n scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}\n\n\nclass HeUniform(VarianceScaling):\n \"\"\"He uniform variance scaling initializer.\n\n Also available via the shortcut function\n `tf.keras.initializers.he_uniform`.\n\n Draws samples from a uniform distribution within `[-limit, limit]`, where\n `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the\n weight tensor).\n\n Examples:\n\n >>> # Standalone usage:\n >>> initializer = tf.keras.initializers.HeUniform()\n >>> values = initializer(shape=(2, 2))\n\n >>> # Usage in a Keras layer:\n >>> initializer = tf.keras.initializers.HeUniform()\n >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\n Args:\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\n References:\n [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))\n \"\"\"\n\n def __init__(self, seed=None):\n super(HeUniform, self).__init__(\n scale=2., mode='fan_in', distribution='uniform', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}\n\n\ndef _get_dtype(dtype):\n if dtype is None:\n dtype = backend.floatx()\n return dtypes.as_dtype(dtype)\n\n\ndef _assert_float_dtype(dtype):\n \"\"\"Validate and return floating point type based on `dtype`.\n\n `dtype` must be a floating point type.\n\n Args:\n dtype: The data type to validate.\n\n Returns:\n Validated type.\n\n Raises:\n ValueError: if `dtype` is not a floating point type.\n \"\"\"\n dtype = dtypes.as_dtype(dtype)\n if not dtype.is_floating:\n raise ValueError('Expected floating point type, got %s.' 
% dtype)\n return dtype\n\n\nclass _RandomGenerator(object):\n \"\"\"Random generator that selects appropriate random ops.\"\"\"\n\n def __init__(self, seed=None):\n super(_RandomGenerator, self).__init__()\n if seed is not None:\n # Stateless random ops requires 2-int seed.\n self.seed = [seed, 0]\n else:\n self.seed = None\n\n def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):\n \"\"\"A deterministic random normal if seed is passed.\"\"\"\n if self.seed:\n op = stateless_random_ops.stateless_random_normal\n else:\n op = random_ops.random_normal\n return op(\n shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)\n\n def random_uniform(self, shape, minval, maxval, dtype):\n \"\"\"A deterministic random uniform if seed is passed.\"\"\"\n if self.seed:\n op = stateless_random_ops.stateless_random_uniform\n else:\n op = random_ops.random_uniform\n return op(\n shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)\n\n def truncated_normal(self, shape, mean, stddev, dtype):\n \"\"\"A deterministic truncated normal if seed is passed.\"\"\"\n if self.seed:\n op = stateless_random_ops.stateless_truncated_normal\n else:\n op = random_ops.truncated_normal\n return op(\n shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)\n\n\ndef _compute_fans(shape):\n \"\"\"Computes the number of input and output units for a weight shape.\n\n Args:\n shape: Integer shape tuple or TF tensor shape.\n\n Returns:\n A tuple of integer scalars (fan_in, fan_out).\n \"\"\"\n if len(shape) < 1: # Just to avoid errors for constants.\n fan_in = fan_out = 1\n elif len(shape) == 1:\n fan_in = fan_out = shape[0]\n elif len(shape) == 2:\n fan_in = shape[0]\n fan_out = shape[1]\n else:\n # Assuming convolution kernels (2D, 3D, or more).\n # kernel shape: (..., input_depth, depth)\n receptive_field_size = 1\n for dim in shape[:-2]:\n receptive_field_size *= dim\n fan_in = shape[-2] * receptive_field_size\n fan_out = shape[-1] * receptive_field_size\n return int(fan_in), int(fan_out)\n\n\ndef _validate_kwargs(cls_name, kwargs, support_partition=True):\n for kwarg in kwargs:\n if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:\n raise TypeError('Unknown keyword arguments: %s' % kwarg)\n elif not support_partition:\n raise ValueError('%s initializer doesn\\'t support partition-related '\n 'arguments' % cls_name)\n", "output": ["_compute_fans", "_assert_float_dtype", "_validate_kwargs", "_get_dtype", "HeNormal", "RandomUniform", "LecunUniform", "_RandomGenerator", "VarianceScaling", "Initializer", "GlorotUniform", "GlorotNormal", "TruncatedNormal", "LecunNormal", "Identity", "HeUniform", "Zeros", "RandomNormal", "Ones", "Orthogonal", "Constant"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/keras/initializers/initializers_v2.py", "file_length": 10596, "symbol_dict": [{"symbol": "_compute_fans", "type": "mannual_defined_function", "byte_location": 32475, "location": 10208}, {"symbol": "_assert_float_dtype", "type": "mannual_defined_function", "byte_location": 30705, "location": 9630}, {"symbol": "_validate_kwargs", "type": "mannual_defined_function", "byte_location": 33238, "location": 10491}, {"symbol": "_get_dtype", "type": "mannual_defined_function", "byte_location": 30599, "location": 9587}, {"symbol": "Ones", "type": "mannual_defined_class", "byte_location": 4868, "location": 1419}, {"symbol": "LecunUniform", "type": "mannual_defined_class", "byte_location": 26671, "location": 8224}, {"symbol": "Initializer", "type": 
"mannual_defined_class", "byte_location": 1285, "location": 356}, {"symbol": "LecunNormal", "type": "mannual_defined_class", "byte_location": 25222, "location": 7728}, {"symbol": "Identity", "type": "mannual_defined_class", "byte_location": 21077, "location": 6370}, {"symbol": "GlorotUniform", "type": "mannual_defined_class", "byte_location": 22626, "location": 6850}, {"symbol": "Constant", "type": "mannual_defined_class", "byte_location": 6159, "location": 1819}, {"symbol": "HeUniform", "type": "mannual_defined_class", "byte_location": 29314, "location": 9145}, {"symbol": "RandomNormal", "type": "mannual_defined_class", "byte_location": 9665, "location": 2880}, {"symbol": "Orthogonal", "type": "mannual_defined_class", "byte_location": 17960, "location": 5408}, {"symbol": "GlorotNormal", "type": "mannual_defined_class", "byte_location": 23911, "location": 7286}, {"symbol": "VarianceScaling", "type": "mannual_defined_class", "byte_location": 13899, "location": 4142}, {"symbol": "TruncatedNormal", "type": "mannual_defined_class", "byte_location": 11658, "location": 3476}, {"symbol": "Zeros", "type": "mannual_defined_class", "byte_location": 3572, "location": 1017}, {"symbol": "HeNormal", "type": "mannual_defined_class", "byte_location": 28039, "location": 8704}, {"symbol": "RandomUniform", "type": "mannual_defined_class", "byte_location": 7469, "location": 2220}, {"symbol": "_RandomGenerator", "type": "mannual_defined_class", "byte_location": 31143, "location": 9779}]}} {"input": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Deprecation tests.\"\"\"\n\n# pylint: disable=unused-import\n\nimport collections\nimport enum\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import strict_mode\nfrom tensorflow.python.framework import tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import tf_inspect\n\n\nclass DeprecatedAliasTest(test.TestCase):\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_function_alias(self, mock_warning):\n deprecated_func = deprecation.deprecated_alias(\"deprecated.func\",\n \"real.func\",\n logging.error)\n\n logging.error(\"fake error logged\")\n self.assertEqual(0, mock_warning.call_count)\n deprecated_func(\"FAKE ERROR!\")\n self.assertEqual(1, mock_warning.call_count)\n # Make sure the error points to the right file.\n self.assertRegex(mock_warning.call_args[0][1], r\"deprecation_test\\.py:\")\n deprecated_func(\"ANOTHER FAKE ERROR!\")\n self.assertEqual(1, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def 
test_class_alias(self, mock_warning):\n class MyClass(object):\n \"\"\"My docstring.\"\"\"\n\n init_args = []\n\n def __init__(self, arg):\n MyClass.init_args.append(arg)\n\n deprecated_cls = deprecation.deprecated_alias(\"deprecated.cls\",\n \"real.cls\",\n MyClass)\n\n print(deprecated_cls.__name__)\n print(deprecated_cls.__module__)\n print(deprecated_cls.__doc__)\n\n MyClass(\"test\")\n self.assertEqual(0, mock_warning.call_count)\n deprecated_cls(\"deprecated\")\n self.assertEqual(1, mock_warning.call_count)\n # Make sure the error points to the right file.\n self.assertRegex(mock_warning.call_args[0][1], r\"deprecation_test\\.py:\")\n deprecated_cls(\"deprecated again\")\n self.assertEqual(1, mock_warning.call_count)\n\n self.assertEqual([\"test\", \"deprecated\", \"deprecated again\"],\n MyClass.init_args)\n\n # Check __init__ signature matches for doc generation.\n self.assertEqual(\n tf_inspect.getfullargspec(MyClass.__init__),\n tf_inspect.getfullargspec(deprecated_cls.__init__))\n\n\nclass DeprecationTest(test.TestCase):\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_once(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions, warn_once=True)\n def _fn():\n pass\n\n _fn()\n self.assertEqual(1, mock_warning.call_count)\n _fn()\n self.assertEqual(1, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_init_class(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions, warn_once=True)\n class MyClass():\n \"\"\"A test class.\"\"\"\n\n def __init__(self, a):\n pass\n\n MyClass(\"\")\n self.assertEqual(1, mock_warning.call_count)\n MyClass(\"\")\n self.assertEqual(1, mock_warning.call_count)\n self.assertIn(\"IS DEPRECATED\", MyClass.__doc__)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_new_class(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions, warn_once=True)\n class MyStr(str):\n\n def __new__(cls, value):\n return str.__new__(cls, value)\n\n MyStr(\"abc\")\n self.assertEqual(1, mock_warning.call_count)\n MyStr(\"abc\")\n self.assertEqual(1, mock_warning.call_count)\n self.assertIn(\"IS DEPRECATED\", MyStr.__doc__)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_enum(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions, warn_once=True)\n class MyEnum(enum.Enum):\n a = 1\n b = 2\n\n self.assertIs(MyEnum(1), MyEnum.a)\n self.assertEqual(1, mock_warning.call_count)\n self.assertIs(MyEnum(2), MyEnum.b)\n self.assertEqual(1, mock_warning.call_count)\n self.assertIn(\"IS DEPRECATED\", MyEnum.__doc__)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_namedtuple(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n mytuple = deprecation.deprecated(\n date, instructions, warn_once=True)(\n collections.namedtuple(\"my_tuple\", [\"field1\", \"field2\"]))\n\n mytuple(1, 2)\n self.assertEqual(1, mock_warning.call_count)\n mytuple(3, 4)\n self.assertEqual(1, mock_warning.call_count)\n self.assertIn(\"IS DEPRECATED\", mytuple.__doc__)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def 
test_silence(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions, warn_once=False)\n def _fn():\n pass\n\n _fn()\n self.assertEqual(1, mock_warning.call_count)\n\n with deprecation.silence():\n _fn()\n self.assertEqual(1, mock_warning.call_count)\n\n _fn()\n self.assertEqual(2, mock_warning.call_count)\n\n def test_strict_mode_deprecation(self):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions, warn_once=True)\n def _fn():\n pass\n\n strict_mode.enable_strict_mode()\n with self.assertRaises(RuntimeError):\n _fn()\n\n def _assert_subset(self, expected_subset, actual_set):\n self.assertTrue(\n actual_set.issuperset(expected_subset),\n msg=\"%s is not a superset of %s.\" % (actual_set, expected_subset))\n\n def test_deprecated_illegal_args(self):\n instructions = \"This is how you update...\"\n with self.assertRaisesRegex(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated(\"\", instructions)\n with self.assertRaisesRegex(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated(\"07-04-2016\", instructions)\n date = \"2016-07-04\"\n with self.assertRaisesRegex(ValueError, \"instructions\"):\n deprecation.deprecated(date, None)\n with self.assertRaisesRegex(ValueError, \"instructions\"):\n deprecation.deprecated(date, \"\")\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_no_date(self, mock_warning):\n date = None\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1\n\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. \"\n \"It will be removed in a future version.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\nArgs:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n\"\n \"\\nReturns:\"\n \"\\n Sum of args.\" % instructions, _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"in a future version\", instructions]),\n set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. 
It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\nArgs:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n\"\n \"\\nReturns:\"\n \"\\n Sum of args.\" % (date, instructions), _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions), _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated(date, instructions)\n def _fn(arg0, arg1):\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"DEPRECATED FUNCTION\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions), _fn.__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_instance_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(date, instructions)\n def _fn(self, arg0, arg1):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. 
It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\nArgs:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n\"\n \"\\nReturns:\"\n \"\\n Sum of args.\" % (date, instructions),\n getattr(_Object, \"_fn\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _Object()._fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_instance_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(date, instructions)\n def _fn(self, arg0, arg1):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"fn doc. (deprecated)\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions),\n getattr(_Object, \"_fn\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _Object()._fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_instance_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(date, instructions)\n def _fn(self, arg0, arg1):\n return arg0 + arg1\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"DEPRECATED FUNCTION\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n getattr(_Object, \"_fn\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _Object()._fn(1, 2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n def test_prop_wrong_order(self):\n with self.assertRaisesRegex(\n ValueError,\n \"make sure @property appears before @deprecated in your source code\"):\n # pylint: disable=unused-variable\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @deprecation.deprecated(\"2016-07-04\", \"Instructions.\")\n @property\n def _prop(self):\n return \"prop_wrong_order\"\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_prop_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @property\n @deprecation.deprecated(date, instructions)\n def _prop(self):\n \"\"\"prop doc.\n\n Returns:\n String.\n \"\"\"\n return \"prop_with_doc\"\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"prop doc. (deprecated)\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. 
It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\"\n \"\\n\"\n \"\\nReturns:\"\n \"\\n String.\" % (date, instructions),\n getattr(_Object, \"_prop\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(\"prop_with_doc\", _Object()._prop)\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_prop_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n class _Object(object):\n\n def __init(self):\n pass\n\n @property\n @deprecation.deprecated(date, instructions)\n def _prop(self):\n return \"prop_no_doc\"\n\n # Assert function docs are properly updated.\n self.assertEqual(\n \"DEPRECATED FUNCTION\"\n \"\\n\"\n \"\\nDeprecated: THIS FUNCTION IS DEPRECATED. It will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions),\n getattr(_Object, \"_prop\").__doc__)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(\"prop_no_doc\", _Object()._prop)\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n\nclass DeprecatedArgsTest(test.TestCase):\n\n def _assert_subset(self, expected_subset, actual_set):\n self.assertTrue(\n actual_set.issuperset(expected_subset),\n msg=\"%s is not a superset of %s.\" % (actual_set, expected_subset))\n\n def test_deprecated_illegal_args(self):\n instructions = \"This is how you update...\"\n date = \"2016-07-04\"\n with self.assertRaisesRegex(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated_args(\"\", instructions, \"deprecated\")\n with self.assertRaisesRegex(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated_args(\"07-04-2016\", instructions, \"deprecated\")\n with self.assertRaisesRegex(ValueError, \"instructions\"):\n deprecation.deprecated_args(date, None, \"deprecated\")\n with self.assertRaisesRegex(ValueError, \"instructions\"):\n deprecation.deprecated_args(date, \"\", \"deprecated\")\n with self.assertRaisesRegex(ValueError, \"argument\"):\n deprecation.deprecated_args(date, instructions)\n\n def test_deprecated_missing_args(self):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n def _fn(arg0, arg1, deprecated=None):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n with self.assertRaisesRegex(ValueError, \"not present.*\\\\['missing'\\\\]\"):\n deprecation.deprecated_args(date, instructions, \"missing\")(_fn)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. 
(deprecated arguments)\"\n \"\\n\"\n \"\\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(deprecated)`. \"\n \"They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\nArgs:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n deprecated: Deprecated!\"\n \"\\n\"\n \"\\nReturns:\"\n \"\\n Sum of args.\" % (date, instructions), _fn.__doc__)\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated arguments)\"\n \"\\n\"\n \"\\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(deprecated)`. \"\n \"They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions), _fn.__doc__)\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, deprecated=True):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"DEPRECATED FUNCTION ARGUMENTS\"\n \"\\n\"\n \"\\nDeprecated: SOME ARGUMENTS ARE DEPRECATED: `(deprecated)`. 
\"\n \"They will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions), _fn.__doc__)\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_varargs(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, *deprecated):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, True, False))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_kwargs(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(arg0, arg1, **deprecated):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(1, 2, a=True, b=False))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_positional_and_named(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"d1\", \"d2\")\n def _fn(arg0, d1=None, arg1=2, d2=None):\n return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1\n\n # Assert calls without the deprecated arguments log nothing.\n self.assertEqual(2, _fn(1, arg1=2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated arguments log warnings.\n self.assertEqual(2, _fn(1, None, 2, d2=False))\n self.assertEqual(2, mock_warning.call_count)\n (args1, _) = mock_warning.call_args_list[0]\n self.assertRegex(args1[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions, \"d1\"]),\n set(args1[1:]))\n (args2, _) = mock_warning.call_args_list[1]\n self.assertRegex(args2[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions, \"d2\"]),\n set(args2[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_positional_and_named_with_ok_vals(self, 
mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, (\"d1\", None),\n (\"d2\", \"my_ok_val\"))\n def _fn(arg0, d1=None, arg1=2, d2=None):\n return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1\n\n # Assert calls without the deprecated arguments log nothing.\n self.assertEqual(2, _fn(1, arg1=2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated arguments log warnings.\n self.assertEqual(2, _fn(1, False, 2, d2=False))\n self.assertEqual(2, mock_warning.call_count)\n (args1, _) = mock_warning.call_args_list[0]\n self.assertRegex(args1[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions, \"d1\"]),\n set(args1[1:]))\n (args2, _) = mock_warning.call_args_list[1]\n self.assertRegex(args2[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions, \"d2\"]),\n set(args2[1:]))\n\n # Assert calls with the deprecated arguments don't log warnings if\n # the value matches the 'ok_val'.\n mock_warning.reset_mock()\n self.assertEqual(3, _fn(1, None, 2, d2=\"my_ok_val\"))\n self.assertEqual(0, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_kwonlyargs(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"deprecated\")\n def _fn(*, arg0, arg1, deprecated=None):\n return arg0 + arg1 if deprecated is not None else arg1 + arg0\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(3, _fn(arg0=1, arg1=2))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated argument log a warning.\n self.assertEqual(3, _fn(arg0=1, arg1=2, deprecated=2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_kwonlyargs_and_args(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions,\n (\"deprecated_arg1\", \"deprecated_arg2\"))\n def _fn(arg0, arg1, *, kw1,\n deprecated_arg1=None,\n deprecated_arg2=None):\n res = arg0 + arg1 + kw1\n if deprecated_arg1 is not None:\n res += deprecated_arg1\n if deprecated_arg2 is not None:\n res += deprecated_arg2\n return res\n\n # Assert calls without the deprecated argument log nothing.\n self.assertEqual(6, _fn(1, 2, kw1=3))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calls with the deprecated_arg1 argument log a warning.\n self.assertEqual(8, _fn(1, 2, kw1=3, deprecated_arg1=2))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n # Assert calls with the deprecated arguments log a warning.\n self.assertEqual(12, _fn(1, 2, kw1=3, deprecated_arg1=2, deprecated_arg2=4))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), 
set(args[1:]))\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_deprecated_args_once(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"arg\", warn_once=True)\n def _fn(arg=0): # pylint: disable=unused-argument\n pass\n\n _fn()\n self.assertEqual(0, mock_warning.call_count)\n _fn(arg=0)\n self.assertEqual(1, mock_warning.call_count)\n _fn(arg=1)\n self.assertEqual(1, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_deprecated_multiple_args_once_each(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_args(date, instructions, \"arg0\", \"arg1\",\n warn_once=True)\n def _fn(arg0=0, arg1=0): # pylint: disable=unused-argument\n pass\n\n _fn(arg0=0)\n self.assertEqual(1, mock_warning.call_count)\n _fn(arg0=0)\n self.assertEqual(1, mock_warning.call_count)\n _fn(arg1=0)\n self.assertEqual(2, mock_warning.call_count)\n _fn(arg0=0)\n self.assertEqual(2, mock_warning.call_count)\n _fn(arg1=0)\n self.assertEqual(2, mock_warning.call_count)\n\n\nclass DeprecatedArgValuesTest(test.TestCase):\n\n def _assert_subset(self, expected_subset, actual_set):\n self.assertTrue(\n actual_set.issuperset(expected_subset),\n msg=\"%s is not a superset of %s.\" % (actual_set, expected_subset))\n\n def test_deprecated_illegal_args(self):\n instructions = \"This is how you update...\"\n with self.assertRaisesRegex(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated_arg_values(\"\", instructions, deprecated=True)\n with self.assertRaisesRegex(ValueError, \"YYYY-MM-DD\"):\n deprecation.deprecated_arg_values(\n \"07-04-2016\", instructions, deprecated=True)\n date = \"2016-07-04\"\n with self.assertRaisesRegex(ValueError, \"instructions\"):\n deprecation.deprecated_arg_values(date, None, deprecated=True)\n with self.assertRaisesRegex(ValueError, \"instructions\"):\n deprecation.deprecated_arg_values(date, \"\", deprecated=True)\n with self.assertRaisesRegex(ValueError, \"argument\"):\n deprecation.deprecated_arg_values(date, instructions)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_with_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, warn_once=False,\n deprecated=True)\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\n\n Args:\n arg0: Arg 0.\n arg1: Arg 1.\n deprecated: Deprecated!\n\n Returns:\n Sum of args.\n \"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated argument values)\"\n \"\\n\"\n \"\\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(deprecated=True)`. 
\"\n \"They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\"\n \"\\n\"\n \"\\nArgs:\"\n \"\\n arg0: Arg 0.\"\n \"\\n arg1: Arg 1.\"\n \"\\n deprecated: Deprecated!\"\n \"\\n\"\n \"\\nReturns:\"\n \"\\n Sum of args.\" % (date, instructions), _fn.__doc__)\n\n # Assert calling new fn with non-deprecated value logs nothing.\n self.assertEqual(3, _fn(1, 2, deprecated=False))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calling new fn with deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2, deprecated=True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n # Assert calling new fn with default deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(2, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_with_one_line_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, warn_once=False,\n deprecated=True)\n def _fn(arg0, arg1, deprecated=True):\n \"\"\"fn doc.\"\"\"\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"fn doc. (deprecated argument values)\"\n \"\\n\"\n \"\\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(deprecated=True)`. \"\n \"They will be removed after %s.\"\n \"\\nInstructions for updating:\\n%s\" % (date, instructions), _fn.__doc__)\n\n # Assert calling new fn with non-deprecated value logs nothing.\n self.assertEqual(3, _fn(1, 2, deprecated=False))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calling new fn with deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2, deprecated=True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n # Assert calling new fn with default deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(2, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_deprecated_v1\n def test_static_fn_no_doc(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, warn_once=False,\n deprecated=True)\n def _fn(arg0, arg1, deprecated=True):\n return arg0 + arg1 if deprecated else arg1 + arg0\n\n # Assert function docs are properly updated.\n self.assertEqual(\"_fn\", _fn.__name__)\n self.assertEqual(\n \"DEPRECATED FUNCTION ARGUMENT VALUES\"\n \"\\n\"\n \"\\nDeprecated: SOME ARGUMENT VALUES ARE DEPRECATED: `(deprecated=True)`. 
\"\n \"They will be removed after %s.\"\n \"\\nInstructions for updating:\"\n \"\\n%s\" % (date, instructions), _fn.__doc__)\n\n # Assert calling new fn with non-deprecated value logs nothing.\n self.assertEqual(3, _fn(1, 2, deprecated=False))\n self.assertEqual(0, mock_warning.call_count)\n\n # Assert calling new fn issues log warning.\n self.assertEqual(3, _fn(1, 2, deprecated=True))\n self.assertEqual(1, mock_warning.call_count)\n (args, _) = mock_warning.call_args\n self.assertRegex(args[0], r\"deprecated and will be removed\")\n self._assert_subset(set([\"after \" + date, instructions]), set(args[1:]))\n\n # Assert calling new fn with default deprecated value issues log warning.\n self.assertEqual(3, _fn(1, 2))\n self.assertEqual(2, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_arg_values_once(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, warn_once=True,\n deprecated=True)\n def _fn(deprecated): # pylint: disable=unused-argument\n pass\n\n _fn(deprecated=False)\n self.assertEqual(0, mock_warning.call_count)\n _fn(deprecated=True)\n self.assertEqual(1, mock_warning.call_count)\n _fn(deprecated=True)\n self.assertEqual(1, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def test_deprecated_multiple_arg_values_once_each(self, mock_warning):\n date = \"2016-07-04\"\n instructions = \"This is how you update...\"\n\n @deprecation.deprecated_arg_values(date, instructions, warn_once=True,\n arg0=\"forbidden\", arg1=\"disallowed\")\n def _fn(arg0, arg1): # pylint: disable=unused-argument\n pass\n\n _fn(arg0=\"allowed\", arg1=\"also allowed\")\n self.assertEqual(0, mock_warning.call_count)\n _fn(arg0=\"forbidden\", arg1=\"disallowed\")\n self.assertEqual(2, mock_warning.call_count)\n _fn(arg0=\"forbidden\", arg1=\"allowed\")\n self.assertEqual(2, mock_warning.call_count)\n _fn(arg0=\"forbidden\", arg1=\"disallowed\")\n self.assertEqual(2, mock_warning.call_count)\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n @test_util.run_in_graph_and_eager_modes\n def test_deprecated_arg_values_when_value_is_none(self, mock_warning):\n\n @deprecation.deprecated_arg_values(\"2016-07-04\",\n \"This is how you update...\",\n warn_once=True,\n arg0=None)\n def _fn(arg0): # pylint: disable=unused-argument\n pass\n\n tensor.enable_tensor_equality()\n initial_count = mock_warning.call_count\n # Check that we avoid error from explicit `var == None` check.\n _fn(arg0=variables.Variable(0))\n self.assertEqual(initial_count, mock_warning.call_count)\n _fn(arg0=None)\n self.assertEqual(initial_count + 1, mock_warning.call_count)\n tensor.disable_tensor_equality()\n\n\nclass DeprecationArgumentsTest(test.TestCase):\n\n def testDeprecatedArgumentLookup(self):\n good_value = 3\n self.assertEqual(\n deprecation.deprecated_argument_lookup(\"val_new\", good_value, \"val_old\",\n None), good_value)\n self.assertEqual(\n deprecation.deprecated_argument_lookup(\"val_new\", None, \"val_old\",\n good_value), good_value)\n with self.assertRaisesRegex(ValueError,\n \"Cannot specify both 'val_old' and 'val_new'\"):\n\n deprecation.deprecated_argument_lookup(\"val_new\", good_value,\n \"val_old\", good_value)\n\n def testRewriteArgumentDocstring(self):\n docs = \"\"\"Add `a` and `b`\n\n Args:\n a: first arg\n b: second arg\n \"\"\"\n new_docs = deprecation.rewrite_argument_docstring(\n 
deprecation.rewrite_argument_docstring(docs, \"a\", \"left\"), \"b\", \"right\")\n new_docs_ref = \"\"\"Add `left` and `right`\n\n Args:\n left: first arg\n right: second arg\n \"\"\"\n self.assertEqual(new_docs, new_docs_ref)\n\n\nclass DeprecatedEndpointsTest(test.TestCase):\n\n def testSingleDeprecatedEndpoint(self):\n @deprecation.deprecated_endpoints(\"foo1\")\n def foo():\n pass\n self.assertEqual((\"foo1\",), foo._tf_deprecated_api_names)\n\n def testMultipleDeprecatedEndpoint(self):\n @deprecation.deprecated_endpoints(\"foo1\", \"foo2\")\n def foo():\n pass\n self.assertEqual((\"foo1\", \"foo2\"), foo._tf_deprecated_api_names)\n\n def testCannotSetDeprecatedEndpointsTwice(self):\n with self.assertRaises(deprecation.DeprecatedNamesAlreadySetError):\n @deprecation.deprecated_endpoints(\"foo1\")\n @deprecation.deprecated_endpoints(\"foo2\")\n def foo(): # pylint: disable=unused-variable\n pass\n\n\nclass DeprecateMovedModuleTest(test.TestCase):\n\n @test.mock.patch.object(logging, \"warning\", autospec=True)\n def testCallDeprecatedModule(self, mock_warning):\n from tensorflow.python.util import deprecated_module # pylint: disable=g-import-not-at-top\n self.assertEqual(0, mock_warning.call_count)\n result = deprecated_module.a()\n self.assertEqual(1, mock_warning.call_count)\n self.assertEqual(1, result)\n\n deprecated_module.a()\n self.assertEqual(1, mock_warning.call_count)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "output": ["DeprecatedArgValuesTest", "MyClass", "_Object", "DeprecateMovedModuleTest", "DeprecatedEndpointsTest", "DeprecationTest", "MyEnum", "DeprecatedAliasTest", "DeprecationArgumentsTest", "DeprecatedArgsTest", "MyStr"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/util/deprecation_test.py", "file_length": 14002, "symbol_dict": [{"symbol": "DeprecationTest", "type": "mannual_defined_class", "byte_location": 3254, "location": 932}, {"symbol": "_Object", "type": "mannual_defined_class", "byte_location": 15080, "location": 5048}, {"symbol": "DeprecatedArgValuesTest", "type": "mannual_defined_class", "byte_location": 30971, "location": 10526}, {"symbol": "DeprecatedAliasTest", "type": "mannual_defined_class", "byte_location": 1289, "location": 329}, {"symbol": "DeprecatedEndpointsTest", "type": "mannual_defined_class", "byte_location": 40271, "location": 13577}, {"symbol": "DeprecationArgumentsTest", "type": "mannual_defined_class", "byte_location": 39139, "location": 13241}, {"symbol": "MyClass", "type": "mannual_defined_class", "byte_location": 2144, "location": 592}, {"symbol": "MyStr", "type": "mannual_defined_class", "byte_location": 4463, "location": 1361}, {"symbol": "DeprecateMovedModuleTest", "type": "mannual_defined_class", "byte_location": 40975, "location": 13819}, {"symbol": "MyEnum", "type": "mannual_defined_class", "byte_location": 4985, "location": 1549}, {"symbol": "DeprecatedArgsTest", "type": "mannual_defined_class", "byte_location": 17448, "location": 5823}]}} {"input": "\"\"\"Trackable data structures.\"\"\"\n# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport collections\nimport copy\nimport sys\n\ntry:\n import wrapt\nexcept ImportError:\n # Fall back to the build-time dependency if the system package is not available.\n from .....third_party import wrapt # pylint: disable=relative-beyond-top-level\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function as defun\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.trackable import base\nfrom tensorflow.python.trackable import layer_utils\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nclass NoDependency:\n \"\"\"Allows attribute assignment to `Trackable` objects with no dependency.\n\n Example usage:\n ```python\n obj = Trackable()\n obj.has_dependency = tf.Variable(0., name=\"dep\")\n obj.no_dependency = NoDependency(tf.Variable(1., name=\"nodep\"))\n assert obj.no_dependency.name == \"nodep:0\"\n ```\n\n `obj` in this example has a dependency on the variable \"dep\", and both\n attributes contain un-wrapped `Variable` objects.\n\n `NoDependency` also works with `tf.keras.Model`, but only for checkpoint\n dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped)\n `Layer` to the attribute without a checkpoint dependency, but the `Model` will\n still track the `Layer` (so it will appear in `Model.layers`, and its\n variables will appear in `Model.variables`).\n \"\"\"\n\n __slots__ = [\"value\"]\n\n def __init__(self, value):\n self.value = value\n\n\ndef _should_wrap_tuple(t):\n \"\"\"Determine if a tuple has any trackable components.\"\"\"\n # pylint: disable=unidiomatic-typecheck\n # Exact type checking to avoid mucking up custom logic in list/dict\n # subclasses, e.g. collections.Counter.\n for element in t:\n if isinstance(element, NoDependency):\n return True # We should remove the NoDependency object from the tuple.\n if isinstance(element, base.Trackable):\n return True\n if type(element) == dict:\n return True\n if type(element) == collections.OrderedDict:\n return True\n if type(element) == list:\n return True\n if isinstance(element, tuple) and _should_wrap_tuple(element):\n return True\n # There are no trackable elements or data structures. Tuples are immutable, so\n # mutation isn't a concern. Don't wrap.\n return False\n # pylint: enable=unidiomatic-typecheck\n\n\n@tf_export(\"__internal__.tracking.wrap\", v1=[])\ndef wrap_or_unwrap(value):\n \"\"\"Wraps input value into trackable data structures.\n\n This is mostly useful for containers like list, dict, etc, which could contain\n trackable objects in it. 
Wrapped data structure will be tracked when\n associated with a `tf.Module`, so that save model/checkpoint can properly\n track the dependency.\n\n It will also unwrap NoDependency objects.\n\n Args:\n value: the input object to be wrapped.\n\n Returns:\n Wrapped trackable data structure.\n \"\"\"\n # pylint: disable=unidiomatic-typecheck\n # Exact type checking to avoid mucking up custom logic in list/dict\n # subclasses, e.g. collections.Counter.\n if isinstance(value, NoDependency):\n return value.value\n if isinstance(value, base.Trackable):\n return value # Skip conversion for already trackable objects.\n elif type(value) == dict:\n return _DictWrapper(value)\n elif type(value) == collections.OrderedDict:\n return _DictWrapper(value)\n elif type(value) == list:\n return ListWrapper(value)\n elif isinstance(value, tuple) and _should_wrap_tuple(value):\n # There are trackable elements or data structures. Wrap the tuple.\n return _TupleWrapper(value)\n else:\n return value\n # pylint: enable=unidiomatic-typecheck\n\n\n@tf_export(\"__internal__.tracking.sticky_attribute_assignment\", v1=[])\ndef sticky_attribute_assignment(trackable, name, value):\n \"\"\"Adds dependencies, generally called from __setattr__.\n\n This behavior is shared between Trackable and Model.\n\n Respects NoDependency indicators, but otherwise makes trackable objects\n out of common data structures and tracks objects by their attribute names.\n\n Args:\n trackable: The object to add dependencies to (generally the one having\n an attribute assigned).\n name: The attribute name being assigned.\n value: The value being assigned. Not necessarily a trackable object.\n\n Returns:\n The value which should be stored in the attribute (unwrapped from a\n NoDependency object if necessary).\n \"\"\"\n if isinstance(value, NoDependency):\n add_dependency = False\n else:\n add_dependency = True\n value = wrap_or_unwrap(value)\n if not add_dependency:\n return value\n if isinstance(value, base.Trackable):\n trackable._track_trackable( # pylint: disable=protected-access\n value, name=name,\n # Allow the user to switch the Trackable which is tracked by this\n # name, since assigning a new variable to an attribute has\n # historically been fine (e.g. Adam did this).\n overwrite=True)\n return value\n\n\nclass _UntrackableError(ValueError):\n\n def __init__(self, value): # pylint: disable=super-init-not-called\n self._value = value\n\n def __str__(self):\n return (\"Only trackable objects (such as Layers or Optimizers) may be \"\n f\"stored in a List object. Got {self._value}, which does not \"\n \"inherit from Trackable.\")\n\n\n@tf_export(\"__internal__.tracking.TrackableDataStructure\", v1=[])\nclass TrackableDataStructure(base.Trackable):\n \"\"\"Base class for data structures which contain trackable objects.\"\"\"\n\n def __init__(self):\n # Attributes prefixed with \"_self_\" for compatibility with\n # wrapt.ObjectProxy. 
All additional attrs MUST conform to this pattern, as\n # extending `__slots__` on a subclass of ObjectProxy breaks in a variety of\n # ways.\n self._self_trainable = True\n self._self_extra_variables = []\n self._self_attribute_sentinel = layer_utils.AttributeSentinel(True)\n\n @property\n def _attribute_sentinel(self):\n return self._self_attribute_sentinel\n\n @property\n def trainable(self):\n return self._self_trainable\n\n @trainable.setter\n def trainable(self, value):\n self._self_trainable = value\n\n def _track_value(self, value, name):\n \"\"\"Add a dependency on `value`.\"\"\"\n value = sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n if isinstance(value, variables.Variable):\n self._self_extra_variables.append(value)\n if not isinstance(value, base.Trackable):\n raise _UntrackableError(value)\n if hasattr(value, \"_use_resource_variables\"):\n # In subclassed models, legacy layers (tf.layers) must always use\n # resource variables.\n value._use_resource_variables = True # pylint: disable=protected-access\n value_attribute_sentinel = getattr(value, \"_attribute_sentinel\", None)\n if value_attribute_sentinel:\n value_attribute_sentinel.add_parent(self._attribute_sentinel)\n return value\n\n @property\n def _values(self):\n \"\"\"An iterable/sequence which may contain trackable objects.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n @property\n def _layers(self):\n \"\"\"All Layers and Layer containers, including empty containers.\"\"\"\n # Filter objects on demand so that wrapper objects use values from the thing\n # they're wrapping if out of sync.\n collected = []\n for obj in self._values:\n if (isinstance(obj, TrackableDataStructure)\n or layer_utils.is_layer(obj)\n or layer_utils.has_weights(obj)):\n collected.append(obj)\n return collected\n\n @property\n def layers(self):\n return list(layer_utils.filter_empty_layer_containers(self._layers))\n\n @property\n def trainable_weights(self):\n if not self._self_trainable:\n return []\n trainable_variables = []\n for obj in self._values:\n if isinstance(obj, base.Trackable) and hasattr(\n obj, \"trainable_variables\"):\n trainable_variables += obj.trainable_variables\n trainable_extra_variables = [\n v for v in self._self_extra_variables if v.trainable\n ]\n return trainable_variables + trainable_extra_variables\n\n @property\n def non_trainable_weights(self):\n trainable_extra_variables = [\n v for v in self._self_extra_variables if v.trainable\n ]\n non_trainable_extra_variables = [\n v for v in self._self_extra_variables if not v.trainable\n ]\n non_trainable_variables = []\n for obj in self._values:\n if isinstance(obj, base.Trackable) and hasattr(\n obj, \"non_trainable_variables\"):\n non_trainable_variables += obj.non_trainable_variables\n\n if not self._self_trainable:\n # Return order is all trainable vars, then all non-trainable vars.\n trainable_variables = []\n for obj in self._values:\n if isinstance(obj, base.Trackable) and hasattr(\n obj, \"trainable_variables\"):\n trainable_variables += obj.trainable_variables\n\n non_trainable_variables = (\n trainable_variables + trainable_extra_variables +\n non_trainable_variables + non_trainable_extra_variables)\n else:\n non_trainable_variables = (\n non_trainable_variables + non_trainable_extra_variables)\n\n return non_trainable_variables\n\n @property\n def weights(self):\n return self.trainable_weights + self.non_trainable_weights\n\n @property\n def trainable_variables(self):\n return self.trainable_weights\n\n @property\n def 
non_trainable_variables(self):\n return self.non_trainable_weights\n\n @property\n def variables(self):\n return self.weights\n\n @property\n def updates(self):\n \"\"\"Aggregate updates from any `Layer` instances.\"\"\"\n # Updates and conditional losses are forwarded as-is rather than being\n # filtered based on inputs, since this is just a container and won't ever\n # have any inputs.\n aggregated = []\n for layer in self.layers:\n if hasattr(layer, \"updates\"):\n aggregated += layer.updates\n return aggregated\n\n @property\n def losses(self):\n \"\"\"Aggregate losses from any `Layer` instances.\"\"\"\n aggregated = []\n for layer in self.layers:\n if hasattr(layer, \"losses\"):\n aggregated += layer.losses\n return aggregated\n\n def __hash__(self):\n # Support object-identity hashing, so these structures can be used as keys\n # in sets/dicts.\n return id(self)\n\n def __eq__(self, other):\n # Similar to Tensors, trackable data structures use object-identity\n # equality to support set/dict membership.\n return self is other\n\n\nclass List(TrackableDataStructure, collections_abc.Sequence):\n \"\"\"An append-only sequence type which is trackable.\n\n Maintains checkpoint dependencies on its contents (which must also be\n trackable), and forwards any `Layer` metadata such as updates and losses.\n\n Note that `List` is purely a container. It lets a `tf.keras.Model` or\n other trackable object know about its contents, but does not call any\n `Layer` instances which are added to it. To indicate a sequence of `Layer`\n instances which should be called sequentially, use `tf.keras.Sequential`.\n\n Example usage:\n ```python\n class HasList(tf.keras.Model):\n\n def __init__(self):\n super().__init__()\n self.layer_list = List([layers.Dense(3)])\n self.layer_list.append(layers.Dense(4))\n\n def call(self, x):\n aggregation = 0.\n for l in self.layer_list:\n x = l(x)\n aggregation += tf.reduce_sum(x)\n return aggregation\n ```\n\n This kind of wrapping is necessary because `Trackable` objects do not\n (yet) deeply inspect regular Python data structures, so for example assigning\n a regular list (`self.layer_list = [layers.Dense(3)]`) does not create a\n checkpoint dependency and does not add the `Layer` instance's weights to its\n parent `Model`.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Construct a new sequence. 
Arguments are passed to `list()`.\"\"\"\n super().__init__()\n self._storage = self._make_storage(*args, **kwargs)\n for index, element in enumerate(self._storage):\n self._storage[index] = self._track_value(\n element, name=self._name_element(index))\n\n def copy(self):\n return type(self)(copy.copy(self._storage))\n\n def __copy__(self):\n return self.copy()\n\n def __deepcopy__(self, memo):\n return type(self)(copy.deepcopy(self._storage, memo))\n\n def _make_storage(self, *args, **kwargs):\n \"\"\"Determines the backing storage (overridden in subclasses).\"\"\"\n return list(*args, **kwargs)\n\n def _name_element(self, index):\n return \"%d\" % (index,)\n\n @property\n def _values(self):\n \"\"\"Collect values for TrackableDataStructure.\"\"\"\n return self\n\n def append(self, value):\n \"\"\"Add a new trackable value.\"\"\"\n value = self._track_value(value, self._name_element(len(self._storage)))\n self._storage.append(value)\n\n def extend(self, values):\n \"\"\"Add a sequence of trackable values.\"\"\"\n for value in values:\n self.append(value)\n\n def __iadd__(self, values):\n self.extend(values)\n return self\n\n def __add__(self, other):\n return self._storage + getattr(other, \"_storage\", other)\n\n def __imul__(self, y):\n if y <= 0:\n raise ValueError(\n f\"List only supports append, multiplying in place by {y} removes \"\n \"elements.\")\n\n n = len(self._storage)\n for _ in range(y - 1):\n for i in range(n):\n self.append(self._storage[i])\n\n return self\n\n def __mul__(self, n):\n return self._storage * n\n\n def __rmul__(self, n):\n return self * n\n\n def __radd__(self, other):\n return other + self._storage\n\n def __getitem__(self, key):\n return self._storage[key]\n\n def __getslice__(self, i, j):\n return self._storage[slice(i, j)]\n\n def __len__(self):\n return len(self._storage)\n\n def __repr__(self):\n return \"List(%s)\" % (repr(self._storage),)\n\n def __sizeof__(self):\n return super().__sizeof__() + sys.getsizeof(self._storage)\n\n\n# TODO(tomhennigan) Update to collections.UserList?\n# TODO(allenl): Try switching this to wrapt.ObjectProxy again when we drop\n# Python 3.4 support (may still be tricky).\nclass ListWrapper(\n List,\n collections_abc.MutableSequence,\n # Shadowed, but there for isinstance checks.\n list):\n \"\"\"Wraps the built-in `list` to support restore-on-create for variables.\n\n Unlike `List`, this sequence type is mutable in the same ways built-in lists\n are. Instead of throwing an error immediately like `List`, it records\n problematic mutations (e.g. assigning a new element to a position already\n occupied, meaning both elements get the same names at different times) and\n refuses to save.\n\n On assignment to an attribute of a Model or Trackable object, Python\n lists are replaced with ListWrapper. Wrapping a list in a\n `NoDependency` object prevents this.\n \"\"\"\n\n def __init__(self, wrapped_list):\n \"\"\"Construct a new list wrapper.\n\n Args:\n wrapped_list: The initial value of the data structure. A shallow copy may\n be maintained for error checking. 
`wrapped_list` itself should not be\n modified directly after constructing the `ListWrapper`, and if changes\n are detected the `ListWrapper` will throw an exception on save.\n \"\"\"\n # Monotonic flags which indicate this object would not be restored properly,\n # and therefore should throw an error on save to avoid giving the impression\n # that restoring it will work.\n self._non_append_mutation_value = False\n self._external_modification_value = False\n super().__init__(wrapped_list)\n self._last_wrapped_list_snapshot = list(self._storage)\n\n @property\n def _non_append_mutation(self):\n return self._non_append_mutation_value\n\n @_non_append_mutation.setter\n def _non_append_mutation(self, value):\n # Trackable only cares that a mutation occurred at some point; when\n # attempting to save it checks whether a mutation occurred and the object is\n # in a \"dirty\" state but otherwise the specifics of how it got to that state\n # are ignored. By contrast, the attribute cache needs to signal the mutation\n # immediately since a caller could query the value of an attribute (And\n # should not hit the cached value since the mutation may have affected the\n # result.)\n self._attribute_sentinel.invalidate_all()\n self._non_append_mutation_value = value\n\n @property\n def _external_modification(self):\n return self._external_modification_value\n\n @_external_modification.setter\n def _external_modification(self, value):\n # Invalidate for the same reason as `_non_append_mutation`\n self._attribute_sentinel.invalidate_all()\n self._external_modification_value = value\n\n # pylint: disable=protected-access\n def __copy__(self):\n copied = super().__copy__()\n copied._non_append_mutation = self._non_append_mutation\n copied._external_modification = self._external_modification\n return copied\n\n def __deepcopy__(self, memo):\n copied = super().__deepcopy__(memo)\n copied._non_append_mutation = self._non_append_mutation\n copied._external_modification = self._external_modification\n return copied\n # pylint: enable=protected-access\n\n def __reduce_ex__(self, protocol):\n return (self.__class__,\n (self._storage,))\n\n def _make_storage(self, wrapped_list):\n \"\"\"Use the user's original list for storage.\"\"\"\n return wrapped_list\n\n def _check_external_modification(self):\n \"\"\"Checks for any changes to the wrapped list not through the wrapper.\"\"\"\n if self._external_modification or self._non_append_mutation:\n return\n if self._storage != self._last_wrapped_list_snapshot:\n self._external_modification = True\n self._last_wrapped_list_snapshot = None\n\n def _update_snapshot(self):\n \"\"\"Acknowledges tracked changes to the wrapped list.\"\"\"\n\n # Mutation tracking for attributes reuses the same infrastructure as\n # Trackable mutation tracking.\n self._attribute_sentinel.invalidate_all()\n if self._external_modification or self._non_append_mutation:\n return\n self._last_wrapped_list_snapshot = list(self._storage)\n\n def _trackable_children(self, save_type=base.SaveType.CHECKPOINT, **kwargs):\n self._check_external_modification()\n if self._non_append_mutation:\n raise ValueError(\n f\"Unable to save the object {self} (a list wrapper constructed to \"\n \"track trackable TensorFlow objects). A list element was replaced \"\n \"(__setitem__, __setslice__), deleted (__delitem__, __delslice__), \"\n \"or moved (sort). 
In order to support restoration on object \"\n \"creation, tracking is exclusively for append-only data structures.\"\n \"\\n\\nIf you don't need this list checkpointed, wrap it in a \"\n \"non-trackable object; it will be subsequently ignored.\")\n if self._external_modification:\n raise ValueError(\n f\"Unable to save the object {self} (a list wrapper constructed to \"\n \"track trackable TensorFlow objects). The wrapped list was modified \"\n f\"outside the wrapper (its final value was {self._storage}, its value\"\n \" when a checkpoint dependency was added was \"\n f\"{self._last_wrapped_list_snapshot}), which breaks \"\n \"restoration on object creation.\\n\\nIf you don't need this list \"\n \"checkpointed, wrap it in a NoDependency object; it will be \"\n \"subsequently ignored.\")\n children = super()._trackable_children(save_type, **kwargs)\n\n if save_type == base.SaveType.SAVEDMODEL:\n # Add functions to be serialized.\n children.update({\n str(key): value\n for key, value in enumerate(self)\n if _is_function(value)\n })\n\n return children\n\n def _has_mutation_or_trackable(self):\n \"\"\"Short-circuits a check for trackables if there's already a mutation.\"\"\"\n if self._non_append_mutation:\n return True\n return any(isinstance(element, base.Trackable) for element in self._storage)\n\n def __delitem__(self, key):\n self._check_external_modification()\n if self._has_mutation_or_trackable():\n self._non_append_mutation = True\n del self._storage[key]\n self._update_snapshot()\n\n def __setitem__(self, key, value):\n self._check_external_modification()\n\n if isinstance(key, slice):\n # Note: this is quite inefficient, but the list API supports a broad range\n # of slice setters (e.g. truncate, extend, replace) and imitating this\n # for a range of Python versions is non-trivial.\n storage_copy = list(self._storage)\n self._storage[key] = value\n\n len_before = len(storage_copy)\n len_now = len(self._storage)\n for i in range(max(len_before, len_now)):\n value_now = self._storage[i] if i < len_now else None\n value_before = storage_copy[i] if i < len_before else None\n\n if isinstance(value_before, base.Trackable):\n self._non_append_mutation = True\n\n if value_now is not None and value_now != value_before:\n self._storage[i] = self._track_value(self._storage[i],\n self._name_element(i))\n\n else:\n if isinstance(self._storage[key], base.Trackable):\n self._non_append_mutation = True\n self._storage[key] = self._track_value(value, self._name_element(key))\n\n self._update_snapshot()\n\n def append(self, value):\n \"\"\"Add a new trackable value.\"\"\"\n self._check_external_modification()\n super().append(value)\n self._update_snapshot()\n\n def extend(self, values):\n \"\"\"Add a sequence of trackable values.\"\"\"\n self._check_external_modification()\n super().extend(values)\n self._update_snapshot()\n\n def __imul__(self, y):\n if y <= 0:\n self._check_external_modification()\n if self._has_mutation_or_trackable():\n self._non_append_mutation = True\n self._storage *= y\n self._update_snapshot()\n return self\n\n # Relies on super() calling append, which updates the snapshot.\n return super().__imul__(y)\n\n def __eq__(self, other):\n return self._storage == getattr(other, \"_storage\", other)\n\n def __ne__(self, other):\n return self._storage != getattr(other, \"_storage\", other)\n\n def __lt__(self, other):\n return self._storage < getattr(other, \"_storage\", other)\n\n def __le__(self, other):\n return self._storage <= getattr(other, \"_storage\", other)\n\n def 
__gt__(self, other):\n return self._storage > getattr(other, \"_storage\", other)\n\n def __ge__(self, other):\n return self._storage >= getattr(other, \"_storage\", other)\n\n def __hash__(self):\n # List wrappers need to compare like regular lists, and so like regular\n # lists they don't belong in hash tables.\n raise TypeError(\"unhashable type: 'ListWrapper'\")\n\n def insert(self, index, obj):\n self._check_external_modification()\n if (self._has_mutation_or_trackable() or isinstance(obj, base.Trackable)):\n self._non_append_mutation = True\n self._storage.insert(index, obj)\n self._update_snapshot()\n\n def sort(self):\n self._check_external_modification()\n if self._has_mutation_or_trackable():\n self._non_append_mutation = True\n self._storage.sort()\n self._update_snapshot()\n\n def __setslice__(self, i, j, y):\n self.__setitem__(slice(i, j), y)\n\n def __delslice__(self, i, j):\n self._check_external_modification()\n if self._has_mutation_or_trackable():\n self._non_append_mutation = True\n del self._storage[slice(i, j)]\n self._update_snapshot()\n\n def _track_value(self, value, name):\n \"\"\"Allows storage of non-trackable objects.\"\"\"\n try:\n value = super()._track_value(value=value, name=name)\n except ValueError:\n # Even if this value isn't trackable, we need to make sure\n # NoDependency objects get unwrapped.\n value = sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n return value\n\n def __repr__(self):\n return \"ListWrapper(%s)\" % (repr(self._storage),)\n\n\nclass Mapping(TrackableDataStructure, collections_abc.Mapping):\n \"\"\"An append-only trackable mapping data structure with string keys.\n\n Maintains checkpoint dependencies on its contents (which must also be\n trackable), named based on its keys.\n\n Note that once a key has been added, it may not be deleted or replaced.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Construct a new sequence. Arguments are passed to `dict()`.\"\"\"\n super().__init__()\n self._storage = self._make_storage(*args, **kwargs)\n self._storage.update(\n {key: self._track_value(\n value, name=self._name_element(key))\n for key, value in self._storage.items()})\n\n def __copy__(self):\n return type(self)(copy.copy(self._storage))\n\n def __deepcopy__(self, memo):\n return type(self)(copy.deepcopy(self._storage, memo))\n\n def _make_storage(self, *args, **kwargs):\n return dict(*args, **kwargs)\n\n @property\n def _values(self):\n \"\"\"Collect values for TrackableDataStructure.\"\"\"\n # Sort items deterministically by key\n ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))\n if ordered:\n return ordered[1]\n return []\n\n def _name_element(self, key):\n if not isinstance(key, str):\n raise TypeError(\n f\"Mapping accepts only string keys, but got a key {repr(key)}.\")\n return str(key)\n\n def __setitem__(self, key, value):\n name = self._name_element(key)\n value = self._track_value(value, name=name)\n current_value = self._storage.setdefault(key, value)\n if current_value is not value:\n raise ValueError(\n \"Mappings are an append-only data structure. 
Tried to overwrite the \"\n f\"key '{key}' with value {value}, but it already contains \"\n f\"{current_value}\")\n\n def update(self, *args, **kwargs):\n for key, value in dict(*args, **kwargs).items():\n self[key] = value\n\n def __getitem__(self, key):\n return self._storage[key]\n\n def __len__(self):\n return len(self._storage)\n\n def __repr__(self):\n return \"Mapping(%s)\" % (repr(self._storage),)\n\n def __iter__(self):\n return iter(self._storage)\n\n\nclass _DictWrapper(TrackableDataStructure, wrapt.ObjectProxy):\n \"\"\"Wraps built-in dicts to support restore-on-create for variables.\n\n _DictWrapper is to Mapping as ListWrapper is to List. Unlike Mapping,\n _DictWrapper allows non-string keys and values and arbitrary mutations (delete\n keys, reassign values). Like ListWrapper, these mutations mean that\n _DictWrapper will raise an exception on save.\n \"\"\"\n\n def __init__(self, wrapped_dict=None):\n if wrapped_dict is None:\n # Allow zero-argument construction, e.g. from session.run's re-wrapping.\n wrapped_dict = {}\n if not isinstance(wrapped_dict, collections_abc.Mapping):\n # Allow construction from a sequence, e.g. from nest.pack_sequence_as.\n wrapped_dict = dict(wrapped_dict)\n wrapt.ObjectProxy.__init__(self, wrapped_dict)\n TrackableDataStructure.__init__(self)\n self._self_non_string_key = False\n self._self_external_modification = False\n self.__wrapped__.update(\n {key: self._track_value(\n value, name=self._name_element(key))\n for key, value in self.__wrapped__.items()})\n self._update_snapshot()\n\n def __reduce_ex__(self, protocol):\n return (self.__class__,\n (self.__wrapped__,))\n\n def __getattribute__(self, name):\n if (hasattr(type(self), name)\n and isinstance(getattr(type(self), name), property)):\n # Bypass ObjectProxy for properties. 
Whether this workaround is necessary\n # appears to depend on the Python version but not the wrapt version: 3.4\n # in particular seems to look up properties on the wrapped object instead\n # of the wrapper without this logic.\n return object.__getattribute__(self, name)\n else:\n # Raise TypeError as AttributeError to fix breakage in wrapt 1.15 for\n # `__getattribute__` as suggested in discussion with library author in\n # GitHub https://github.com/GrahamDumpleton/wrapt/issues/231\n try:\n return super().__getattribute__(name)\n except TypeError as e:\n raise AttributeError from e\n\n def copy(self):\n return copy.copy(self)\n\n # pylint: disable=protected-access\n def __copy__(self):\n copied = _DictWrapper(copy.copy(self.__wrapped__))\n copied._self_external_modification = self._self_external_modification\n copied._self_non_string_key = self._self_non_string_key\n return copied\n\n def __deepcopy__(self, memo):\n copied = _DictWrapper(copy.deepcopy(self.__wrapped__, memo))\n copied._self_external_modification = self._self_external_modification\n copied._self_non_string_key = self._self_non_string_key\n return copied\n # pylint: enable=protected-access\n\n @property\n def _values(self):\n \"\"\"Collect values for TrackableDataStructure.\"\"\"\n # Sort items deterministically by key\n ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))\n if ordered:\n return ordered[1]\n return []\n\n def _trackable_children(self, save_type=base.SaveType.CHECKPOINT, **kwargs):\n \"\"\"Check that the object is saveable before listing its dependencies.\"\"\"\n self._check_self_external_modification()\n if self._self_non_string_key:\n raise ValueError(\n f\"Unable to save the object {self} (a dictionary wrapper constructed \"\n \"automatically on attribute assignment). The wrapped dictionary \"\n \"contains a non-string key which maps to a trackable object or \"\n \"mutable data structure.\\n\\nIf you don't need this dictionary \"\n \"checkpointed, wrap it in a non-trackable \"\n \"object; it will be subsequently ignored.\")\n if self._self_external_modification:\n raise ValueError(\n f\"Unable to save the object {self} (a dictionary wrapper constructed \"\n \"automatically on attribute assignment). 
The wrapped dictionary was \"\n f\"modified outside the wrapper (its final value was {self}, its value\"\n \" when a checkpoint dependency was added was \"\n f\"{self._self_last_wrapped_dict_snapshot}), which breaks \"\n \"restoration on object creation.\\n\\nIf you don't need this \"\n \"dictionary checkpointed, wrap it in a \"\n \"non-trackable object; it will be subsequently ignored.\")\n assert not self._dirty # Any reason for dirtiness should have an exception.\n children = super()._trackable_children(save_type, **kwargs)\n\n if save_type == base.SaveType.SAVEDMODEL:\n # Add functions to be serialized.\n children.update(\n {key: value for key, value in self.items() if _is_function(value)})\n\n return children\n\n @property\n def _dirty(self):\n \"\"\"Check if there has already been a mutation which prevents saving.\"\"\"\n return (self._self_external_modification\n or self._self_non_string_key)\n\n def _check_self_external_modification(self):\n \"\"\"Checks for any changes to the wrapped dict not through the wrapper.\"\"\"\n if self._dirty:\n return\n if self != self._self_last_wrapped_dict_snapshot:\n self._self_external_modification = True\n self._self_last_wrapped_dict_snapshot = None\n\n def _update_snapshot(self):\n \"\"\"Acknowledges tracked changes to the wrapped dict.\"\"\"\n self._attribute_sentinel.invalidate_all()\n if self._dirty:\n return\n self._self_last_wrapped_dict_snapshot = dict(self)\n\n def _track_value(self, value, name):\n \"\"\"Allows storage of non-trackable objects.\"\"\"\n if isinstance(name, str):\n string_key = True\n else:\n name = \"-non_string_key\"\n string_key = False\n try:\n no_dependency = isinstance(value, NoDependency)\n value = super()._track_value(value=value, name=name)\n if not (string_key or no_dependency):\n # A non-string key maps to a trackable value. 
This data structure\n # is not saveable.\n self._self_non_string_key = True\n return value\n except ValueError:\n # Even if this value isn't trackable, we need to make sure\n # NoDependency objects get unwrapped.\n return sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n\n def _name_element(self, key):\n \"\"\"Tells TrackableDataStructure to use keys as names as-is.\"\"\"\n return key\n\n def __setitem__(self, key, value):\n \"\"\"Allow any modifications, but possibly mark the wrapper as unsaveable.\"\"\"\n self._check_self_external_modification()\n self._maybe_initialize_trackable()\n no_dep = isinstance(value, NoDependency)\n if isinstance(key, str):\n value = self._track_value(value, name=key)\n else:\n value = wrap_or_unwrap(value)\n if not no_dep and isinstance(value, base.Trackable):\n # Non-string keys are OK as long as we have no reason to add a\n # dependency on the value (either because the value is not\n # trackable, or because it was wrapped in a NoDependency object).\n self._self_non_string_key = True\n self.__wrapped__[key] = value\n\n self._update_snapshot()\n\n def __delitem__(self, key):\n self._check_self_external_modification()\n del self.__wrapped__[key]\n self._update_snapshot()\n\n def __repr__(self):\n return \"DictWrapper(%s)\" % (repr(self.__wrapped__),)\n\n def __hash__(self):\n raise TypeError(\"unhashable type: 'DictWrapper'\")\n\n def __eq__(self, other):\n # Override the TrackableDataStructure \"== -> is\" forwarding and go back to\n # the wrapt implementation.\n return self.__wrapped__ == other\n\n def update(self, *args, **kwargs):\n for key, value in dict(*args, **kwargs).items():\n self[key] = value\n\n\nclass _TupleWrapper(TrackableDataStructure, wrapt.ObjectProxy):\n \"\"\"Trackable wrapper for tuples and namedtuples.\"\"\"\n\n def __init__(self, original_wrapped_tuple=()):\n add_dependency = []\n substituted_wrapped_tuple = []\n for element in original_wrapped_tuple:\n if isinstance(element, NoDependency):\n add_dependency.append(False)\n else:\n add_dependency.append(True)\n substituted_wrapped_tuple.append(wrap_or_unwrap(element))\n try:\n fields = original_wrapped_tuple._fields\n except AttributeError:\n # Not a namedtuple\n is_namedtuple = False\n else:\n is_namedtuple = True\n original_type = type(original_wrapped_tuple)\n # Flag to poison saving if we can't re-construct a namedtupled because its\n # __new__ takes different keyword arguments than its _fields.\n self._self_tuple_is_constructable = True\n if is_namedtuple:\n try:\n # NamedTuples take N arguments, unlike tuple which takes a sequence.\n substituted_wrapped_tuple = original_type(\n **dict(zip(fields, substituted_wrapped_tuple)))\n except TypeError:\n wrapt.ObjectProxy.__init__(self, original_wrapped_tuple)\n TrackableDataStructure.__init__(self)\n self._self_tuple_is_constructable = False\n return\n else:\n substituted_wrapped_tuple = original_type(substituted_wrapped_tuple)\n wrapt.ObjectProxy.__init__(self, substituted_wrapped_tuple)\n TrackableDataStructure.__init__(self)\n\n if is_namedtuple:\n # For namedtuples, also track by names for compatibility with\n # dictionaries.\n for name, should_depend, element in zip(\n fields, add_dependency, substituted_wrapped_tuple):\n if should_depend:\n self._track_value(element, name=name)\n\n # Track by index as well, for compatibility with lists.\n for index, (should_depend, element) in enumerate(\n zip(add_dependency, substituted_wrapped_tuple)):\n if should_depend:\n self._track_value(element, name=\"%d\" % 
(index,))\n\n @property\n def _values(self):\n \"\"\"Collect values for TrackableDataStructure.\"\"\"\n return self\n\n def _track_value(self, value, name):\n \"\"\"Allows storage of non-trackable objects.\"\"\"\n try:\n value = super()._track_value(value=value, name=name)\n except ValueError:\n # Even if this value isn't trackable, we need to make sure\n # NoDependency objects get unwrapped.\n value = sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n return value\n\n def __repr__(self):\n return \"_TupleWrapper(%s)\" % (repr(self.__wrapped__),)\n\n def __hash__(self):\n # Override the TrackableDataStructure hash forwarding and go back to\n # the wrapt implementation.\n return hash(self.__wrapped__)\n\n def __eq__(self, other):\n # Override the TrackableDataStructure \"== -> is\" forwarding and go back to\n # the wrapt implementation.\n return self.__wrapped__ == other\n\n def __copy__(self):\n return _TupleWrapper(copy.copy(self.__wrapped__))\n\n def __deepcopy__(self, memo):\n return _TupleWrapper(copy.deepcopy(self.__wrapped__, memo))\n\n def __reduce_ex__(self, protocol):\n return (self.__class__,\n (self.__wrapped__,))\n\n # imul and iadd are the only tuple-relevant in-place operators. They need to\n # be special-cased to avoid mutating the original proxy object.\n def __imul__(self, y):\n \"\"\"Avoid running self.__wrapped__ *= y, which mutates `self`.\"\"\"\n return self.__wrapped__ * y\n\n def __iadd__(self, y):\n \"\"\"Avoid running self.__wrapped__ += y, which mutates `self`.\"\"\"\n return self.__wrapped__ + y\n\n def _trackable_children(self, save_type=base.SaveType.CHECKPOINT, **kwargs):\n if not self._self_tuple_is_constructable:\n raise ValueError(\n f\"Unable to save because the namedtuple {self.__wrapped__} is not \"\n \"constructable from its _fields (i.e. __new__ is overridden). \"\n f\"Expected keyword arguments {self.__wrapped__._fields}. If you do \"\n \"not need to save this object, consider wrapping it in a custom \"\n \"object that does not inherit from tuple.\")\n return super()._trackable_children(save_type, **kwargs)\n\n def __getattribute__(self, name):\n if name != \"__wrapped__\" and hasattr(self.__wrapped__, name):\n # Prefer attributes on the wrapped object when they conflict with\n # attributes on the wrapper object.\n return getattr(self.__wrapped__, name)\n\n if (hasattr(type(self), name)\n and isinstance(getattr(type(self), name), property)):\n # Bypass ObjectProxy for properties. 
Whether this workaround is necessary\n # appears to depend on the Python version but not the wrapt version: 3.4\n # in particular seems to look up properties on the wrapped object instead\n # of the wrapper without this logic.\n return object.__getattribute__(self, name)\n else:\n return super().__getattribute__(name)\n\n\ndef _is_function(x):\n return isinstance(x, (def_function.Function, defun.ConcreteFunction))\n\n\ndef set_list_item(list_object, index_string, value):\n item_index = int(index_string)\n if len(list_object) <= item_index:\n list_object.extend([None] * (1 + item_index - len(list_object)))\n list_object[item_index] = value\n\n\ndef set_tuple_item(list_object, index_string, value):\n try:\n item_index = int(index_string)\n except ValueError:\n # Ignore namedtuple fields.\n return\n if len(list_object) <= item_index:\n list_object.extend([None] * (1 + item_index - len(list_object)))\n list_object[item_index] = value\n", "output": ["set_tuple_item", "_is_function", "set_list_item", "_should_wrap_tuple", "_DictWrapper", "_TupleWrapper", "TrackableDataStructure", "List", "ListWrapper", "NoDependency", "_UntrackableError", "Mapping"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/trackable/data_structures.py", "file_length": 11727, "symbol_dict": [{"symbol": "set_list_item", "type": "mannual_defined_function", "byte_location": 39224, "location": 11543}, {"symbol": "_should_wrap_tuple", "type": "mannual_defined_function", "byte_location": 2213, "location": 619}, {"symbol": "_is_function", "type": "mannual_defined_function", "byte_location": 39129, "location": 11510}, {"symbol": "set_tuple_item", "type": "mannual_defined_function", "byte_location": 39452, "location": 11625}, {"symbol": "TrackableDataStructure", "type": "mannual_defined_class", "byte_location": 6083, "location": 1733}, {"symbol": "_TupleWrapper", "type": "mannual_defined_class", "byte_location": 34181, "location": 10034}, {"symbol": "NoDependency", "type": "mannual_defined_class", "byte_location": 1330, "location": 350}, {"symbol": "ListWrapper", "type": "mannual_defined_class", "byte_location": 14732, "location": 4331}, {"symbol": "Mapping", "type": "mannual_defined_class", "byte_location": 24567, "location": 7202}, {"symbol": "_UntrackableError", "type": "mannual_defined_class", "byte_location": 5671, "location": 1603}, {"symbol": "List", "type": "mannual_defined_class", "byte_location": 11170, "location": 3218}, {"symbol": "_DictWrapper", "type": "mannual_defined_class", "byte_location": 26693, "location": 7853}]}} {"input": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for the TypeSpec base class.\"\"\"\n\nimport collections\n\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nfrom tensorflow.core.framework import full_type_pb2\nfrom tensorflow.core.function import trace_type\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.framework import type_spec_registry\nfrom tensorflow.python.framework.type_utils import fulltypes_for_flat_tensors\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.compat import collections_abc\n\n\nclass TwoTensors:\n \"\"\"A simple value type to test TypeSpec.\n\n Contains two tensors (x, y) and a string (color). The color value is a\n stand-in for any extra type metadata we might need to store.\n \"\"\"\n\n def __init__(self, x, y, color=\"red\"):\n assert isinstance(color, str)\n self.x = ops.convert_to_tensor(x)\n self.y = ops.convert_to_tensor(y)\n self.color = color\n\n\n@type_spec_registry.register(\"tf.TwoTensorsSpec\")\nclass TwoTensorsSpec(type_spec.TypeSpec):\n \"\"\"A TypeSpec for the TwoTensors value type.\"\"\"\n\n def __init__(self, x_shape, x_dtype, y_shape, y_dtype, color=\"red\"):\n self.x_shape = tensor_shape.as_shape(x_shape)\n self.x_dtype = dtypes.as_dtype(x_dtype)\n self.y_shape = tensor_shape.as_shape(y_shape)\n self.y_dtype = dtypes.as_dtype(y_dtype)\n self.color = color\n\n value_type = property(lambda self: TwoTensors)\n\n @property\n def _component_specs(self):\n return (tensor_spec.TensorSpec(self.x_shape, self.x_dtype),\n tensor_spec.TensorSpec(self.y_shape, self.y_dtype))\n\n def _to_components(self, value):\n return (value.x, value.y)\n\n def _from_components(self, components):\n x, y = components\n return TwoTensors(x, y, self.color)\n\n def _serialize(self):\n return (self.x_shape, self.x_dtype, self.y_shape, self.y_dtype, self.color)\n\n @classmethod\n def from_value(cls, value):\n return cls(value.x.shape, value.x.dtype, value.y.shape, value.y.dtype,\n value.color)\n\n\n@type_spec_registry.register(\"tf.TwoTensorsSpecTwin\")\nclass TwoTensorsSpecTwin(TwoTensorsSpec):\n pass\n\n\n@type_spec_registry.register(\"tf.TwoTensorsSpecVariableSerialize\")\nclass TwoTensorsSpecVariableSerialize(TwoTensorsSpec):\n\n def _serialize(self):\n if self.color == \"smaller_tuple\":\n return (self.x_shape, self.x_dtype, self.y_shape, self.y_dtype)\n elif self.color == \"different_order\":\n return (self.y_shape, self.x_shape, self.y_dtype, self.color,\n self.x_dtype)\n\n return 
(self.x_shape, self.x_dtype, self.y_shape, self.y_dtype, self.color)\n\n\ntype_spec.register_type_spec_from_value_converter(\n TwoTensors, TwoTensorsSpec.from_value)\n\n\nclass TwoComposites:\n \"\"\"A simple value type to test TypeSpec.\n\n Contains two composite tensorstensors (x, y) and a string (color).\n \"\"\"\n\n def __init__(self, x, y, color=\"red\"):\n assert isinstance(color, str)\n self.x = ops.convert_to_tensor_or_composite(x)\n self.y = ops.convert_to_tensor_or_composite(y)\n self.color = color\n\n\n@type_spec_registry.register(\"tf.TwoCompositesSpec\")\nclass TwoCompositesSpec(type_spec.BatchableTypeSpec):\n \"\"\"A TypeSpec for the TwoTensors value type.\"\"\"\n\n def __init__(self, x_spec, y_spec, color=\"red\"):\n self.x_spec = x_spec\n self.y_spec = y_spec\n self.color = color\n\n value_type = property(lambda self: TwoComposites)\n\n @property\n def _component_specs(self):\n return (self.x_spec, self.y_spec)\n\n def _to_components(self, value):\n return (value.x, value.y)\n\n def _from_components(self, components):\n x, y = components\n return TwoComposites(x, y, self.color)\n\n def _serialize(self):\n return (self.x_spec, self.y_spec, self.color)\n\n @classmethod\n def from_value(cls, value):\n return cls(type_spec.type_spec_from_value(value.x),\n type_spec.type_spec_from_value(value.y),\n value.color)\n\n def _batch(self, batch_size):\n return TwoCompositesSpec(\n self.x_spec._batch(batch_size), self.y_spec._batch(batch_size),\n self.color)\n\n def _unbatch(self):\n return TwoCompositesSpec(self.x_spec._unbatch(), self.y_spec._unbatch(),\n self.color)\n\n\ntype_spec.register_type_spec_from_value_converter(\n TwoComposites, TwoCompositesSpec.from_value)\n\n\nclass NestOfTensors:\n \"\"\"CompositeTensor containing a nest of tensors.\"\"\"\n\n def __init__(self, x):\n self.nest = x\n\n\n@type_spec_registry.register(\"tf.NestOfTensorsSpec\")\nclass NestOfTensorsSpec(type_spec.TypeSpec):\n \"\"\"A TypeSpec for the NestOfTensors value type.\"\"\"\n\n def __init__(self, spec):\n self.spec = spec\n\n value_type = property(lambda self: NestOfTensors)\n _component_specs = property(lambda self: self.spec)\n\n def _to_components(self, value):\n return nest.flatten(value)\n\n def _from_components(self, components):\n return nest.pack_sequence_as(self.spec, components)\n\n def _serialize(self):\n return self.spec\n\n def __repr__(self):\n if hasattr(self.spec, \"_fields\") and isinstance(\n self.spec._fields, collections_abc.Sequence) and all(\n isinstance(f, str) for f in self.spec._fields):\n return \"%s(%r)\" % (type(self).__name__, self._serialize())\n return super().__repr__()\n\n @classmethod\n def from_value(cls, value):\n return cls(nest.map_structure(type_spec.type_spec_from_value, value.nest))\n\n @classmethod\n def _deserialize(cls, spec):\n return cls(spec)\n\n\ntype_spec.register_type_spec_from_value_converter(\n NestOfTensors, NestOfTensorsSpec.from_value)\n\n_TestNamedTuple = collections.namedtuple(\"NamedTuple\", [\"a\", \"b\"])\n_TestNamedTuple2 = collections.namedtuple(\"NamedTuple\", [\"a\", \"b\"])\n_TestNamedTupleSingleField = collections.namedtuple(\"SingleField\", [\"a\"])\n_TestNamedTupleDifferentField = collections.namedtuple(\"DifferentField\",\n [\"a\", \"c\"])\n\n\nclass TypeSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n (\"FullySpecified\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool)),\n (\"UnknownDim\",\n TwoTensorsSpec([5, 
None], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),\n (\"UnknownRank\",\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),\n (\"Metadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"blue\"),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"blue\")),\n (\"NumpyMetadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n (np.int32(1), np.float32(1.),\n np.array([[1, 2], [3, 4]]))),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n (np.int32(1), np.float32(1.),\n np.array([[1, 2], [3, 4]])))),\n )\n def testEquality(self, v1, v2):\n # pylint: disable=g-generic-assert\n self.assertEqual(v1, v2)\n self.assertEqual(v2, v1)\n self.assertFalse(v1 != v2)\n self.assertFalse(v2 != v1)\n self.assertEqual(hash(v1), hash(v2))\n\n @parameterized.named_parameters(\n (\"UnknownDim\",\n TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),\n (\"UnknownRank\",\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),\n (\"IncompatibleDtype\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),\n (\"IncompatibleRank\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),\n (\"IncompatibleDimSize\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),\n (\"IncompatibleMetadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"red\"),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"blue\")),\n (\"SwappedValues\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),\n (\"DiffMetadataNumpy\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n np.array([[1, 2], [3, 4]])),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n np.array([[1, 2], [3, 8]]))),\n (\"DiffMetadataTensorSpecName\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n tensor_spec.TensorSpec([4], name=\"a\")),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n tensor_spec.TensorSpec([4], name=\"b\"))),\n (\"Non-TypeSpec\",\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool), 5),\n )\n def testInequality(self, v1, v2):\n # pylint: disable=g-generic-assert\n self.assertNotEqual(v1, v2)\n self.assertNotEqual(v2, v1)\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)\n\n @parameterized.named_parameters(\n (\"SameValue\", TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),\n (\"UnknownDim\", TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),\n TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),\n (\"UnknownRank\", TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),\n (\"NamedTuple\",\n NestOfTensorsSpec(\n _TestNamedTuple(\n a=tensor_spec.TensorSpec([8, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([8, 12], dtypes.int32))),\n NestOfTensorsSpec(\n _TestNamedTuple(\n a=tensor_spec.TensorSpec([None, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([None, None], dtypes.int32)))),\n (\n \"NamedTupleRedefined\",\n NestOfTensorsSpec(\n _TestNamedTuple2( # Separate but equivalent 
type.\n a=tensor_spec.TensorSpec([8, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([8, 12], dtypes.int32))),\n NestOfTensorsSpec(\n _TestNamedTuple(\n a=tensor_spec.TensorSpec([None, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([None, None], dtypes.int32)))),\n )\n def testIsSubtypeOf(self, v1, v2):\n self.assertTrue(v1.is_subtype_of(v2))\n\n @parameterized.named_parameters(\n (\"DifferentType\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpecTwin([5, 3], dtypes.int32, [None], dtypes.bool),\n ),\n (\"DifferentDtype\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),\n (\"DifferentRank\", TwoTensorsSpec([5, 3], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),\n (\"DifferentDimSize\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),\n (\"DifferentMetadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"red\"),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"blue\")),\n (\"SwappedValues\", TwoTensorsSpec([5, 3], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),\n (\"SwappedDimensions\",\n TwoTensorsSpec([3, 5], dtypes.int32, [None], dtypes.int32),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.int32)),\n (\"Supertype\", TwoTensorsSpec([5, None], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),\n (\"SerializeDifferentStructure\",\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool, \"smaller_tuple\")),\n (\"SerializeDifferentOrder\",\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool, \"different_order\")),\n )\n def testIsNotSubtypeOf(self, v1, v2):\n self.assertFalse(v1.is_subtype_of(v2))\n\n @parameterized.named_parameters(\n (\"SameValue\", TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),\n (\"DifferentValue\",\n TwoTensorsSpec([2, 1], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([None, None], dtypes.int32, [None], dtypes.bool)),\n (\"DifferentRank\",\n TwoTensorsSpec([3, 2, 1], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec(None, dtypes.int32, [None], dtypes.bool)),\n (\n \"NamedTupleRedefined\",\n NestOfTensorsSpec(\n _TestNamedTuple2( # Separate but equivalent type.\n a=tensor_spec.TensorSpec([8, 3], dtypes.int32),\n b=tensor_spec.TensorSpec([8, 12], dtypes.int32))),\n NestOfTensorsSpec(\n _TestNamedTuple(\n a=tensor_spec.TensorSpec([None, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([7, None], dtypes.int32))),\n NestOfTensorsSpec(\n _TestNamedTuple(\n a=tensor_spec.TensorSpec([None, None], dtypes.int32),\n b=tensor_spec.TensorSpec([None, None], dtypes.int32)))),\n )\n def testMostSpecificCommonSupertype(self, v1, v2, result):\n self.assertEqual(v1.most_specific_common_supertype([v2]), result)\n self.assertEqual(v2.most_specific_common_supertype([v1]), result)\n\n @parameterized.named_parameters(\n (\"DifferentType\",\n TwoTensorsSpec([5, 
3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpecTwin([5, 3], dtypes.int32, [None], dtypes.bool),\n ),\n (\"DifferentDtype\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),\n (\"DifferentMetadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"red\"),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"blue\")),\n (\"SerializeDifferentStructure\",\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool, \"smaller_tuple\")),\n (\"SerializeDifferentOrder\",\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool),\n TwoTensorsSpecVariableSerialize([5, None], dtypes.int32, [None],\n dtypes.bool, \"different_order\")),\n )\n def testNoCommonSupertype(self, v1, v2):\n self.assertIsNone(v1.most_specific_common_supertype([v2]))\n self.assertIsNone(v2.most_specific_common_supertype([v1]))\n\n def testTensorDecomposition(self):\n value = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]], dtypes.int32),\n ragged_factory_ops.constant([[5], [6, 7, 8]], dtypes.float32),\n )\n spec = type_spec.type_spec_from_value(value)\n\n self.assertEqual(\n spec.flatten(),\n [\n tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.int32),\n tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),\n tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),\n ],\n )\n self.assertEqual(\n [trace_type.from_value(t) for t in spec.to_tensors(value)],\n [\n tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int32),\n tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),\n tensor_spec.TensorSpec(shape=(4,), dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.int64),\n ],\n )\n\n flat_original = spec.to_tensors(value)\n reconstructed = spec.from_tensors(iter(flat_original))\n flat_reconstructed = spec.to_tensors(reconstructed)\n\n for original, reconstructed in zip(flat_original, flat_reconstructed):\n self.assertIs(original, reconstructed)\n\n def testCastDoesntRecreateCompositeTensor(self):\n value = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]], dtypes.int32),\n ragged_factory_ops.constant([[5], [6, 7, 8]], dtypes.float32),\n )\n spec = type_spec.type_spec_from_value(value)\n\n casted_value = spec.cast(value, trace_type.InternalCastContext())\n\n self.assertIs(value, casted_value)\n\n @parameterized.named_parameters(\n (\"SameValue\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)),\n (\"UnknownDim\",\n TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),\n (\"UnknownRank\",\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool)),\n (\"NamedTuple\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec([None, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([None, None], dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec([8, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([8, 12], dtypes.int32)))),\n (\"NamedTupleRedefined\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec([None, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([None, None], dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTuple2( # Separate but equivalent type.\n 
a=tensor_spec.TensorSpec([8, 5], dtypes.int32),\n b=tensor_spec.TensorSpec([8, 12], dtypes.int32)))),\n )\n def testIsCompatibleWith(self, v1, v2):\n self.assertTrue(v1.is_compatible_with(v2))\n self.assertTrue(v2.is_compatible_with(v1))\n\n @parameterized.named_parameters(\n (\"IncompatibleDtype\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),\n (\"IncompatibleRank\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool)),\n (\"IncompatibleDimSize\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool)),\n (\"IncompatibleMetadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"red\"),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool, \"blue\")),\n (\"SwappedValues\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([None], dtypes.bool, [5, 3], dtypes.int32)),\n )\n def testIsNotCompatibleWith(self, v1, v2):\n self.assertFalse(v1.is_compatible_with(v2))\n self.assertFalse(v2.is_compatible_with(v1))\n\n @parameterized.named_parameters(\n (\"EqualTypes\",\n TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),\n (\"UnknownDim\",\n TwoTensorsSpec([5, None], dtypes.int32, [8], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),\n (\"UnknownRank\",\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [8], dtypes.bool),\n TwoTensorsSpec(None, dtypes.int32, None, dtypes.bool)),\n (\"DiffRank\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None, None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)),\n (\"DiffDimSize\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 8], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, None], dtypes.int32, [None], dtypes.bool)),\n (\"NamedTuple\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32)))),\n (\"NamedTupleRedefined\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTuple2( # Separate but equivalent type.\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32)))),\n )\n def testMostSpecificCompatibleType(self, v1, v2, expected):\n self.assertEqual(v1.most_specific_compatible_type(v2), expected)\n self.assertEqual(v2.most_specific_compatible_type(v1), expected)\n\n @parameterized.named_parameters(\n (\"IncompatibleDtype\",\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool),\n TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.float32)),\n (\"IncompatibleMetadata\",\n TwoTensorsSpec([5, 3], dtypes.int32, 
None, dtypes.bool, \"red\"),\n TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool, \"blue\")),\n (\"IncompatibleTensorSpecName\",\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n tensor_spec.TensorSpec([4], name=\"a\")),\n TwoTensorsSpec([5, 3], dtypes.int32, [3], dtypes.bool,\n tensor_spec.TensorSpec([4], name=\"b\"))),\n (\"IncompatibleNestType\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec(dict(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32)))),\n )\n def testMostSpecificCompatibleTypeException(self, v1, v2):\n with self.assertRaises(ValueError):\n v1.most_specific_compatible_type(v2)\n with self.assertRaises(ValueError):\n v2.most_specific_compatible_type(v1)\n\n def testMostSpecificCompatibleTypeNamedTupleIsNotTuple(self):\n named_tuple_spec_a = NestOfTensorsSpec.from_value(NestOfTensors(\n _TestNamedTuple(a=1, b=\"aaa\")))\n named_tuple_spec_b = NestOfTensorsSpec.from_value(NestOfTensors(\n _TestNamedTuple(a=2, b=\"bbb\")))\n named_tuple_spec_c = NestOfTensorsSpec.from_value(NestOfTensors(\n _TestNamedTuple(a=3, b=\"ccc\")))\n normal_tuple_spec = NestOfTensorsSpec.from_value(NestOfTensors((2, \"bbb\")))\n result_a_b = named_tuple_spec_a.most_specific_compatible_type(\n named_tuple_spec_b)\n result_b_a = named_tuple_spec_b.most_specific_compatible_type(\n named_tuple_spec_a)\n self.assertEqual(repr(result_a_b), repr(named_tuple_spec_c))\n self.assertEqual(repr(result_b_a), repr(named_tuple_spec_c))\n # Test that spec of named tuple is not equal to spec of normal tuple.\n self.assertNotEqual(repr(result_a_b), repr(normal_tuple_spec))\n\n @parameterized.named_parameters(\n (\"IncompatibleDtype\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.bool))),\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32)))),\n (\"DifferentTupleSize\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.bool))),\n NestOfTensorsSpec(_TestNamedTupleSingleField(\n a=tensor_spec.TensorSpec((), dtypes.int32)))),\n (\"DifferentFieldName\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec(_TestNamedTupleDifferentField(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n c=tensor_spec.TensorSpec((), dtypes.int32)))),\n (\"NamedTupleAndTuple\",\n NestOfTensorsSpec(_TestNamedTuple(\n a=tensor_spec.TensorSpec((), dtypes.int32),\n b=tensor_spec.TensorSpec((), dtypes.int32))),\n NestOfTensorsSpec((\n tensor_spec.TensorSpec((), dtypes.int32),\n tensor_spec.TensorSpec((), dtypes.int32)))),\n )\n def testMostSpecificCompatibleTypeForNamedTuplesException(self, v1, v2):\n with self.assertRaises(ValueError):\n v1.most_specific_compatible_type(v2)\n with self.assertRaises(ValueError):\n v2.most_specific_compatible_type(v1)\n\n def toTensorList(self):\n value = TwoTensors([1, 2, 3], [1.0, 2.0], \"red\")\n spec = TwoTensorsSpec.from_value(value)\n tensor_list = spec._to_tensor_list(value)\n self.assertLen(tensor_list, 2)\n self.assertIs(tensor_list[0], value.x)\n self.assertIs(tensor_list[1], value.y)\n\n def fromTensorList(self):\n x = ops.convert_to_tensor([1, 2, 3])\n y = ops.convert_to_tensor([1.0, 2.0])\n color = \"green\"\n spec = 
TwoTensorsSpec(x.shape, x.dtype, y.shape, y.dtype, color)\n value = spec._from_tensor_list([x, y])\n self.assertIs(value.x, x)\n self.assertIs(value.y, y)\n self.assertEqual(value.color, color)\n\n def fromIncompatibleTensorList(self):\n x = ops.convert_to_tensor([1, 2, 3])\n y = ops.convert_to_tensor([1.0, 2.0])\n spec1 = TwoTensorsSpec([100], x.dtype, y.shape, y.dtype, \"green\")\n spec2 = TwoTensorsSpec(x.shape, x.dtype, y.shape, dtypes.bool, \"green\")\n with self.assertRaises(ValueError):\n spec1._from_tensor_list([x, y]) # shape mismatch\n with self.assertRaises(ValueError):\n spec2._from_tensor_list([x, y]) # dtype mismatch\n\n def testFlatTensorSpecs(self):\n spec = TwoTensorsSpec([5], dtypes.int32, [5, 8], dtypes.float32, \"red\")\n self.assertEqual(spec._flat_tensor_specs,\n [tensor_spec.TensorSpec([5], dtypes.int32),\n tensor_spec.TensorSpec([5, 8], dtypes.float32)])\n\n def testFullTypesForFlatTensors(self):\n spec = TwoTensorsSpec([5], dtypes.int32, [5, 8], dtypes.float32, \"red\")\n full_type_list = fulltypes_for_flat_tensors(spec)\n expect = [\n full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET),\n full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET)\n ]\n self.assertEqual(len(spec._flat_tensor_specs), len(full_type_list))\n self.assertEqual(expect, full_type_list)\n\n def testRepr(self):\n spec = TwoTensorsSpec([5, 3], dtypes.int32, None, dtypes.bool)\n self.assertEqual(\n repr(spec),\n \"TwoTensorsSpec(%r, %r, %r, %r, %r)\" %\n (tensor_shape.TensorShape([5, 3]), dtypes.int32,\n tensor_shape.TensorShape(None), dtypes.bool, \"red\"))\n\n def testFromValue(self):\n value = TwoTensors([1, 2, 3], [1.0, 2.0], \"red\")\n spec = type_spec.type_spec_from_value(value)\n self.assertEqual(spec, TwoTensorsSpec.from_value(value))\n\n def testCast(self):\n spec = TwoTensorsSpec([], dtypes.int32, [], dtypes.float32)\n foo = spec._from_components([1, 2.3])\n ctx = trace_type.InternalCastContext()\n value = spec.cast(foo, ctx)\n tensor_type = type(ops.convert_to_tensor([1, 2, 3]))\n self.assertIsInstance(value.x, tensor_type)\n self.assertIsInstance(value.y, tensor_type)\n self.assertEqual(value.x.dtype, dtypes.int32)\n self.assertEqual(value.y.dtype, dtypes.float32)\n\n bar = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]], dtypes.int32),\n ragged_factory_ops.constant([[5], [6, 7, 8]], dtypes.float32))\n bar_spec = type_spec.type_spec_from_value(bar)\n value = bar_spec.cast(bar, ctx)\n self.assertEqual(value.x.dtype, dtypes.int32)\n self.assertEqual(value.y.dtype, dtypes.float32)\n\n def testNestedRagged(self):\n # Check that TwoCompositeSpecs are compatible if one has a nested\n # RaggedTensorSpec w/ ragged_rank=0 and the other has a corresponding\n # nested TensorSpec.\n spec1 = TwoCompositesSpec(\n ragged_tensor.RaggedTensorSpec([10], dtypes.int32, ragged_rank=0),\n tensor_spec.TensorSpec(None, dtypes.int32))\n spec2 = TwoCompositesSpec(\n tensor_spec.TensorSpec([10], dtypes.int32),\n tensor_spec.TensorSpec(None, dtypes.int32))\n spec3 = TwoCompositesSpec(\n tensor_spec.TensorSpec([12], dtypes.int32),\n tensor_spec.TensorSpec(None, dtypes.int32))\n self.assertTrue(spec1.is_compatible_with(spec2))\n self.assertFalse(spec1.is_compatible_with(spec3))\n\n def testRegistry(self):\n self.assertEqual(\"tf.TwoCompositesSpec\",\n type_spec_registry.get_name(TwoCompositesSpec))\n self.assertEqual(\"tf.TwoTensorsSpec\",\n type_spec_registry.get_name(TwoTensorsSpec))\n self.assertEqual(TwoCompositesSpec,\n type_spec_registry.lookup(\"tf.TwoCompositesSpec\"))\n 
self.assertEqual(TwoTensorsSpec,\n type_spec_registry.lookup(\"tf.TwoTensorsSpec\"))\n\n def testRegistryTypeErrors(self):\n with self.assertRaisesRegex(TypeError, \"Expected `name` to be a string\"):\n type_spec_registry.register(None)\n\n with self.assertRaisesRegex(TypeError, \"Expected `name` to be a string\"):\n type_spec_registry.register(TwoTensorsSpec)\n\n with self.assertRaisesRegex(TypeError, \"Expected `cls` to be a TypeSpec\"):\n type_spec_registry.register(\"tf.foo\")(None)\n\n with self.assertRaisesRegex(TypeError, \"Expected `cls` to be a TypeSpec\"):\n type_spec_registry.register(\"tf.foo\")(ragged_tensor.RaggedTensor)\n\n def testRegistryDuplicateErrors(self):\n with self.assertRaisesRegex(\n ValueError, \"Name tf.TwoCompositesSpec has already been registered \"\n \"for class __main__.TwoCompositesSpec.\"):\n\n @type_spec_registry.register(\"tf.TwoCompositesSpec\") # pylint: disable=unused-variable\n class NewTypeSpec(TwoCompositesSpec):\n pass\n\n with self.assertRaisesRegex(\n ValueError, \"Class __main__.TwoCompositesSpec has already been \"\n \"registered with name tf.TwoCompositesSpec\"):\n type_spec_registry.register(\"tf.NewName\")(TwoCompositesSpec)\n\n def testRegistryNameErrors(self):\n for bad_name in [\"foo\", \"\", \"hello world\"]:\n with self.assertRaises(ValueError):\n type_spec_registry.register(bad_name)\n\n def testRegistryLookupErrors(self):\n with self.assertRaises(TypeError):\n type_spec_registry.lookup(None)\n with self.assertRaisesRegex(\n ValueError, \"No TypeSpec has been registered with name 'foo.bar'\"):\n type_spec_registry.lookup(\"foo.bar\")\n\n def testRegistryGetNameErrors(self):\n with self.assertRaises(TypeError):\n type_spec_registry.get_name(None)\n\n class Foo(TwoCompositesSpec):\n pass\n\n with self.assertRaisesRegex(\n ValueError, \"TypeSpec __main__.Foo has not been registered.\"):\n type_spec_registry.get_name(Foo)\n\n def testSerialization(self):\n spec = TwoTensorsSpec([5, 3], dtypes.int32, [None], dtypes.bool)\n self.assertEqual(spec, trace_type.deserialize(trace_type.serialize(spec)))\n\n\nclass BatchableTypeSpecTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n @parameterized.parameters([\n {\n \"unbatched\":\n TwoCompositesSpec(\n tensor_spec.TensorSpec([]), tensor_spec.TensorSpec([8])),\n \"batch_size\":\n 5,\n \"batched\":\n TwoCompositesSpec(\n tensor_spec.TensorSpec([5]), tensor_spec.TensorSpec([5, 8]))\n },\n {\n \"unbatched\":\n TwoCompositesSpec(\n tensor_spec.TensorSpec(None), tensor_spec.TensorSpec([8])),\n \"batch_size\":\n None,\n \"batched\":\n TwoCompositesSpec(\n tensor_spec.TensorSpec(None),\n tensor_spec.TensorSpec([None, 8]))\n },\n {\n \"unbatched\":\n TwoCompositesSpec(\n tensor_spec.TensorSpec([3, None]),\n tensor_spec.TensorSpec([8])),\n \"batch_size\":\n 12,\n \"batched\":\n TwoCompositesSpec(\n tensor_spec.TensorSpec([12, 3, None]),\n tensor_spec.TensorSpec([12, 8]))\n },\n {\n \"unbatched\":\n TwoCompositesSpec(\n ragged_tensor.RaggedTensorSpec([3, None]),\n tensor_spec.TensorSpec([8])),\n \"batch_size\":\n 12,\n \"batched\":\n TwoCompositesSpec(\n ragged_tensor.RaggedTensorSpec([12, 3, None]),\n tensor_spec.TensorSpec([12, 8]))\n },\n ])\n def testBatch(self, unbatched, batch_size, batched):\n self.assertEqual(unbatched._batch(batch_size), batched)\n self.assertEqual(batched._unbatch(), unbatched)\n\n def testFlatTensorSpecs(self):\n a = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]]),\n ragged_factory_ops.constant([[5], [6, 7, 8]]))\n a_spec = 
type_spec.type_spec_from_value(a)\n flat_specs = a_spec._flat_tensor_specs\n self.assertEqual(flat_specs, [\n tensor_spec.TensorSpec(None, dtypes.variant),\n tensor_spec.TensorSpec(None, dtypes.variant)\n ])\n\n def testFullTypesForFlatTensors(self):\n a = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]]),\n ragged_factory_ops.constant([[5], [6, 7, 8]]))\n a_spec = type_spec.type_spec_from_value(a)\n full_type_list = fulltypes_for_flat_tensors(a_spec)\n expect = [\n full_type_pb2.FullTypeDef(\n type_id=full_type_pb2.TFT_RAGGED,\n args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_INT32)]),\n full_type_pb2.FullTypeDef(\n type_id=full_type_pb2.TFT_RAGGED,\n args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_INT32)]),\n ]\n self.assertEqual(len(a_spec._flat_tensor_specs), len(full_type_list))\n self.assertEqual(expect, full_type_list)\n\n def testToTensorList(self):\n a = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]]),\n ragged_factory_ops.constant([[5], [6, 7, 8]]))\n a_spec = type_spec.type_spec_from_value(a)\n tensor_list = a_spec._to_tensor_list(a)\n self.assertLen(tensor_list, 2)\n self.assertEqual(tensor_list[0].dtype, dtypes.variant)\n self.assertEqual(tensor_list[1].dtype, dtypes.variant)\n self.assertEqual(tensor_list[0].shape.rank, 0)\n self.assertEqual(tensor_list[1].shape.rank, 0)\n\n b = a_spec._from_tensor_list(tensor_list)\n self.assertAllEqual(a.x, b.x)\n self.assertAllEqual(a.y, b.y)\n self.assertEqual(a.color, b.color)\n\n c = a_spec._from_compatible_tensor_list(tensor_list)\n self.assertAllEqual(a.x, c.x)\n self.assertAllEqual(a.y, c.y)\n self.assertEqual(a.color, c.color)\n\n def testToBatchedTensorList(self):\n a = TwoComposites(\n ragged_factory_ops.constant([[1, 2], [3]]),\n ragged_factory_ops.constant([[5], [6, 7, 8]]))\n a_spec = type_spec.type_spec_from_value(a)\n tensor_list = a_spec._to_batched_tensor_list(a)\n self.assertLen(tensor_list, 2)\n self.assertEqual(tensor_list[0].dtype, dtypes.variant)\n self.assertEqual(tensor_list[1].dtype, dtypes.variant)\n self.assertEqual(tensor_list[0].shape.rank, 1)\n self.assertEqual(tensor_list[1].shape.rank, 1)\n\n b = a_spec._from_tensor_list(tensor_list)\n self.assertAllEqual(a.x, b.x)\n self.assertAllEqual(a.y, b.y)\n self.assertEqual(a.color, b.color)\n\n c = a_spec._from_compatible_tensor_list(tensor_list)\n self.assertAllEqual(a.x, c.x)\n self.assertAllEqual(a.y, c.y)\n self.assertEqual(a.color, c.color)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "output": ["TwoComposites", "NewTypeSpec", "TwoTensorsSpecVariableSerialize", "NestOfTensorsSpec", "Foo", "TwoTensorsSpec", "TypeSpecTest", "BatchableTypeSpecTest", "TwoCompositesSpec", "TwoTensors", "TwoTensorsSpecTwin", "NestOfTensors"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/framework/type_spec_test.py", "file_length": 13761, "symbol_dict": [{"symbol": "NestOfTensors", "type": "mannual_defined_class", "byte_location": 5336, "location": 1789}, {"symbol": "TypeSpecTest", "type": "mannual_defined_class", "byte_location": 6917, "location": 2318}, {"symbol": "Foo", "type": "mannual_defined_class", "byte_location": 32900, "location": 11952}, {"symbol": "TwoComposites", "type": "mannual_defined_class", "byte_location": 3742, "location": 1221}, {"symbol": "TwoTensorsSpec", "type": "mannual_defined_class", "byte_location": 2042, "location": 567}, {"symbol": "BatchableTypeSpecTest", "type": "mannual_defined_class", "byte_location": 33267, "location": 12079}, {"symbol": "TwoTensorsSpecTwin", "type": 
"mannual_defined_class", "byte_location": 3119, "location": 985}, {"symbol": "TwoTensorsSpecVariableSerialize", "type": "mannual_defined_class", "byte_location": 3237, "location": 1028}, {"symbol": "TwoTensors", "type": "mannual_defined_class", "byte_location": 1610, "location": 414}, {"symbol": "NewTypeSpec", "type": "mannual_defined_class", "byte_location": 32056, "location": 11684}, {"symbol": "NestOfTensorsSpec", "type": "mannual_defined_class", "byte_location": 5510, "location": 1855}, {"symbol": "TwoCompositesSpec", "type": "mannual_defined_class", "byte_location": 4138, "location": 1365}]}} {"input": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FuncGraph and related functionality.\"\"\"\n\nimport traceback\nfrom typing import Any, Callable, Hashable\nimport weakref\n\nfrom tensorflow.core.function import trace_type\nfrom tensorflow.core.function.capture import capture_container\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager.polymorphic_function import composite_tensor_utils\nfrom tensorflow.python.framework import auto_control_deps\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor as tensor_lib\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.saved_model import save_context\nfrom tensorflow.python.types import core\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util import variable_utils\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nALLOWLIST_COLLECTIONS = [\n ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES,\n ops.GraphKeys.TRAINABLE_VARIABLES,\n variable_scope._VARSTORE_KEY, # pylint: disable=protected-access\n variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access\n]\n\n\nclass UnknownArgument(object):\n \"\"\"Signifies an argument which is not currently handled.\"\"\"\n\n\ndef convert_structure_to_signature(structure, arg_names=None,\n signature_context=None):\n \"\"\"Convert a potentially nested structure to a signature.\n\n Args:\n structure: Structure to convert, where top level collection is a list or a\n tuple.\n arg_names: 
Optional list of arguments that has equal number of elements as\n `structure` and is used for naming corresponding TensorSpecs.\n signature_context: TraceType InternalTracingContext to generate alias_ids\n for mutable objects, like ResourceVariables.\n\n Returns:\n Identical structure that has TensorSpec objects instead of Tensors and\n UnknownArgument instead of any unsupported types.\n \"\"\"\n\n def encode_arg(arg, path):\n \"\"\"A representation for this argument, for converting into signatures.\"\"\"\n if isinstance(arg, tensor_lib.Tensor):\n user_specified_name = None\n try:\n user_specified_name = compat.as_str(\n arg.op.get_attr(\"_user_specified_name\"))\n except (ValueError, AttributeError):\n pass\n\n if path and user_specified_name and user_specified_name != path[0]:\n # The user has explicitly named the argument differently than the name\n # of the function argument.\n name = user_specified_name\n else:\n name = tensor_lib.sanitize_spec_name(\"_\".join(str(p) for p in path))\n return tensor_lib.TensorSpec(arg.shape, arg.dtype, name)\n if isinstance(arg, resource_variable_ops.ResourceVariable):\n return trace_type.from_value(arg, signature_context)\n if isinstance(arg, composite_tensor.CompositeTensor):\n # TODO(b/133606651) Do we need to inject arg_name?\n return arg._type_spec # pylint: disable=protected-access\n if isinstance(arg, (\n int,\n float,\n bool,\n str,\n type(None),\n dtypes.DType,\n tensor_lib.TensorSpec,\n type_spec.TypeSpec,\n )):\n return arg\n return UnknownArgument()\n\n # We are using the flattened paths to name the TensorSpecs. We need an\n # explicit name for them downstream.\n flattened = nest.flatten_with_tuple_paths(structure)\n if arg_names:\n if len(arg_names) != len(structure):\n raise ValueError(\n \"Passed in arg_names don't match actual signature (%s).\" % arg_names)\n # Replace all top-level names with their actual arg_names. If a path before\n # was \"(2,'a',1)\", it will become \"(arg_names[2],'a',1)\".\n flattened = [\n ((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened\n ]\n\n mapped = [encode_arg(arg, path) for path, arg in flattened]\n return nest.pack_sequence_as(structure, mapped)\n\n\n@tf_export(\"__internal__.FuncGraph\", v1=[])\nclass FuncGraph(ops.Graph):\n \"\"\"Graph representing a function body.\n\n Attributes:\n name: The name of the function.\n inputs: Placeholder tensors representing the inputs to this function. The\n tensors are in this FuncGraph. This represents \"regular\" inputs as well as\n captured inputs (i.e. the values of self.captures), with the regular\n inputs coming first.\n outputs: Tensors that will be returned by this function. The tensors are in\n this FuncGraph.\n control_outputs: Operations that must be executed before the function\n represented by this graph can be said to have been executed.\n structured_input_signature: A tuple of (args, kwargs), which are both\n possibly-nested python objects that were received by this function. Note\n that these structures might contain Python `None`s.\n structured_outputs: A possibly-nested python object which will be returned\n by this function. The Tensors in this structure are the same as those of\n self.outputs. Note that this structure might contain Python `None`s.\n variables: Variables that should be watched during function execution.\n outer_graph: The graph this function is defined in. May be another FuncGraph\n or the global default Graph.\n captures: Maps external tensor -> internal tensor (i.e. 
input placeholder).\n The entries are in the order they were captured.\n seed: The graph-level random seed.\n capture_by_value: If True, the func graph will capture Variables by value\n instead of reference.\n \"\"\"\n\n def __init__(self,\n name,\n collections=None,\n capture_by_value=None,\n structured_input_signature=None,\n structured_outputs=None):\n \"\"\"Construct a new FuncGraph.\n\n The graph will inherit its graph key, collections, seed, and distribution\n strategy stack from the current context or graph.\n\n Args:\n name: the name of the function.\n collections: a dictionary of collections this FuncGraph should start with.\n If not specified (None), the FuncGraph will read (but not write to) the\n outer graph's collections that are not allowlisted, and both read and\n write to the outer graph's collections that are allowlisted. The current\n allowlisted collections are the global variables, the local variables,\n and the trainable variables. Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will\n capture Variables by value instead of reference. By default inherit from\n outer graphs, and failing that will default to False.\n structured_input_signature: Optional. The structured input signature to\n use for initializing the FuncGraph. See the docstring for FuncGraph for\n more information.\n structured_outputs: Optional. The structured outputs to use for\n initializing the FuncGraph. See the docstring for FuncGraph for more\n information.\n \"\"\"\n super().__init__()\n self.name = name\n # TODO(panzf): Separate captures from non-captures inputs in self.inputs\n self.inputs = []\n self.outputs = []\n self.control_outputs = []\n self.structured_input_signature = structured_input_signature\n self.structured_outputs = structured_outputs\n self._resource_tensor_inputs = object_identity.ObjectIdentitySet()\n self._weak_variables = []\n self._watched_variables = object_identity.ObjectIdentityWeakSet()\n self.is_control_flow_graph = False\n\n self._function_captures = capture_container.FunctionCaptures()\n outer_graph = ops.get_default_graph()\n self._weak_outer_graph = weakref.ref(outer_graph)\n while outer_graph.building_function:\n outer_graph = outer_graph.outer_graph\n # If self._weak_outer_graph is deleted, we revert to the outermost Graph\n # active when the FuncGraph was traced. This will not be a FuncGraph.\n self._fallback_outer_graph = outer_graph\n # If not None, records the names of output args of this function. Used to\n # preserve the output names in the signature of a serialized+deserialized\n # function. 
Private at the moment mostly because it's often out of date.\n self._output_names = None\n # Inherit capture-by-value from outer graph.\n if capture_by_value is not None:\n self.capture_by_value = capture_by_value\n elif self.outer_graph is not None and isinstance(self.outer_graph,\n FuncGraph):\n self.capture_by_value = self.outer_graph.capture_by_value\n else:\n self.capture_by_value = False\n\n self._building_function = True\n\n graph = self.outer_graph\n\n if context.executing_eagerly():\n self.seed = context.global_seed()\n # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of\n # any None op_seed for random_op in the function, in which case we end up\n # using function seed, which could be unintended behavior for the op.\n self._seed_used = False\n else:\n self.seed = graph.seed\n self._seed_used = False\n # TODO(allenl): Figure out if we can remove colocation stack\n # specialization (currently used in cond_v2), here and in the cache key.\n self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access\n\n if collections is None:\n for collection_name in graph.get_all_collection_keys():\n if collection_name not in ALLOWLIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection(\n collection_name)\n for collection_name in ALLOWLIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection_ref(\n collection_name)\n else:\n self._collections = collections\n\n # Keep track of whether this FuncGraph is exportable to SavedModel. Use\n # `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any\n # dependent functions as unsaveable.\n self._saveable = True\n self._saving_errors = set()\n\n # Keep track of callbacks to run when this graph exits default scope\n self._scope_exit_callbacks = None\n\n def __str__(self):\n return \"FuncGraph(name=%s, id=%s)\" % (self.name, id(self))\n\n def watch_variable(self, v):\n \"\"\"Marks the variable v as accessed while building this graph.\"\"\"\n # Don't watch `v` if it is one of ResourceVariable input arguments.\n if (isinstance(v, resource_variable_ops.ResourceVariable) and\n v.handle in self._resource_tensor_inputs):\n return\n\n while self is not None and isinstance(self, FuncGraph):\n self._watched_variables.add(v)\n self = self.outer_graph\n\n def capture_call_time_value(self,\n closure,\n spec,\n key=None,\n default_value=None,\n placeholder=None):\n \"\"\"Returns a placeholder which at call time has the value closure().\n\n The `tf.function` supports the notion of captures, that is, it allows Python\n functions to have closure variables, which bind over some value outside the\n function. However, this name binding is \"early binding\" performed before the\n program is run, i.e.,\n ```\n @tf.function\n def f():\n return x\n\n x = tf.constant(1)\n f() # returns 1\n\n x = tf.constant(2)\n f() # still returns 1!\n ```\n while in Python, name binding is performed as the program is running.\n ```\n def f():\n return x\n\n x = 1\n f() # returns 1\n\n x = 2\n f() # returns 2\n ```\n `capture_call_time_value` allows tf.function to mimic late binding as a\n Python function does, by passing in a `closure` callable argument to be\n executed when the tf.function is invoked eagerly. 
E.g.\n ```\n @tf.function\n def f():\n return ops.get_default_graph.capture_call_time_value(lambda: x)\n\n x = tf.constant(1)\n f() # returns 1\n\n x = tf.constant(2)\n f() # returns 2\n ```\n Note that a `capture_call_time_value` function itself does not work well in\n the saving process (since the tf.function in which it's called is not\n invoked eagerly) unless passed a `default_value` argument. At saving time,\n the `default_value` argument is returned instead.\n\n Args:\n closure: function which takes no arguments, to be evaluated at function\n call time, returning a nest of tensors compatible with `spec`.\n spec: nest of TypeSpec for the value to capture.\n key: optional. If not None, multiple calls to lazy_capture with the same\n key in the same graph will return the same placeholder, and the first\n closure will be used at function call time.\n default_value: optional value to return in environments that cannot safely\n evaluate closure.\n placeholder: optional. If not None, the graph will take the passed-in\n `placeholder` as the internal capture instead of creating a new one.\n This is useful when loading from a SavedModel.\n\n Returns:\n Nest of placeholders which, at function call time, will be fed with the\n result of calling closure().\n\n Raises:\n ValueError: at function call time, if the return value of closure() is\n not compatible with `spec`.\n \"\"\"\n if key is None:\n key = object()\n if key not in self._function_captures.by_ref_internal:\n trace_ctx = trace_type.InternalTracingContext(True)\n spec = trace_type.from_value(spec, trace_ctx)\n\n if placeholder is None:\n placeholder_ctx = trace_type.InternalPlaceholderContext(self)\n placeholder = spec.placeholder_value(placeholder_ctx)\n\n def wrapped_closure():\n\n # One major case requiring returning a `default_value` is when passing a\n # concrete function to `save`, i.e.\n # serving_fn = serve_fn.get_concrete_function(...)\n # model.save(save_dir, signatures={\"serving_default\": serving_fn})\n # `serving_fn` has deferred captures added through\n # `capture_call_time_value`. It can't be saved correctly since\n # `wrapped_closure` will end up executing under a default Graph instead\n # of FuncGraph. The user of `capture_call_time_value` also cannot\n # conditionally avoid this call since presence of `save_context` when\n # executing `wrapped_closure` is not known at tracing time of\n # `serving_fn`.\n if save_context.in_save_context() and default_value is not None:\n return default_value\n # TODO(wxinyi): raise an error if in save context but no default value.\n\n if not context.executing_eagerly():\n graph = ops.get_default_graph()\n assert isinstance(\n graph,\n FuncGraph), \"This API should only be used in TF2 enviroment.\"\n\n with graph.as_default():\n ret_nest = graph.capture_call_time_value(\n closure, spec, key=key, default_value=default_value)\n else:\n ret_nest = closure()\n\n ret_nest = spec.cast(ret_nest, trace_type.InternalCastContext)\n return spec.to_tensors(ret_nest)\n\n wrapped_closure.output_spec = spec\n self._function_captures.add_or_replace(\n key=key,\n external=wrapped_closure,\n internal=placeholder,\n tracetype=spec,\n is_by_ref=True)\n return self._function_captures.by_ref_internal[key]\n\n def control_dependencies(self, control_inputs):\n \"\"\"Handles control dependencies.\n\n FuncGraph wraps Graph's control_dependencies logic by first filtering out\n any external tensors / operations and storing them in the graph's\n control_captures member. 
Any consumers of this function graph must then\n decide how to handle the control captures.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which must be\n executed or computed before running the operations defined in the\n context. Can also be `None` to clear the control dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return super().control_dependencies(control_inputs)\n\n filtered_control_inputs = []\n for c in control_inputs:\n # Check for _UnreadVariable\n if (isinstance(c, indexed_slices.IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n graph_element = ops._as_graph_element(c) # pylint: disable=protected-access\n if graph_element is None:\n graph_element = c\n if graph_element is not None and getattr(graph_element, \"graph\",\n None) is not self:\n self._function_captures.control.add(graph_element)\n else:\n filtered_control_inputs.append(graph_element)\n return super().control_dependencies(filtered_control_inputs)\n\n def as_default(self):\n outer_cm = super().as_default()\n\n @tf_contextlib.contextmanager\n def inner_cm():\n \"\"\"Context manager for copying distribute.Strategy scope information.\"\"\"\n # pylint: disable=protected-access\n # TODO(b/112906995, nareshmodi): distribution strategy depends on\n # inheriting this stack from the default graph even in eager mode. Maybe\n # it should be part of the eager context? This would also allow us to\n # remove a get_default_graph() call from the function cache lookup.\n graph = ops.get_default_graph()\n old_strategy_stack = self._distribution_strategy_stack\n self._distribution_strategy_stack = list(\n graph._distribution_strategy_stack)\n\n # We ignore device placements from any outer scopes while tracing the\n # function when possible, to avoid hard-coding them in the function\n # graph. \"Default\" placements come from the PartitionedCallOp's placement,\n # so that the same trace of the Python function may be placed on several\n # different devices and saved functions may be placed on new devices when\n # restored.\n # However, we need to preserve the outer device stack in the following\n # cases in non eager context:\n # 1. device stack is callable\n # 2. 
When using distribution strategy with legacy graph mode.\n old_device_stack = self._device_function_stack\n if (not context.executing_eagerly() and\n (device_stack_has_callable(graph._device_function_stack) or\n (self._distribution_strategy_stack and\n not ops.executing_eagerly_outside_functions()))):\n # Hard-code devices from device functions in the function body\n self._device_function_stack = graph._device_function_stack.copy()\n\n old_creator_stack = self._variable_creator_stack\n self._variable_creator_stack = graph._variable_creator_stack\n # Inherit the graph key, since this is used for matching variables in\n # optimizers.\n old_graph_key = self._graph_key\n self._graph_key = graph._graph_key\n # pylint: enable=protected-access\n\n old_scope_exit_callbacks = self._scope_exit_callbacks\n self._scope_exit_callbacks = []\n\n with outer_cm as g:\n try:\n yield g\n finally:\n try:\n for fn in self._scope_exit_callbacks:\n fn()\n finally:\n self._scope_exit_callbacks = old_scope_exit_callbacks\n self._distribution_strategy_stack = old_strategy_stack\n self._device_function_stack = old_device_stack\n self._variable_creator_stack = old_creator_stack\n self._graph_key = old_graph_key\n\n return inner_cm()\n\n @property\n def outer_graph(self):\n \"\"\"The Graph this FuncGraph is nested in.\n\n Functions may capture Tensors from graphs they are nested in (transitive).\n\n Returns:\n A Graph object. Initially set to the current default graph when the\n FuncGraph was created. If the previous `outer_graph` was deleted because\n the function that owns it was deleted, `outer_graph` is reset to the\n outermost default graph active when the FuncGraph was created. This\n FuncGraph won't have captured anything from the new `outer_graph` (and\n likely not from the previous setting, since that would have created a\n strong reference), but it is returned so that FuncGraphs always have a\n parent.\n \"\"\"\n current = self._weak_outer_graph()\n if current is None:\n return self._fallback_outer_graph\n return current\n\n @outer_graph.setter\n def outer_graph(self, new_outer_graph):\n \"\"\"Sets `outer_graph` to `new_outer_graph`.\"\"\"\n self._weak_outer_graph = weakref.ref(new_outer_graph)\n\n @property\n def output_types(self):\n return [t.dtype for t in self.outputs]\n\n @property\n def output_shapes(self):\n return [t.shape for t in self.outputs]\n\n @property\n def trainable_variables(self):\n \"\"\"A sequence of trainable variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Returns:\n Sequence of trainable variables for this func graph.\n \"\"\"\n return tuple(v for v in self.variables if v.trainable)\n\n @property\n def variables(self):\n \"\"\"A sequence of variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Returns:\n Sequence of variables for this func graph.\n \"\"\"\n\n def deref(weak_v):\n v = weak_v()\n if v is None:\n raise AssertionError(\n \"Called a function referencing variables which have been deleted. \"\n \"This likely means that function-local variables were created and \"\n \"not referenced elsewhere in the program. 
This is generally a \"\n \"mistake; consider storing variables in an object attribute on \"\n \"first call.\")\n return v\n\n return tuple(deref(v) for v in self._weak_variables)\n\n @variables.setter\n def variables(self, var_list):\n self._weak_variables = [weakref.ref(v) for v in var_list]\n\n def _capture_by_value(\n self,\n op_type,\n inputs,\n dtypes, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n # When capturing by value, do the read outside\n reverse_captures = dict((id(v), k) for k, v in self.captures)\n uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]\n with ops.init_scope():\n if context.executing_eagerly():\n attr_list = (\"dtype\", int(attrs[\"dtype\"].type))\n value, = execute.execute(\n compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,\n context.context())\n else:\n op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access\n op_type, uncaptured_inputs, dtypes, input_types, name, attrs,\n op_def, compute_device)\n value = op.outputs[0]\n captured_value = self.capture(value)\n return captured_value.op\n\n def _create_op_internal(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n \"\"\"Like Graph.create_op, except handles external input tensors.\n\n This overload adds functionality to create_op to \"capture\" any external\n input tensors, i.e. tensors from the eager context or outer function graphs\n if this is a nested function. See `capture` for more information.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of the\n tensors that the operation consumes. By default, uses the base `DType`\n of each input in `inputs`. Operations that expect reference-typed inputs\n must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_device: (Optional.) If True, device functions will be executed to\n compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n if self.capture_by_value and op_type in [\n \"ReadVariableOp\", \"ResourceGather\"\n ]:\n return self._capture_by_value(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n # This capturing logic interacts poorly with control flow contexts which\n # want to replace inputs of ops far too late in the process. This can lead\n # the context to get confused and try to create an Enter for an Enter. 
We\n # can detect this here and skip the additional Enter which can confuse loop\n # validation logic.\n if op_type == \"Enter\" and inputs[0].op.type == \"Enter\":\n if inputs[0].op.get_attr(\"frame_name\") == attrs[\"frame_name\"].s:\n return inputs[0].op\n # Calling AddValue on the control flow contexts to force creation of the\n # backward accumulators in the original graph before we create placeholders\n # to capture the inputs.\n ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access\n # Use a different list to avoid modifying the original inputs list.\n captured_inputs = []\n for inp in inputs:\n # TPU Estimator defines a control flow context with no AddValue method.\n if ctxt is not None and hasattr(ctxt, \"AddValue\"):\n inp = ctxt.AddValue(inp)\n inp = self.capture(inp)\n captured_inputs.append(inp)\n return super()._create_op_internal( # pylint: disable=protected-access\n op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,\n compute_device)\n\n def capture(self, tensor, name=None, shape=None):\n return self._function_captures.capture_by_value(self, tensor, name)\n\n def _validate_in_scope(self, tensor):\n inner_graph = tensor.graph\n while inner_graph is not None and isinstance(inner_graph, FuncGraph):\n if inner_graph is self:\n try:\n tb = tensor.op.traceback\n except AttributeError:\n tensor_traceback = \"\"\n else:\n tensor_traceback_list = []\n for frame in traceback.format_list(tb.get_user_frames()):\n tensor_traceback_list.extend(\n [f\" {line}\" for line in frame.split(\"\\n\") if line.strip()])\n tensor_traceback = \"\\n\".join(tensor_traceback_list)\n # Keep in sync with tfe_wrapper.cc.\n # TODO(b/200991648): Unify those two paths.\n raise errors.InaccessibleTensorError(\n f\"{tensor!r} is out of scope and cannot be used here. Use return \"\n \"values, explicit Python locals or TensorFlow collections to \"\n \"access it.\\n\"\n \"Please see https://www.tensorflow.org/guide/function#all_outputs_of_a_tffunction_must_be_return_values \" # pylint: disable=line-too-long\n \"for more information.\\n\\n\"\n f\"{tensor!r} was defined here:\\n{tensor_traceback}\\n\\n\"\n f\"The tensor {tensor!r} cannot be accessed from {self}, because \"\n f\"it was defined in {tensor.graph}, which is out of scope.\")\n inner_graph = inner_graph.outer_graph\n\n # TODO(panzf): Rename this method along with usages in cond/while graph.\n def _capture_helper(self, tensor, name):\n return self._function_captures._create_placeholder_helper( # pylint: disable=protected-access\n self, tensor, name)\n\n def _experimental_capture_side_input_by_ref(self, identifier: Hashable,\n func: Callable[[], Any]) ->...:\n \"\"\"Implement capturing side input by reference for tf.function.\n\n Note that this API will only register the capture in the func_graph where\n it is called. In the case of nested graph, like nested tf.function or\n tf.while, the outer graph is not aware of this capture in the inner graph.\n Thus, the outer tf.function will not retrace when the by-ref capture\n changes. 
It's the user's responsibility to call this API in the outer\n func_graph as well if proper retracing is needed.\n\n For example:\n\n ```\n x = 1\n\n # Correct usage\n @tf.function\n def f_1():\n graph = tf.compat.v1.get_default_graph()\n # Capture the same x for the outer tf.function\n graph._experimental_capture_side_input_by_ref(\"x\", lambda: x)\n\n @tf.function\n def g():\n graph = tf.compat.v1.get_default_graph()\n cap_x = graph._experimental_capture_side_input_by_ref(\"x\", lambda: x)\n return cap_x + 1\n\n return g()\n\n # Incorrect usage\n @tf.function\n def f_2():\n\n @tf.function\n def g():\n graph = tf.compat.v1.get_default_graph()\n cap_x = graph._experimental_capture_side_input_by_ref(\"x\", lambda: x)\n return cap_x + 1\n\n return g()\n\n assert f_1() == 2\n assert f_2() == 2\n x = 2\n assert f_1() == 3\n assert f_2() == 2 # This is incorrect\n ```\n\n Args:\n identifier: A hashable object as the key for the capture.\n func: A Python function that takes no arguments and returns the value of\n side input. The function is evaluated at function call time.\n\n Returns:\n A nested structure with the same structure as the side input. Tensors\n are replaced with placehoders, and non-tensors remain the same.\n\n \"\"\"\n if context.executing_eagerly():\n return func()\n\n def maybe_convert_to_tensor():\n value = func()\n if not (isinstance(value, core.Value) or isinstance(value, core.Symbol)):\n value = constant_op.constant(value)\n return value\n\n placeholder = self._function_captures._capture_by_ref( # pylint: disable=protected-access\n self, maybe_convert_to_tensor, identifier)\n return placeholder\n\n @property\n def captures(self):\n \"\"\"Order list of tuples containing external and internal captures.\"\"\"\n return self._function_captures.by_val_capture_tuples\n\n def add_capture(self, tensor, placeholder):\n \"\"\"Capture a specific tensor and utilize the provided placeholder.\n\n Args:\n tensor: Tensor to captures.\n placeholder: Provided placeholder for the tensor.\n \"\"\"\n self._function_captures.add_or_replace(\n key=id(tensor),\n external=tensor,\n internal=placeholder,\n is_by_ref=False)\n self.inputs.append(placeholder)\n\n def replace_capture(self, tensor, placeholder):\n \"\"\"Replace already existing capture.\"\"\"\n self._function_captures.add_or_replace(\n key=id(tensor),\n external=tensor,\n internal=placeholder,\n is_by_ref=False)\n\n def replace_capture_with_deferred_capture(self,\n tensor,\n closure,\n spec,\n placeholder,\n default_value=None):\n \"\"\"Replaces existing capture `tensor` with a deferred capture `closure`.\n\n Caution: It is the caller's responsibility to make sure that, after calling\n this function, the TypeSpec of the `inputs` (i.e. internal placeholders) and\n the `_captured_inputs` (i.e. external captures) of a concrete function that\n wraps this function graph are still compatible. Thus user should pairing\n usage of this function with `ConcreteFunction.set_external_captures` to make\n sure the order still matches. 
For example,\n ```\n # concrete_fn._captured_inputs == [tensor1, tensor2, tensor3]\n # concrete_fn.inputs == [placeholder1, placeholder2, placeholder3]\n # replace external capture `tensor2` with a deferred_capture, i.e., a\n # closure, `closure2`\n concrete_fn.graph.replace_capture_with_deferred_capture(tensor2,\n closure2,\n placeholder2,\n some_spec,\n some_default)\n concrete_fn.set_external_captures([tensor1, closure2, tensor3])\n ```\n\n Args:\n tensor: Tensor already captured.\n closure: function which takes no arguments, to be evaluated at function\n call time, returning a nest of tensors compatible with `spec`.\n spec: nest of TypeSpec for the value to capture.\n placeholder: the internal placeholder corresponding to the captured\n `tensor`.\n default_value: optional value to use in environments that cannot safely\n evaluate closure.\n \"\"\"\n self._function_captures.pop(id(tensor), is_by_ref=False)\n self.capture_call_time_value(\n closure,\n spec,\n key=id(tensor),\n default_value=default_value,\n placeholder=placeholder)\n\n @property\n def external_captures(self):\n \"\"\"External tensors captured by this function.\"\"\"\n return list(self._function_captures.by_val_external.values())\n\n @property\n def internal_captures(self):\n \"\"\"Placeholders in this function corresponding captured tensors.\"\"\"\n return list(self._function_captures.by_val_internal.values())\n\n @property\n def deferred_external_captures(self):\n \"\"\"Ordered nest of tensors whose placeholders will be fed at call time.\"\"\"\n return list(self._function_captures.by_ref_external.values())\n\n @property\n def deferred_internal_captures(self):\n \"\"\"List of nest of placeholders which at call time will be fed.\"\"\"\n return list(self._function_captures.by_ref_internal.values())\n\n @property\n def variable_captures(self):\n \"\"\"Map of python object ids of variables to variables which are captured.\"\"\"\n return self.variables\n\n @property\n def function_captures(self):\n return self._function_captures\n\n def mark_as_unsaveable(self, error_message):\n \"\"\"Marks this FuncGraph as unsaveable.\n\n Any attempts to export this FuncGraph will raise an error with the specified\n message.\n\n Args:\n error_message: List or string containing the error message to be raised\n when saving this FuncGraph to SavedModel.\n \"\"\"\n self._saveable = False\n if isinstance(error_message, str):\n error_message = [error_message]\n self._saving_errors.update(error_message)\n\n @property\n def saveable(self):\n \"\"\"Returns whether this FuncGraph is saveable.\"\"\"\n return self._saveable\n\n @property\n def saving_errors(self):\n \"\"\"Returns set of errors preventing this FuncGraph from being saved.\"\"\"\n return self._saving_errors\n\n def _add_scope_exit_callback(self, fn):\n \"\"\"Add a function to call when this graph exits the default scope.\"\"\"\n if not callable(fn):\n raise TypeError(\"fn is not callable: {}\".format(fn))\n if self._scope_exit_callbacks is None:\n raise RuntimeError(\n \"Attempting to add a scope exit callback, but the default graph is \"\n \"not the context scope graph. 
Did you forget to call \"\n \"'with graph.as_default(): ...'?\")\n self._scope_exit_callbacks.append(fn)\n\n\ndef func_graph_from_py_func(name,\n python_func,\n args,\n kwargs,\n signature=None,\n func_graph=None,\n add_control_dependencies=True,\n arg_names=None,\n op_return_value=None,\n collections=None,\n capture_by_value=None,\n create_placeholders=True):\n \"\"\"Returns a `FuncGraph` generated from `python_func`.\n\n Args:\n name: an identifier for the function.\n python_func: the Python function to trace.\n args: the positional args with which the Python function should be called;\n ignored if a signature is provided.\n kwargs: the keyword args with which the Python function should be called;\n ignored if a signature is provided.\n signature: a possibly nested sequence of `TensorSpecs` specifying the shapes\n and dtypes of the arguments. When a signature is provided, `args` and\n `kwargs` are ignored, and `python_func` is traced with Tensors conforming\n to `signature`. If `None`, the shapes and dtypes are inferred from the\n inputs.\n func_graph: Optional. An instance of FuncGraph. If provided, we will use\n this graph else a new one is built and returned.\n add_control_dependencies: If True, automatically adds control dependencies\n to ensure program order matches execution order and stateful ops always\n execute.\n arg_names: Optional list of argument names, used to give input placeholders\n recognizable names.\n op_return_value: Optional. A Tensor. If set and `python_func` returns\n Operations, those return values will be replaced with this value. If not\n set, returning an Operation triggers an error.\n collections: a dictionary of collections this FuncGraph should start with.\n If not specified (None), the FuncGraph will read (but not write to) the\n outer graph's collections that are not allowlisted, and both read and\n write to the outer graph's collections that are allowlisted. The current\n allowlisted collections are the global variables, the local variables, and\n the trainable variables. Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will capture\n Variables by value instead of reference. By default inherit from outer\n graphs, and failing that will default to False.\n create_placeholders: An optional boolean. If True, then func graph will\n create placeholders for the inputs as graph ops. 
If False, the input args\n and kwargs will be treated as the input placeholders.\n\n Returns:\n A FuncGraph.\n\n Raises:\n TypeError: If any of `python_func`'s return values is neither `None`, a\n `Tensor` or a `tf.experimental.ExtensionType`.\n \"\"\"\n if op_return_value is not None:\n assert isinstance(op_return_value, tensor_lib.Tensor), op_return_value\n if func_graph is None:\n func_graph = FuncGraph(\n name, collections=collections, capture_by_value=capture_by_value)\n assert isinstance(func_graph, FuncGraph)\n if add_control_dependencies:\n deps_control_manager = auto_control_deps.AutomaticControlDependencies()\n else:\n deps_control_manager = ops.NullContextmanager()\n\n with func_graph.as_default(), deps_control_manager as deps_ctx:\n current_scope = variable_scope.get_variable_scope()\n default_use_resource = current_scope.use_resource\n current_scope.set_use_resource(True)\n\n if signature is not None:\n args = signature\n kwargs = {}\n\n if create_placeholders:\n func_args, func_kwargs = _create_placeholders(args, kwargs, arg_names)\n else:\n func_args, func_kwargs = args, kwargs\n\n input_trace_types = trace_type.from_value([func_args, func_kwargs])\n func_graph.inputs = input_trace_types.to_tensors([func_args, func_kwargs]) # pylint: disable=protected-access\n\n # Reset variables watched while deconstructing inputs.\n func_graph._watched_variables = object_identity.ObjectIdentityWeakSet() # pylint: disable=protected-access\n\n for arg in func_graph.inputs:\n if arg.dtype == dtypes.resource:\n func_graph._resource_tensor_inputs.add(arg) # pylint:disable=protected-access\n\n signature_context = trace_type.InternalTracingContext()\n # Convert all Tensors into TensorSpecs before saving the structured inputs.\n # If storing pure concrete functions that are not called through polymorphic\n # functions, we don't have access to FunctionSpec, so we need to call the\n # TensorSpecs by their `arg_names` for later binding.\n func_graph.structured_input_signature = (\n convert_structure_to_signature(\n func_args, arg_names, signature_context=signature_context),\n convert_structure_to_signature(\n func_kwargs, signature_context=signature_context))\n\n # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.\n # Variables to help check whether mutation happens in calling the function\n # Copy the recursive list, tuple and map structure, but not base objects\n func_args_before = nest.pack_sequence_as(\n func_args,\n nest.flatten(func_args, expand_composites=True),\n expand_composites=True)\n func_kwargs_before = nest.pack_sequence_as(\n func_kwargs,\n nest.flatten(func_kwargs, expand_composites=True),\n expand_composites=True)\n\n def convert(x):\n \"\"\"Converts a function output to a Tensor.\"\"\"\n if x is None:\n return None\n if op_return_value is not None and isinstance(x, ops.Operation):\n # TODO(b/79881896): we currently can't capture external control deps, so\n # this won't work if x needs to be captured (i.e. 
if python_func returns\n # captured Operations).\n with ops.control_dependencies([x]):\n x = array_ops.identity(op_return_value)\n elif not isinstance(x, tensor_array_ops.TensorArray):\n try:\n x = ops.convert_to_tensor_or_composite(x)\n except (ValueError, TypeError):\n raise TypeError(\n \"To be compatible with tf.function, Python functions \"\n \"must return zero or more Tensors or ExtensionTypes or None \"\n f\"values; in compilation of {str(python_func)}, found return \"\n f\"value of type {type(x).__name__}, which is not a Tensor or \"\n \"ExtensionType.\")\n if add_control_dependencies:\n x = deps_ctx.mark_as_return(x)\n return x\n\n _, original_func = tf_decorator.unwrap(python_func)\n func_outputs = python_func(*func_args, **func_kwargs)\n\n # invariant: `func_outputs` contains only Tensors, CompositeTensors,\n # TensorArrays and `None`s.\n func_outputs = variable_utils.convert_variables_to_tensors(func_outputs)\n func_outputs = nest.map_structure(\n convert, func_outputs, expand_composites=True)\n\n # flatten and unflatten func_args and func_kwargs to maintain parity\n # from flattening which sorts by key\n func_args = nest.pack_sequence_as(\n func_args,\n nest.flatten(func_args, expand_composites=True),\n expand_composites=True)\n func_kwargs = nest.pack_sequence_as(\n func_kwargs,\n nest.flatten(func_kwargs, expand_composites=True),\n expand_composites=True)\n check_func_mutation(func_args_before, func_kwargs_before, func_args,\n func_kwargs, original_func)\n current_scope.set_use_resource(default_use_resource)\n\n inputs = []\n for arg in composite_tensor_utils.flatten_with_variables([func_args,\n func_kwargs]):\n if isinstance(arg, resource_variable_ops.BaseResourceVariable):\n # Even if an argument variable was not used in the function, we've\n # already manually captured the resource Tensor when creating argument\n # placeholders.\n capture = func_graph._function_captures.pop(id(arg.handle), False) # pylint: disable=protected-access\n assert len(capture) >= 2\n resource_placeholder = capture[1]\n if resource_placeholder is None:\n continue\n inputs.append(resource_placeholder)\n elif isinstance(arg, tensor_lib.Tensor):\n inputs.append(arg)\n func_graph.inputs = (\n inputs + func_graph.internal_captures + nest.flatten(\n func_graph.deferred_internal_captures, expand_composites=True))\n func_graph.structured_outputs = func_outputs\n # Returning a closed-over tensor does not trigger convert_to_tensor.\n func_graph.outputs.extend(\n func_graph.capture(x)\n for x in flatten(func_graph.structured_outputs)\n if x is not None)\n\n func_graph.variables = func_graph._watched_variables # pylint: disable=protected-access\n\n if add_control_dependencies:\n func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)\n func_graph.collective_manager_ids_used = (\n deps_control_manager.collective_manager_ids_used)\n\n return func_graph\n\n\ndef maybe_captured(tensor):\n \"\"\"If t is a captured value placeholder, returns the original captured value.\n\n Args:\n tensor: Tensor.\n\n Returns:\n A tensor, potentially from a different Graph/FuncGraph.\n \"\"\"\n if (not isinstance(tensor, ops.EagerTensor) and\n tensor.op.graph.building_function and tensor.op.type == \"Placeholder\"):\n for input_t, placeholder_t in tensor.op.graph.captures:\n if tensor == placeholder_t:\n return maybe_captured(input_t)\n # pylint: enable=protected-access\n return tensor\n\n\ndef device_stack_has_callable(device_stack):\n \"\"\"Checks whether a device stack contains a callable.\"\"\"\n return 
any(\n callable(spec._device_name_or_function) # pylint: disable=protected-access\n for spec in device_stack.peek_objs())\n\n\ndef has_mutation(n1, n2):\n \"\"\"Returns true if n1 and n2 are different (using `is` to compare leaves).\"\"\"\n try:\n nest.assert_same_structure(n1, n2, expand_composites=True)\n except ValueError:\n return True\n\n for arg1, arg2 in zip(\n nest.flatten(n1, expand_composites=True),\n nest.flatten(n2, expand_composites=True)):\n if arg1 is not arg2:\n return True\n\n return False\n\n\ndef check_func_mutation(old_args, old_kwargs, new_args, new_kwargs, func):\n \"\"\"Checks that the arguments to a function are not modified.\"\"\"\n if not has_mutation((old_args, old_kwargs), (new_args, new_kwargs)):\n return\n\n # Mutation detected; construct a useful error message.\n func_name = getattr(func, \"__qualname__\", getattr(func, \"__name__\", func))\n signature = tf_inspect.signature(func)\n try:\n old_bound = signature.bind(*old_args, **old_kwargs).arguments\n new_bound = signature.bind(*new_args, **new_kwargs).arguments\n except TypeError as e:\n # This occurs when the function is called with the (deprecated)\n # \"flat signature\". See ConcreteFunction._call_with_flat_signature. In\n # this case, we can't report which arguments were modified.\n raise ValueError(\n f\"{func_name}{signature} should not modify its Python input \"\n f\"arguments. Check if it modifies any lists or dicts passed as \"\n f\"arguments. Modifying a copy is allowed.\") from e\n\n assert set(old_bound) == set(new_bound)\n modified_args = [\n arg_name for arg_name in new_bound\n if has_mutation(old_bound[arg_name], new_bound[arg_name])\n ]\n changes = \", \".join(modified_args)\n raise ValueError(f\"{func_name}{signature} should not modify its Python \"\n f\"input arguments. Modifying a copy is allowed. The \"\n f\"following parameter(s) were modified: {changes}\")\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef flatten(sequence):\n \"\"\"Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.\n\n Args:\n sequence: A nested structure of Tensors, CompositeTensors, and TensorArrays.\n\n Returns:\n A list of tensors.\n \"\"\"\n flat_sequence = nest.flatten(sequence, expand_composites=True)\n return [\n item.flow if isinstance(item, tensor_array_ops.TensorArray) else item\n for item in flat_sequence\n ]\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef pack_sequence_as(structure, flat_sequence):\n \"\"\"Like `nest.pack_sequence_as` but also builds TensorArrays from flows.\n\n Args:\n structure: The structure to pack into. 
May contain Tensors,\n CompositeTensors, or TensorArrays.\n flat_sequence: An iterable containing tensors.\n\n Returns:\n A nested structure.\n\n Raises:\n AssertionError if `structure` and `flat_sequence` are not compatible.\n \"\"\"\n flat_sequence = list(flat_sequence)\n flattened_structure = nest.flatten(structure, expand_composites=True)\n if len(flattened_structure) != len(flat_sequence):\n raise ValueError(\"Mismatch in element count\")\n for i in range(len(flat_sequence)):\n if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):\n flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(\n old_ta=flattened_structure[i], flow=flat_sequence[i])\n return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)\n\n\ndef _create_placeholders(args, kwargs, arg_names=None):\n \"\"\"Create placeholders given positional args and keyword args.\"\"\"\n signature_context = trace_type.InternalTracingContext(\n is_legacy_signature=True)\n arg_trace_types = trace_type.from_value(tuple(args), signature_context)\n kwarg_trace_types = trace_type.from_value(kwargs, signature_context)\n\n placeholder_mapping = signature_context.get_placeholder_mapping()\n placeholder_context = trace_type.InternalPlaceholderContext(\n ops.get_default_graph(), placeholder_mapping)\n\n if arg_names is None:\n arg_names = [None] * len(arg_trace_types.components)\n\n # Create placeholders for trace type args and trace type kwargs\n func_args = []\n for name, trace_type_arg in zip(arg_names, arg_trace_types.components):\n placeholder_context.update_naming_scope(name)\n placeholder = trace_type_arg.placeholder_value(placeholder_context)\n func_args.append(placeholder)\n\n func_kwargs = {}\n for name, trace_type_kwarg in zip(*sorted(kwarg_trace_types.mapping.items())):\n placeholder_context.update_naming_scope(name)\n placeholder = trace_type_kwarg.placeholder_value(placeholder_context)\n func_kwargs[name] = placeholder\n\n return tuple(func_args), func_kwargs\n\n\ndef dismantle_func_graph(func_graph):\n \"\"\"Removes reference cycles in `func_graph` FuncGraph.\n\n Helpful for making sure the garbage collector doesn't need to run when\n the FuncGraph goes out of scope, e.g. in tests using defun with\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).\n\n Args:\n func_graph: A `FuncGraph` object to destroy. 
`func_graph` is unusable after\n this function.\n \"\"\"\n func_graph._function_captures.clear() # pylint: disable=protected-access\n ops.dismantle_graph(func_graph)\n\n\ndef override_func_graph_name_scope(func_graph, name_scope):\n func_graph._name_stack = name_scope # pylint: disable=protected-access\n", "output": ["func_graph_from_py_func", "has_mutation", "pack_sequence_as", "_create_placeholders", "override_func_graph_name_scope", "check_func_mutation", "dismantle_func_graph", "maybe_captured", "convert_structure_to_signature", "device_stack_has_callable", "flatten", "FuncGraph", "UnknownArgument"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/framework/func_graph.py", "file_length": 14720, "symbol_dict": [{"symbol": "has_mutation", "type": "mannual_defined_function", "byte_location": 47171, "location": 13058}, {"symbol": "override_func_graph_name_scope", "type": "mannual_defined_function", "byte_location": 52301, "location": 14676}, {"symbol": "func_graph_from_py_func", "type": "mannual_defined_function", "byte_location": 37271, "location": 10293}, {"symbol": "flatten", "type": "mannual_defined_function", "byte_location": 49071, "location": 13654}, {"symbol": "maybe_captured", "type": "mannual_defined_function", "byte_location": 46395, "location": 12818}, {"symbol": "dismantle_func_graph", "type": "mannual_defined_function", "byte_location": 51765, "location": 14490}, {"symbol": "device_stack_has_callable", "type": "mannual_defined_function", "byte_location": 46925, "location": 12980}, {"symbol": "check_func_mutation", "type": "mannual_defined_function", "byte_location": 47568, "location": 13200}, {"symbol": "pack_sequence_as", "type": "mannual_defined_function", "byte_location": 49572, "location": 13814}, {"symbol": "_create_placeholders", "type": "mannual_defined_function", "byte_location": 50524, "location": 14115}, {"symbol": "convert_structure_to_signature", "type": "mannual_defined_function", "byte_location": 2617, "location": 686}, {"symbol": "UnknownArgument", "type": "mannual_defined_class", "byte_location": 2522, "location": 662}, {"symbol": "FuncGraph", "type": "mannual_defined_class", "byte_location": 5315, "location": 1483}]}} {"input": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Operations for generating random numbers.\"\"\"\n\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute import values_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import array_ops_stack\nfrom tensorflow.python.ops import gen_stateful_random_ops\nfrom tensorflow.python.ops import gen_stateless_random_ops_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops_util\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import stateless_random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.trackable import autotrackable\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# A seed for random ops (stateful and stateless) will always be 1024\n# bits, all of which will be sent to the C++ code. The actual C++\n# implementation of some algorithms may only use a lower part of the bits.\n\nUINT64_HALF_SPAN = 2**63\nMAX_INT64 = UINT64_HALF_SPAN - 1\nMIN_INT64 = -UINT64_HALF_SPAN\nUINT64_SPAN = UINT64_HALF_SPAN * 2\n# 'Variable' doesn't support uint32 or uint64 yet (due to reasons explained in\n# b/111604096 and cl/171681867), so I use signed int here. I choose int64\n# instead of int32 here because `VarHandleOp` doesn't support int32 on GPU.\nSEED_TYPE = \"int64\"\nSEED_MIN = MIN_INT64\nSEED_MAX = MAX_INT64\nSEED_UINT_SPAN = UINT64_SPAN\nSEED_TYPE_BITS = 64\nSEED_BIT_MASK = 0xFFFFFFFFFFFFFFFF\nSEED_SIZE = 16 # in units of SEED_TYPE\n\n\nSTATE_TYPE = SEED_TYPE\nALGORITHM_TYPE = STATE_TYPE\n\n\n# The following sizes are all in unit of uint64.\nPHILOX_KEY_SIZE = 1\nTHREEFRY_KEY_SIZE = 1\nPHILOX_COUNTER_SIZE = 2\nTHREEFRY_COUNTER_SIZE = 1\nPHILOX_STATE_SIZE = PHILOX_COUNTER_SIZE + PHILOX_KEY_SIZE\nTHREEFRY_STATE_SIZE = THREEFRY_COUNTER_SIZE + THREEFRY_KEY_SIZE\n\n\nRNG_ALG_PHILOX = random_ops_util.Algorithm.PHILOX.value\nRNG_ALG_THREEFRY = random_ops_util.Algorithm.THREEFRY.value\n\n\nDEFAULT_ALGORITHM = RNG_ALG_PHILOX\n\n\ndef non_deterministic_ints(shape, dtype=dtypes.int64):\n \"\"\"Non-deterministically generates some integers.\n\n This op may use some OS-provided source of non-determinism (e.g. 
an RNG), so\n each execution will give different results.\n\n Args:\n shape: the shape of the result.\n dtype: (optional) the dtype of the result.\n\n Returns:\n a tensor whose element values are non-deterministically chosen.\n \"\"\"\n return gen_stateful_random_ops.non_deterministic_ints(\n shape=shape, dtype=dtype)\n\n\ndef _uint_to_int(n):\n if isinstance(n, int) and n > SEED_MAX:\n n = n - SEED_UINT_SPAN\n return n\n\n\ndef _make_1d_state(state_size, seed):\n \"\"\"Makes a 1-D RNG state.\n\n Args:\n state_size: an integer.\n seed: an integer or 1-D tensor.\n\n Returns:\n a 1-D tensor of shape [state_size] and dtype STATE_TYPE.\n \"\"\"\n if isinstance(seed, int):\n # chop the Python integer (infinite precision) into chunks of SEED_TYPE\n ls = []\n for _ in range(state_size):\n ls.append(seed & SEED_BIT_MASK)\n seed >>= SEED_TYPE_BITS\n seed = ls\n # to avoid overflow error from ops.convert_to_tensor\n seed = nest.map_structure(_uint_to_int, seed)\n seed = math_ops.cast(seed, STATE_TYPE)\n seed = array_ops.reshape(seed, [-1])\n seed = seed[0:state_size]\n # Padding with zeros on the *left* if too short. Padding on the right would\n # cause a small seed to be used as the \"counter\" while the \"key\" is always\n # zero (for counter-based RNG algorithms), because in the current memory\n # layout counter is stored before key. In such a situation two RNGs with\n # two different small seeds may generate overlapping outputs.\n seed_size = seed.shape[0]\n if seed_size is None:\n seed_size = array_ops.shape(seed)[0]\n padding_size = math_ops.maximum(state_size - seed_size, 0)\n padding = array_ops.zeros([padding_size], seed.dtype)\n # can't use `pad` because it doesn't support integer dtypes on GPU\n seed = array_ops.concat([padding, seed], axis=0)\n seed.set_shape([state_size])\n return seed\n\n\ndef _get_counter_size(alg):\n if alg == random_ops_util.Algorithm.PHILOX.value:\n return PHILOX_COUNTER_SIZE\n elif alg == random_ops_util.Algorithm.THREEFRY.value:\n return THREEFRY_COUNTER_SIZE\n elif alg == random_ops_util.Algorithm.AUTO_SELECT.value:\n # For AUTO_SELECT, we'll manage the counter as if it's for Philox.\n return PHILOX_COUNTER_SIZE\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))\n\n\ndef _get_state_size(alg):\n if alg == random_ops_util.Algorithm.PHILOX.value:\n return PHILOX_STATE_SIZE\n elif alg == random_ops_util.Algorithm.THREEFRY.value:\n return THREEFRY_STATE_SIZE\n elif alg == random_ops_util.Algorithm.AUTO_SELECT.value:\n # For AUTO_SELECT, we'll manage the state as if it's for Philox.\n return PHILOX_STATE_SIZE\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))\n\n\ndef _check_state_shape(shape, alg):\n if isinstance(alg, tensor.Tensor) and not context.executing_eagerly():\n return\n shape.assert_is_compatible_with([_get_state_size(int(alg))])\n\n\ndef _make_state_from_seed(seed, alg):\n return _make_1d_state(_get_state_size(alg), seed)\n\n\n@tf_export(\"random.create_rng_state\", \"random.experimental.create_rng_state\")\ndef create_rng_state(seed, alg):\n \"\"\"Creates a RNG state from an integer or a vector.\n\n Example:\n\n >>> tf.random.create_rng_state(\n ... 1234, \"philox\")\n \n >>> tf.random.create_rng_state(\n ... [12, 34], \"threefry\")\n \n\n Args:\n seed: an integer or 1-D numpy array.\n alg: the RNG algorithm. 
Can be a string, an `Algorithm` or an integer.\n\n Returns:\n a 1-D numpy array whose size depends on the algorithm.\n \"\"\"\n alg = random_ops_util.convert_alg_to_int(alg)\n return _make_state_from_seed(seed, alg)\n\n\ndef _shape_tensor(shape):\n \"\"\"Convert to an int32 or int64 tensor, defaulting to int64 if empty.\"\"\"\n if isinstance(shape, (tuple, list)) and not shape:\n dtype = dtypes.int64\n else:\n dtype = None\n return ops.convert_to_tensor(shape, dtype=dtype, name=\"shape\")\n\n\ndef _convert_to_state_tensor(t):\n # to avoid out-of-range error from ops.convert_to_tensor\n t = nest.map_structure(_uint_to_int, t)\n return math_ops.cast(t, STATE_TYPE)\n\n\ndef get_replica_id():\n rctx = distribute_lib.get_replica_context()\n if rctx is None:\n return None\n return rctx.replica_id_in_sync_group\n\n\n@tf_export(\"random.Generator\", \"random.experimental.Generator\")\nclass Generator(autotrackable.AutoTrackable):\n \"\"\"Random-number generator.\n\n Example:\n\n Creating a generator from a seed:\n\n >>> g = tf.random.Generator.from_seed(1234)\n >>> g.normal(shape=(2, 3))\n \n\n Creating a generator from a non-deterministic state:\n\n >>> g = tf.random.Generator.from_non_deterministic_state()\n >>> g.normal(shape=(2, 3))\n \n\n All the constructors allow explicitly choosing an Random-Number-Generation\n (RNG) algorithm. Supported algorithms are `\"philox\"` and `\"threefry\"`. For\n example:\n\n >>> g = tf.random.Generator.from_seed(123, alg=\"philox\")\n >>> g.normal(shape=(2, 3))\n \n\n CPU, GPU and TPU with the same algorithm and seed will generate the same\n integer random numbers. Float-point results (such as the output of `normal`)\n may have small numerical discrepancies between different devices.\n\n This class uses a `tf.Variable` to manage its internal state. Every time\n random numbers are generated, the state of the generator will change. For\n example:\n\n >>> g = tf.random.Generator.from_seed(1234)\n >>> g.state\n \n >>> g.normal(shape=(2, 3))\n <...>\n >>> g.state\n \n\n The shape of the state is algorithm-specific.\n\n There is also a global generator:\n\n >>> g = tf.random.get_global_generator()\n >>> g.normal(shape=(2, 3))\n \n\n When creating a generator inside a `tf.distribute.Strategy` scope, each\n replica will get a different stream of random numbers.\n\n For example, in this code:\n\n ```\n strat = tf.distribute.MirroredStrategy(devices=[\"cpu:0\", \"cpu:1\"])\n with strat.scope():\n g = tf.random.Generator.from_seed(1)\n def f():\n return g.normal([])\n results = strat.run(f).values\n ```\n\n `results[0]` and `results[1]` will have different values.\n\n If the generator is seeded (e.g. created via `Generator.from_seed`), the\n random numbers will be determined by the seed, even though different replicas\n get different numbers. One can think of a random number generated on a\n replica as a hash of the replica ID and a \"master\" random number that may be\n common to all replicas. Hence, the whole system is still deterministic.\n\n (Note that the random numbers on different replicas are not correlated, even\n if they are deterministically determined by the same seed. They are not\n correlated in the sense that no matter what statistics one calculates on them,\n there won't be any discernable correlation.)\n\n Generators can be freely saved and restored using `tf.train.Checkpoint`. The\n checkpoint can be restored in a distribution strategy with a different number\n of replicas than the original strategy. 
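The claim that generators can be saved and restored with `tf.train.Checkpoint` is easy to exercise; a minimal sketch (outside any distribution strategy, with a placeholder checkpoint path):

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(1)
_ = g.normal([2])                       # advance the state a bit

ckpt = tf.train.Checkpoint(generator=g)
path = ckpt.write("/tmp/rng_ckpt")      # placeholder path

after_save = g.normal([2])              # first draw after the save point
_ = g.normal([2])                       # the state keeps moving

ckpt.restore(path)                      # roll the generator's state back
replay = g.normal([2])                  # replays the draw made right after saving
assert bool(tf.reduce_all(tf.equal(after_save, replay)))
```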
If a replica ID is present in both the\n original and the new distribution strategy, its state will be properly\n restored (i.e. the random-number stream from the restored point will be the\n same as that from the saving point) unless the replicas have already diverged\n in their RNG call traces before saving (e.g. one replica has made one RNG call\n while another has made two RNG calls). We don't have such guarantee if the\n generator is saved in a strategy scope and restored outside of any strategy\n scope, or vice versa.\n\n When a generator is created within the scope of\n `tf.distribute.experimental.ParameterServerStrategy`, the workers\n will share the generator's state (placed on one of the parameter\n servers). In this way the workers will still get different\n random-number streams, as stated above. (This is similar to replicas\n in a `tf.distribute.MirroredStrategy` sequentially accessing a\n generator created outside the strategy.) Each RNG call on a worker\n will incur a round-trip to a parameter server, which may have\n performance impacts. When creating a\n `tf.distribute.experimental.ParameterServerStrategy`, please make\n sure that the `variable_partitioner` argument won't shard small\n variables of shape `[2]` or `[3]` (because generator states must not\n be sharded). Ways to avoid sharding small variables include setting\n `variable_partitioner` to `None` or to\n `tf.distribute.experimental.partitioners.MinSizePartitioner` with a\n large enough `min_shard_bytes` (see\n `tf.distribute.experimental.ParameterServerStrategy`'s documentation\n for more details).\n \"\"\"\n\n @classmethod\n def from_state(cls, state, alg):\n \"\"\"Creates a generator from a state.\n\n See `__init__` for description of `state` and `alg`.\n\n Args:\n state: the new state.\n alg: the RNG algorithm.\n\n Returns:\n The new generator.\n \"\"\"\n return cls(alg=alg, state=state)\n\n @classmethod\n def from_seed(cls, seed, alg=None):\n \"\"\"Creates a generator from a seed.\n\n A seed is a 1024-bit unsigned integer represented either as a Python\n integer or a vector of integers. Seeds shorter than 1024-bit will be\n padded. The padding, the internal structure of a seed and the way a seed\n is converted to a state are all opaque (unspecified). The only semantics\n specification of seeds is that two different seeds are likely to produce\n two independent generators (but no guarantee).\n\n Args:\n seed: the seed for the RNG.\n alg: (optional) the RNG algorithm. If None, it will be auto-selected. See\n `__init__` for its possible values.\n\n Returns:\n The new generator.\n \"\"\"\n if alg is None:\n # TODO(b/170668986): more sophisticated algorithm selection\n alg = DEFAULT_ALGORITHM\n alg = random_ops_util.convert_alg_to_int(alg)\n state = create_rng_state(seed, alg)\n return cls(state=state, alg=alg)\n\n @classmethod\n def from_non_deterministic_state(cls, alg=None):\n \"\"\"Creates a generator by non-deterministically initializing its state.\n\n The source of the non-determinism will be platform- and time-dependent.\n\n Args:\n alg: (optional) the RNG algorithm. If None, it will be auto-selected. 
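As a concrete sketch of `from_state`, a Philox generator can be built from an explicit length-3 int64 state; per the layout described above, the counter words come before the key:

```python
import tensorflow as tf

state = tf.constant([0, 0, 1234], dtype=tf.int64)   # [counter, counter, key]
g = tf.random.Generator.from_state(state, alg="philox")
print(g.state.numpy())   # [0, 0, 1234] before any draws
_ = g.uniform([4])
print(g.state.numpy())   # the counter part has advanced; the key is unchanged
```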
See\n `__init__` for its possible values.\n\n Returns:\n The new generator.\n \"\"\"\n if config.is_op_determinism_enabled():\n raise RuntimeError('\"from_non_deterministic_state\" cannot be called when ' # pylint: disable=g-doc-exception\n \"determinism is enabled.\")\n if alg is None:\n # TODO(b/170668986): more sophisticated algorithm selection\n alg = DEFAULT_ALGORITHM\n alg = random_ops_util.convert_alg_to_int(alg)\n state = non_deterministic_ints(shape=[_get_state_size(alg)],\n dtype=SEED_TYPE)\n return cls(state=state, alg=alg)\n\n @classmethod\n def from_key_counter(cls, key, counter, alg):\n \"\"\"Creates a generator from a key and a counter.\n\n This constructor only applies if the algorithm is a counter-based algorithm.\n See method `key` for the meaning of \"key\" and \"counter\".\n\n Args:\n key: the key for the RNG, a scalar of type STATE_TYPE.\n counter: a vector of dtype STATE_TYPE representing the initial counter for\n the RNG, whose length is algorithm-specific.,\n alg: the RNG algorithm. If None, it will be auto-selected. See\n `__init__` for its possible values.\n\n Returns:\n The new generator.\n \"\"\"\n counter = _convert_to_state_tensor(counter)\n key = _convert_to_state_tensor(key)\n alg = random_ops_util.convert_alg_to_int(alg)\n counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])\n key.shape.assert_is_compatible_with([])\n key = array_ops.reshape(key, [1])\n state = array_ops.concat([counter, key], 0)\n return cls(state=state, alg=alg)\n\n def __init__(self, copy_from=None, state=None, alg=None):\n \"\"\"Creates a generator.\n\n The new generator will be initialized by one of the following ways, with\n decreasing precedence:\n (1) If `copy_from` is not None, the new generator is initialized by copying\n information from another generator.\n (2) If `state` and `alg` are not None (they must be set together), the new\n generator is initialized by a state.\n\n Args:\n copy_from: a generator to be copied from.\n state: a vector of dtype STATE_TYPE representing the initial state of the\n RNG, whose length and semantics are algorithm-specific. If it's a\n variable, the generator will reuse it instead of creating a new\n variable.\n alg: the RNG algorithm. 
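A small usage sketch for `from_key_counter`: for Philox the counter has length two (the state size minus the key), and the key is a scalar:

```python
import tensorflow as tf

g = tf.random.Generator.from_key_counter(key=7, counter=[0, 0], alg="philox")
print(g.key.numpy())   # 7
_ = g.normal([3])      # drawing advances the counter, not the key
print(g.key.numpy())   # still 7
```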
Possible values are\n `tf.random.Algorithm.PHILOX` for the Philox algorithm and\n `tf.random.Algorithm.THREEFRY` for the ThreeFry algorithm\n (see paper 'Parallel Random Numbers: As Easy as 1, 2, 3'\n [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]).\n The string names `\"philox\"` and `\"threefry\"` can also be used.\n Note `PHILOX` guarantees the same numbers are produced (given\n the same random state) across all architectures (CPU, GPU, XLA etc).\n \"\"\"\n # TODO(b/175072242): Remove distribution-strategy dependencies in this file.\n if distribute_lib.has_strategy():\n self._distribution_strategy = distribute_lib.get_strategy()\n else:\n self._distribution_strategy = None\n if copy_from is not None:\n # All other arguments should be None\n assert (alg or state) is None\n self._state_var = self._create_variable(copy_from.state, dtype=STATE_TYPE,\n trainable=False)\n self._alg = copy_from.algorithm\n else:\n assert alg is not None and state is not None\n alg = random_ops_util.convert_alg_to_int(alg)\n if isinstance(state, variables.Variable):\n _check_state_shape(state.shape, alg)\n self._state_var = state\n else:\n state = _convert_to_state_tensor(state)\n _check_state_shape(state.shape, alg)\n self._state_var = self._create_variable(state, dtype=STATE_TYPE,\n trainable=False)\n self._alg = alg\n\n def _create_variable(self, *args, **kwargs):\n \"\"\"Creates a variable.\n\n Args:\n *args: positional arguments passed along to `variables.Variable.\n **kwargs: keyword arguments passed along to `variables.Variable.\n\n Returns:\n The created variable.\n \"\"\"\n with ops.name_scope(\"random_generator\"):\n # Make sure we don't change this name since Keras was using this name\n # to filter out the state variable.\n kwargs[\"name\"] = \"StateVar\"\n v = variables.Variable(*args, **kwargs)\n if isinstance(v, sharded_variable.ShardedVariable):\n # RNG state is an atomic entity representing a 128-bit or\n # 192-bit value, so it mustn't be sharded.\n raise ValueError(\n \"tf.random.Generator state is sharded, which is not allowed. When \"\n \"creating a tf.distribute.experimental.ParameterServerStrategy, \"\n \"please make sure that the `variable_partitioner` \"\n \"argument won't shard a \"\n \"small variable of shape [2] or [3]. 
Ways to avoid sharding small \"\n \"variables include setting `variable_partitioner` to None or to \"\n \"tf.distribute.experimental.partitioners.MinSizePartitioner with a \"\n \"large enough `min_shard_bytes`.\")\n return v\n\n def reset(self, state):\n \"\"\"Resets the generator by a new state.\n\n See `__init__` for the meaning of \"state\".\n\n Args:\n state: the new state.\n \"\"\"\n state = _convert_to_state_tensor(state)\n state.shape.assert_is_compatible_with([_get_state_size(self.algorithm)])\n self._state_var.assign(state)\n\n def reset_from_seed(self, seed):\n \"\"\"Resets the generator by a new seed.\n\n See `from_seed` for the meaning of \"seed\".\n\n Args:\n seed: the new seed.\n \"\"\"\n state = create_rng_state(seed, self.algorithm)\n self._state_var.assign(state)\n\n def reset_from_key_counter(self, key, counter):\n \"\"\"Resets the generator by a new key-counter pair.\n\n See `from_key_counter` for the meaning of \"key\" and \"counter\".\n\n Args:\n key: the new key.\n counter: the new counter.\n \"\"\"\n counter = _convert_to_state_tensor(counter)\n key = _convert_to_state_tensor(key)\n counter.shape.assert_is_compatible_with(\n [_get_state_size(self.algorithm) - 1])\n key.shape.assert_is_compatible_with([])\n key = array_ops.reshape(key, [1])\n state = array_ops.concat([counter, key], 0)\n self._state_var.assign(state)\n\n @property\n def state(self):\n \"\"\"The internal state of the RNG.\"\"\"\n return self._state_var\n\n @property\n def algorithm(self):\n \"\"\"The RNG algorithm id (a Python integer or scalar integer Tensor).\"\"\"\n return self._alg\n\n def _standard_normal(self, shape, dtype):\n key, counter = self._prepare_key_counter(shape)\n return gen_stateless_random_ops_v2.stateless_random_normal_v2(\n shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)\n\n @property\n def key(self):\n \"\"\"The 'key' part of the state of a counter-based RNG.\n\n For a counter-base RNG algorithm such as Philox and ThreeFry (as\n described in paper 'Parallel Random Numbers: As Easy as 1, 2, 3'\n [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]),\n the RNG state consists of two parts: counter and key. The output is\n generated via the formula: output=hash(key, counter), i.e. a hashing of\n the counter parametrized by the key. Two RNGs with two different keys can\n be thought as generating two independent random-number streams (a stream\n is formed by increasing the counter).\n\n Returns:\n A scalar which is the 'key' part of the state, if the RNG algorithm is\n counter-based; otherwise it raises a ValueError.\n \"\"\"\n alg = self.algorithm\n if alg in (a.value for a in random_ops_util.Algorithm):\n return self._state_var[-1]\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))\n\n def _skip_single_var(self, var, delta):\n resource_variable_ops.variable_accessed(var)\n # TODO(wangpeng): Cache the cast algorithm instead of casting everytime.\n return gen_stateful_random_ops.rng_read_and_skip(\n var.handle,\n alg=math_ops.cast(self.algorithm, dtypes.int32),\n delta=math_ops.cast(delta, dtypes.uint64))\n\n def skip(self, delta):\n \"\"\"Advance the counter of a counter-based RNG.\n\n Args:\n delta: the amount of advancement. The state of the RNG after\n `skip(n)` will be the same as that after `normal([n])`\n (or any other distribution). 
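That equivalence between `skip(n)` and drawing `n` samples can be sketched directly (eager execution assumed):

```python
import tensorflow as tf

g1 = tf.random.Generator.from_seed(7)
g2 = tf.random.Generator.from_seed(7)

_ = g1.normal([5])   # consumes 5 samples
_ = g2.skip(5)       # leaves g2 in the same state as g1

a = g1.normal([2])
b = g2.normal([2])
assert bool(tf.reduce_all(tf.equal(a, b)))   # the two streams are back in sync
```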
The actual increment added to the\n counter is an unspecified implementation detail.\n\n Returns:\n A `Tensor` of type `int64`.\n \"\"\"\n\n def update_fn(v):\n return self._skip_single_var(v, delta)\n # TODO(b/170515001): Always call strategy.extended.update after calling it\n # from both replica context and cross-replica context is supported.\n if values_util.is_saving_non_distributed():\n # Assumes replica context with replica_id=0, since we only save the first\n # replica.\n return update_fn(self.state)\n if self._distribution_strategy is not None:\n with distribute_lib.enter_or_assert_strategy(self._distribution_strategy):\n if distribute_lib.in_cross_replica_context():\n # Code that operates on all replicas of a variable cannot be saved\n # without retracing.\n values_util.mark_as_unsaveable()\n if (distribute_lib.in_cross_replica_context() or\n \"CentralStorage\" in type(self._distribution_strategy).__name__):\n # In cross-replica context we need to use strategy.extended.update.\n # In CentralStorageStrategy we also need to use\n # strategy.extended.update (even for replica context),\n # because variable updates here must be within merge_call.\n return distribute_lib.get_strategy().extended.update(\n self.state, update_fn)\n return update_fn(self.state)\n\n def _preprocess_key(self, key):\n if self._distribution_strategy is None:\n return key\n with distribute_lib.enter_or_assert_strategy(self._distribution_strategy):\n replica_id = get_replica_id()\n if replica_id is not None:\n replica_id = array_ops_stack.stack([replica_id, 0], axis=0)\n replica_id = math_ops.cast(replica_id, dtypes.uint64)\n # Conceptually: key = hash(key, replica_id)\n key = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(\n shape=[1], key=key, counter=replica_id, dtype=dtypes.uint64,\n alg=self.algorithm)\n return key\n\n def _prepare_key_counter(self, shape):\n delta = math_ops.reduce_prod(shape)\n counter_key = self.skip(delta)\n counter_size = _get_counter_size(self.algorithm)\n counter = array_ops.bitcast(counter_key[:counter_size], dtypes.uint64)\n key = array_ops.bitcast(counter_key[counter_size:counter_size + 1],\n dtypes.uint64)\n key = self._preprocess_key(key)\n return key, counter\n\n # The following functions return a tensor and as a side effect update\n # self._state_var.\n def normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,\n name=None):\n \"\"\"Outputs random values from a normal distribution.\n\n Args:\n shape: A 1-D integer Tensor or Python array. The shape of the output\n tensor.\n mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal\n distribution.\n stddev: A 0-D Tensor or Python value of type `dtype`. 
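Tying the reset methods above to sampling: resetting to the same seed replays the stream, as in this small sketch:

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(42)
first = g.normal([3])

g.reset_from_seed(42)    # state goes back to where from_seed(42) started
replay = g.normal([3])

assert bool(tf.reduce_all(tf.equal(first, replay)))
```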
The standard\n deviation of the normal distribution.\n dtype: The type of the output.\n name: A name for the operation (optional).\n\n Returns:\n A tensor of the specified shape filled with random normal values.\n \"\"\"\n with ops.name_scope(name, \"stateful_normal\", [shape, mean, stddev]) as name:\n shape = _shape_tensor(shape)\n mean = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\n stddev = ops.convert_to_tensor(stddev, dtype=dtype, name=\"stddev\")\n rnd = self._standard_normal(shape, dtype=dtype)\n return math_ops.add(rnd * stddev, mean, name=name)\n\n def _truncated_normal(self, shape, dtype):\n key, counter = self._prepare_key_counter(shape)\n return gen_stateless_random_ops_v2.stateless_truncated_normal_v2(\n shape=shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)\n\n def truncated_normal(self, shape,\n mean=0.0,\n stddev=1.0,\n dtype=dtypes.float32,\n name=None):\n \"\"\"Outputs random values from a truncated normal distribution.\n\n The generated values follow a normal distribution with specified mean and\n standard deviation, except that values whose magnitude is more than\n 2 standard deviations from the mean are dropped and re-picked.\n\n Args:\n shape: A 1-D integer Tensor or Python array. The shape of the output\n tensor.\n mean: A 0-D Tensor or Python value of type `dtype`. The mean of the\n truncated normal distribution.\n stddev: A 0-D Tensor or Python value of type `dtype`. The standard\n deviation of the normal distribution, before truncation.\n dtype: The type of the output.\n name: A name for the operation (optional).\n\n Returns:\n A tensor of the specified shape filled with random truncated normal\n values.\n \"\"\"\n with ops.name_scope(\n name, \"truncated_normal\", [shape, mean, stddev]) as name:\n shape_tensor = _shape_tensor(shape)\n mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name=\"mean\")\n stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name=\"stddev\")\n rnd = self._truncated_normal(shape_tensor, dtype=dtype)\n mul = rnd * stddev_tensor\n return math_ops.add(mul, mean_tensor, name=name)\n\n def _uniform(self, shape, dtype):\n key, counter = self._prepare_key_counter(shape)\n return gen_stateless_random_ops_v2.stateless_random_uniform_v2(\n shape=shape, key=key, counter=counter, dtype=dtype, alg=self.algorithm)\n\n def _uniform_full_int(self, shape, dtype, name=None):\n key, counter = self._prepare_key_counter(shape)\n return gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(\n shape=shape,\n key=key,\n counter=counter,\n dtype=dtype,\n alg=self.algorithm,\n name=name)\n\n def uniform(self, shape, minval=0, maxval=None,\n dtype=dtypes.float32, name=None):\n \"\"\"Outputs random values from a uniform distribution.\n\n The generated values follow a uniform distribution in the range\n `[minval, maxval)`. The lower bound `minval` is included in the range, while\n the upper bound `maxval` is excluded. (For float numbers especially\n low-precision types like bfloat16, because of\n rounding, the result may sometimes include `maxval`.)\n\n For floats, the default range is `[0, 1)`. For ints, at least `maxval` must\n be specified explicitly.\n\n In the integer case, the random integers are slightly biased unless\n `maxval - minval` is an exact power of two. 
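A short sketch of the integer cases just described: bounded integers require `maxval`, while passing `None` for both bounds gives full-range integers:

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(3)

ints = g.uniform([4], minval=0, maxval=10, dtype=tf.int32)       # in [0, 10)
full = g.uniform([4], minval=None, maxval=None, dtype=tf.int64)  # full int64 range
more = g.uniform_full_int([4], dtype=tf.int64)                   # dedicated full-range API
```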
The bias is small for values of\n `maxval - minval` significantly smaller than the range of the output (either\n `2**32` or `2**64`).\n\n For full-range random integers, pass `minval=None` and `maxval=None` with an\n integer `dtype` (for integer dtypes, `minval` and `maxval` must be both\n `None` or both not `None`).\n\n Args:\n shape: A 1-D integer Tensor or Python array. The shape of the output\n tensor.\n minval: A Tensor or Python value of type `dtype`, broadcastable with\n `shape` (for integer types, broadcasting is not supported, so it needs\n to be a scalar). The lower bound (included) on the range of random\n values to generate. Pass `None` for full-range integers. Defaults to 0.\n maxval: A Tensor or Python value of type `dtype`, broadcastable with\n `shape` (for integer types, broadcasting is not supported, so it needs\n to be a scalar). The upper bound (excluded) on the range of random\n values to generate. Pass `None` for full-range integers. Defaults to 1\n if `dtype` is floating point.\n dtype: The type of the output.\n name: A name for the operation (optional).\n\n Returns:\n A tensor of the specified shape filled with random uniform values.\n\n Raises:\n ValueError: If `dtype` is integral and `maxval` is not specified.\n \"\"\"\n dtype = dtypes.as_dtype(dtype)\n if dtype.is_integer:\n if (minval is None) != (maxval is None):\n raise ValueError(\"For integer dtype {}, minval and maxval must be both \"\n \"`None` or both non-`None`; got minval={} and \"\n \"maxval={}\".format(dtype, minval, maxval))\n elif maxval is None:\n maxval = 1\n with ops.name_scope(name, \"stateful_uniform\",\n [shape, minval, maxval]) as name:\n shape = _shape_tensor(shape)\n if dtype.is_integer and minval is None:\n return self._uniform_full_int(shape=shape, dtype=dtype, name=name)\n minval = ops.convert_to_tensor(minval, dtype=dtype, name=\"min\")\n maxval = ops.convert_to_tensor(maxval, dtype=dtype, name=\"max\")\n if dtype.is_integer:\n key, counter = self._prepare_key_counter(shape)\n return gen_stateless_random_ops_v2.stateless_random_uniform_int_v2(\n shape=shape,\n key=key,\n counter=counter,\n minval=minval,\n maxval=maxval,\n alg=self.algorithm,\n name=name)\n else:\n rnd = self._uniform(shape=shape, dtype=dtype)\n return math_ops.add(rnd * (maxval - minval), minval, name=name)\n\n def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None):\n \"\"\"Uniform distribution on an integer type's entire range.\n\n This method is the same as setting `minval` and `maxval` to `None` in the\n `uniform` method.\n\n Args:\n shape: the shape of the output.\n dtype: (optional) the integer type, default to uint64.\n name: (optional) the name of the node.\n\n Returns:\n A tensor of random numbers of the required shape.\n \"\"\"\n dtype = dtypes.as_dtype(dtype)\n with ops.name_scope(name, \"stateful_uniform_full_int\",\n [shape]) as name:\n shape = _shape_tensor(shape)\n return self._uniform_full_int(shape=shape, dtype=dtype, name=name)\n\n def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None):\n \"\"\"Outputs random values from a binomial distribution.\n\n The generated values follow a binomial distribution with specified count and\n probability of success parameters.\n\n Example:\n\n ```python\n counts = [10., 20.]\n # Probability of success.\n probs = [0.8]\n\n rng = tf.random.Generator.from_seed(seed=234)\n binomial_samples = rng.binomial(shape=[2], counts=counts, probs=probs)\n\n\n counts = ... # Shape [3, 1, 2]\n probs = ... 
# Shape [1, 4, 2]\n shape = [3, 4, 3, 4, 2]\n rng = tf.random.Generator.from_seed(seed=1717)\n # Sample shape will be [3, 4, 3, 4, 2]\n binomial_samples = rng.binomial(shape=shape, counts=counts, probs=probs)\n ```\n\n\n Args:\n shape: A 1-D integer Tensor or Python array. The shape of the output\n tensor.\n counts: Tensor. The counts of the binomial distribution. Must be\n broadcastable with `probs`, and broadcastable with the rightmost\n dimensions of `shape`.\n probs: Tensor. The probability of success for the\n binomial distribution. Must be broadcastable with `counts` and\n broadcastable with the rightmost dimensions of `shape`.\n dtype: The type of the output. Default: tf.int32\n name: A name for the operation (optional).\n\n Returns:\n samples: A Tensor of the specified shape filled with random binomial\n values. For each i, each samples[i, ...] is an independent draw from\n the binomial distribution on counts[i] trials with probability of\n success probs[i].\n \"\"\"\n dtype = dtypes.as_dtype(dtype)\n with ops.name_scope(name, \"binomial\", [shape, counts, probs]) as name:\n counts = ops.convert_to_tensor(counts, name=\"counts\")\n probs = ops.convert_to_tensor(probs, name=\"probs\")\n shape_tensor = _shape_tensor(shape)\n return gen_stateful_random_ops.stateful_random_binomial(\n self.state.handle,\n self.algorithm,\n shape=shape_tensor,\n counts=counts,\n probs=probs,\n dtype=dtype,\n name=name)\n\n # TODO(wangpeng): implement other distributions\n\n def _make_int64_keys(self, shape=()):\n # New independent keys are generated via\n # `new_key[i] = hash(old_key, counter+i)`, which is exactly what\n # `uniform_full_int(dtype=int64)` does for PhiloxRandom_64_128_128 and\n # ThreeFry_64_64_64.\n return self.uniform_full_int(shape=shape, dtype=dtypes.int64)\n\n def make_seeds(self, count=1):\n \"\"\"Generates seeds for stateless random ops.\n\n For example:\n\n ```python\n seeds = get_global_generator().make_seeds(count=10)\n for i in range(10):\n seed = seeds[:, i]\n numbers = stateless_random_normal(shape=[2, 3], seed=seed)\n ...\n ```\n\n Args:\n count: the number of seed pairs (note that stateless random ops need a\n pair of seeds to invoke).\n\n Returns:\n A tensor of shape [2, count] and dtype int64.\n \"\"\"\n alg = self.algorithm\n if alg in (a.value for a in random_ops_util.Algorithm):\n keys = self._make_int64_keys(shape=[count])\n # The two seeds for stateless random ops don't have individual semantics\n # and are scrambled together, so setting one to zero is fine.\n zeros = array_ops.zeros_like(keys)\n return array_ops_stack.stack([keys, zeros])\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))\n\n def split(self, count=1):\n \"\"\"Returns a list of independent `Generator` objects.\n\n Two generators are independent of each other in the sense that the\n random-number streams they generate don't have statistically detectable\n correlations. 
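A minimal sketch of `split`, which derives independent child generators and advances the parent's state:

```python
import tensorflow as tf

parent = tf.random.Generator.from_seed(123)
g1, g2 = parent.split(count=2)

a = g1.normal([2])   # g1 and g2 produce independent streams
b = g2.normal([2])

# The parent's state was advanced by the split, so splitting again
# yields different children.
g3, g4 = parent.split(count=2)
```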
The new generators are also independent of the old one.\n The old generator's state will be changed (like other random-number\n generating methods), so two calls of `split` will return different\n new generators.\n\n For example:\n\n ```python\n gens = get_global_generator().split(count=10)\n for gen in gens:\n numbers = gen.normal(shape=[2, 3])\n # ...\n gens2 = get_global_generator().split(count=10)\n # gens2 will be different from gens\n ```\n\n The new generators will be put on the current device (possible different\n from the old generator's), for example:\n\n ```python\n with tf.device(\"/device:CPU:0\"):\n gen = Generator(seed=1234) # gen is on CPU\n with tf.device(\"/device:GPU:0\"):\n gens = gen.split(count=10) # gens are on GPU\n ```\n\n Args:\n count: the number of generators to return.\n\n Returns:\n A list (length `count`) of `Generator` objects independent of each other.\n The new generators have the same RNG algorithm as the old one.\n \"\"\"\n def _key_to_state(alg, key):\n # Padding with zeros on the left. The zeros will be the counter.\n return [0] * (_get_state_size(alg) - 1) + [key]\n\n alg = self.algorithm\n if alg in (a.value for a in random_ops_util.Algorithm):\n keys = self._make_int64_keys(shape=[count])\n return [Generator(state=_key_to_state(alg, key), alg=alg)\n for key in array_ops_stack.unstack(keys, num=count)]\n else:\n raise ValueError(stateless_random_ops.unsupported_alg_error_msg(alg))\n\n\n# It's not safe to create TF ops before `init_google` is called, so this is\n# initialized to None and get a value the first time `get_global_generator` is\n# called.\nglobal_generator = None\n\n\n@tf_export(\"random.get_global_generator\",\n \"random.experimental.get_global_generator\")\ndef get_global_generator():\n \"\"\"Retrieves the global generator.\n\n This function will create the global generator the first time it is called,\n and the generator will be placed at the default device at that time, so one\n needs to be careful when this function is first called. Using a generator\n placed on a less-ideal device will incur performance regression.\n\n Returns:\n The global `tf.random.Generator` object.\n \"\"\"\n global global_generator\n if global_generator is None:\n if config.is_op_determinism_enabled():\n raise RuntimeError('\"get_global_generator\" cannot be called if ' # pylint: disable=g-doc-exception\n \"determinism is enabled, unless \"\n '\"set_global_generator\" has already been called. 
'\n 'Please call \"set_global_generator\" first.')\n with ops.init_scope():\n global_generator = Generator.from_non_deterministic_state()\n return global_generator\n\n\n@tf_export(\"random.set_global_generator\",\n \"random.experimental.set_global_generator\")\ndef set_global_generator(generator):\n \"\"\"Replaces the global generator with another `Generator` object.\n\n This function replaces the global generator with the provided `generator`\n object.\n A random number generator utilizes a `tf.Variable` object to store its state.\n The user shall be aware of caveats how `set_global_generator` interacts with\n `tf.function`:\n\n - tf.function puts restrictions on Variable creation thus one cannot freely\n create a new random generator instance inside `tf.function`.\n To call `set_global_generator` inside `tf.function`, the generator instance\n must have already been created eagerly.\n - tf.function captures the Variable during trace-compilation, thus a compiled\n f.function will not be affected `set_global_generator` as demonstrated by\n random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun .\n\n For most use cases, avoid calling `set_global_generator` after program\n initialization, and prefer to reset the state of the existing global generator\n instead, such as,\n\n >>> rng = tf.random.get_global_generator()\n >>> rng.reset_from_seed(30)\n\n\n Args:\n generator: the new `Generator` object.\n \"\"\"\n global global_generator\n global_generator = generator\n", "output": ["_check_state_shape", "_get_state_size", "_uint_to_int", "_get_counter_size", "_make_1d_state", "_make_state_from_seed", "_convert_to_state_tensor", "non_deterministic_ints", "get_replica_id", "_shape_tensor", "Generator"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/ops/stateful_random_ops.py", "file_length": 12507, "symbol_dict": [{"symbol": "_get_counter_size", "type": "mannual_defined_function", "byte_location": 4981, "location": 1687}, {"symbol": "non_deterministic_ints", "type": "mannual_defined_function", "byte_location": 2971, "location": 994}, {"symbol": "_uint_to_int", "type": "mannual_defined_function", "byte_location": 3473, "location": 1157}, {"symbol": "_convert_to_state_tensor", "type": "mannual_defined_function", "byte_location": 7141, "location": 2507}, {"symbol": "_make_state_from_seed", "type": "mannual_defined_function", "byte_location": 6046, "location": 2081}, {"symbol": "get_replica_id", "type": "mannual_defined_function", "byte_location": 7315, "location": 2576}, {"symbol": "_shape_tensor", "type": "mannual_defined_function", "byte_location": 6870, "location": 2412}, {"symbol": "_make_1d_state", "type": "mannual_defined_function", "byte_location": 3576, "location": 1206}, {"symbol": "_check_state_shape", "type": "mannual_defined_function", "byte_location": 5861, "location": 2015}, {"symbol": "_get_state_size", "type": "mannual_defined_function", "byte_location": 5426, "location": 1854}, {"symbol": "Generator", "type": "mannual_defined_class", "byte_location": 7523, "location": 2650}]}} {"input": "# Copyright 2015 The TensorFlow Authors. 
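A small sketch tying together the global-generator helpers documented above: set the global generator once, eagerly, and afterwards prefer resetting it over replacing it:

```python
import tensorflow as tf

tf.random.set_global_generator(tf.random.Generator.from_seed(2024))
g = tf.random.get_global_generator()
_ = g.normal([2, 2])

# Later, reset the existing generator rather than swapping it out, since a
# traced tf.function may already have captured its state variable.
g.reset_from_seed(2024)
```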
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Initializers for TF 2.\"\"\"\nimport math\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_linalg_ops\nfrom tensorflow.python.ops import linalg_ops_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import stateless_random_ops\nfrom tensorflow.python.ops.init_ops import _compute_fans\nfrom tensorflow.python.util.tf_export import tf_export\n\n_PARTITION_SHAPE = \"partition_shape\"\n_PARTITION_OFFSET = \"partition_offset\"\n\n\nclass Initializer:\n \"\"\"Initializer base class: all initializers inherit from this class.\n\n Initializers should implement a `__call__` method with the following\n signature:\n\n ```python\n def __call__(self, shape, dtype=None, **kwargs):\n # returns a tensor of shape `shape` and dtype `dtype`\n # containing values drawn from a distribution of your choice.\n ```\n \"\"\"\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. If not provided will return tensor\n of `tf.float32`.\n **kwargs: Additional keyword arguments. Accepted values:\n `partition_shape` and `partition_offset`. Used when creating a single\n partition in a partitioned variable. `partition_shape` is the shape of\n the partition (i.e. the shape of the returned tensor) and\n `partition_offset` is a tuple of `int` specifying the offset of this\n partition w.r.t each axis. For example, a tensor of shape `(30, 100)`\n can be partitioned into two partitions: `p0` of shape `(10, 100)` and\n `p1` of shape `(20, 100)`; if the initializer is called with\n `partition_shape=(20, 100)` and `partition_offset=(10, 0)`, it should\n return the value for `p1`.\n \"\"\"\n raise NotImplementedError\n\n def get_config(self):\n \"\"\"Returns the configuration of the initializer as a JSON-serializable dict.\n\n Returns:\n A JSON-serializable Python dict.\n \"\"\"\n return {}\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Instantiates an initializer from a configuration dictionary.\n\n Example:\n\n ```python\n initializer = RandomUniform(-1, 1)\n config = initializer.get_config()\n initializer = RandomUniform.from_config(config)\n ```\n\n Args:\n config: A Python dictionary.\n It will typically be the output of `get_config`.\n\n Returns:\n An Initializer instance.\n \"\"\"\n config.pop(\"dtype\", None)\n return cls(**config)\n\n def _validate_kwargs(self, kwargs, support_partition=True):\n for kwarg in kwargs:\n if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:\n raise TypeError(\n \"Keyword argument should be one of \"\n f\"{list([_PARTITION_SHAPE, _PARTITION_OFFSET])}. 
Received: {kwarg}\")\n elif not support_partition:\n raise ValueError(\n f\"{self.__class__.__name__} initializer doesn't support \"\n \"partition-related arguments\")\n\n\n@tf_export(\"zeros_initializer\", v1=[])\nclass Zeros(Initializer):\n \"\"\"Initializer that generates tensors initialized to 0.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.zeros_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.ones_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.constant_initializer(2.))\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> value = [0, 1, 2, 3, 4, 5, 6, 7]\n >>> init = tf.constant_initializer(value)\n >>> # Fitting shape\n >>> tf.Variable(init(shape=[2, 4], dtype=tf.float32))\n \n >>> # Larger shape\n >>> tf.Variable(init(shape=[3, 4], dtype=tf.float32))\n Traceback (most recent call last):\n ...\n TypeError: ...value has 8 elements, shape is (3, 4) with 12 elements...\n >>> # Smaller shape\n >>> tf.Variable(init(shape=[2, 3], dtype=tf.float32))\n Traceback (most recent call last):\n ...\n TypeError: ...value has 8 elements, shape is (2, 3) with 6 elements...\n\n Args:\n value: A Python scalar, list or tuple of values, or a N-dimensional numpy\n array. All elements of the initialized variable will be set to the\n corresponding value in the `value` argument.\n support_partition: If true, the initizer supports passing partition\n offset and partition shape arguments to variable creators. This is\n particularly useful when initializing sharded variables where each\n variable shard is initialized to a slice of constant initializer.\n \n Raises:\n TypeError: If the input `value` is not one of the expected types.\n \"\"\"\n\n def __init__(self, value=0, support_partition=False):\n if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):\n raise TypeError(\n f\"Invalid type for initial value: {type(value).__name__}. Expected \"\n \"Python scalar, list or tuple of values, or numpy.ndarray.\")\n self.value = value\n self.support_partition = support_partition\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. 
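To illustrate the partition protocol from the `Initializer.__call__` docstring, here is a sketch using the zeros initializer; it assumes the initializer honors `partition_shape` by returning only that slice, which is what the protocol describes:

```python
import tensorflow as tf

init = tf.zeros_initializer()

full = init(shape=[30, 100])              # the whole (30, 100) value
p1 = init(shape=[30, 100],
          partition_shape=(20, 100),      # shape of this partition
          partition_offset=(10, 0))       # offset of this partition w.r.t. each axis
print(p1.shape)                           # (20, 100)
```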
If not provided the dtype of the\n tensor created will be the type of the inital value.\n **kwargs: Additional keyword arguments.\n\n Raises:\n TypeError: If the initializer cannot create a tensor of the requested\n dtype.\n \"\"\"\n self._validate_kwargs(kwargs, support_partition=self.support_partition)\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n return constant_op.constant(self.value, dtype=dtype, shape=shape)\n\n def get_config(self):\n return {\"value\": self.value}\n\n\n@tf_export(\"random_uniform_initializer\", v1=[])\nclass RandomUniform(Initializer):\n \"\"\"Initializer that generates tensors with a uniform distribution.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.ones_initializer())\n >>> v1\n \n >>> v2\n \n >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3,\n ... tf.random_normal_initializer(mean=1., stddev=2.))\n >>> v1\n \n >>> v2\n >> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(\n ... 3, tf.initializers.TruncatedNormal(mean=1., stddev=2.))\n >>> v1\n \n >>> v2\n >> make_variables(4, tf.initializers.RandomUniform(minval=-1., maxval=1.))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.))\n >>> v1\n \n >>> v2\n >> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform'))\n (, >> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.Orthogonal())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.Orthogonal(gain=0.5))\n (>> def make_variable(k, initializer):\n ... return tf.Variable(initializer(shape=[k, k], dtype=tf.float32))\n >>> make_variable(2, tf.initializers.Identity())\n \n >>> make_variable(3, tf.initializers.Identity(gain=0.5))\n \n\n Args:\n gain: Multiplicative factor to apply to the identity matrix.\n \"\"\"\n\n def __init__(self, gain=1.0):\n self.gain = gain\n\n def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. 
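`VarianceScaling` is the general scheme behind several named initializers; the `mode`/`distribution` equivalences below are the standard ones and are stated here as an assumption rather than quoted from the docstrings above:

```python
import tensorflow as tf

# He-style scaling: variance 2 / fan_in, truncated-normal samples.
he_like = tf.initializers.VarianceScaling(
    scale=2.0, mode="fan_in", distribution="truncated_normal")

# Glorot-style scaling: variance 2 / (fan_in + fan_out), uniform samples.
glorot_like = tf.initializers.VarianceScaling(
    scale=1.0, mode="fan_avg", distribution="uniform")

w = he_like(shape=(64, 32), dtype=tf.float32)   # e.g. a dense kernel
```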
Only floating point types are\n supported.\n **kwargs: Additional keyword arguments.\n\n Raises:\n ValueError: If the dtype is not floating point\n ValueError: If the requested shape does not have exactly two axes.\n \"\"\"\n self._validate_kwargs(kwargs, support_partition=False)\n dtype = _assert_float_dtype(dtype)\n if len(shape) != 2:\n raise ValueError(\"The tensor to initialize, specified by argument `shape`\"\n \" must be at least two-dimensional. Received shape=\"\n f\"{shape}\")\n initializer = linalg_ops_impl.eye(*shape, dtype=dtype)\n return self.gain * initializer\n\n def get_config(self):\n return {\"gain\": self.gain}\n\n\nclass GlorotUniform(VarianceScaling):\n \"\"\"The Glorot uniform initializer, also called Xavier uniform initializer.\n\n Initializers allow you to pre-specify an initialization strategy, encoded in\n the Initializer object, without knowing the shape and dtype of the variable\n being initialized.\n\n Draws samples from a uniform distribution within [-limit, limit] where `limit`\n is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units\n in the weight tensor and `fan_out` is the number of output units in the weight\n tensor.\n\n Examples:\n\n >>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.GlorotUniform())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.RandomNormal())\n (>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.GlorotNormal())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.RandomNormal())\n (>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.lecun_normal())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.RandomNormal())\n (>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.lecun_uniform())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.RandomNormal())\n (>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.he_normal())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.RandomNormal())\n (>> def make_variables(k, initializer):\n ... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n ... 
tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n >>> v1, v2 = make_variables(3, tf.initializers.he_uniform())\n >>> v1\n >> v2\n >> make_variables(4, tf.initializers.RandomNormal())\n ( data_service_pb2.ProcessingModeDef.ShardingPolicy:\n \"\"\"Converts the policy to ProcessingModeDef proto enum.\"\"\"\n\n if self == ShardingPolicy.OFF:\n return data_service_pb2.ProcessingModeDef.OFF\n if self == ShardingPolicy.DYNAMIC:\n return data_service_pb2.ProcessingModeDef.DYNAMIC\n if self == ShardingPolicy.FILE:\n return data_service_pb2.ProcessingModeDef.FILE\n if self == ShardingPolicy.DATA:\n return data_service_pb2.ProcessingModeDef.DATA\n if self == ShardingPolicy.FILE_OR_DATA:\n return data_service_pb2.ProcessingModeDef.FILE_OR_DATA\n if self == ShardingPolicy.HINT:\n return data_service_pb2.ProcessingModeDef.HINT\n raise ValueError(f\"Unable to convert sharding policy {self!r} to proto.\")\n\n\n@tf_export(\"data.experimental.service.CrossTrainerCache\")\nclass CrossTrainerCache:\n \"\"\"Options related to the tf.data service cross trainer cache.\n\n This is used to enable cross-trainer cache when distributing a dataset. For\n example:\n\n ```\n dataset = dataset.apply(tf.data.experimental.service.distribute(\n processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,\n service=FLAGS.tf_data_service_address,\n job_name=\"job\",\n cross_trainer_cache=data_service_ops.CrossTrainerCache(\n trainer_id=trainer_id())))\n ```\n\n For more details, refer to\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers.\n \"\"\"\n\n def __init__(self, trainer_id):\n \"\"\"Constructs a CrossTrainerCache.\n\n Args:\n trainer_id: Each training job has a unique ID. Once a job has consumed\n data, the data remains in the cache and is re-used by jobs with different\n `trainer_id`s. Requests with the same `trainer_id` do not re-use data.\n\n Raises:\n ValueError if `trainer_id` is empty.\n \"\"\"\n if not trainer_id:\n raise ValueError(\n \"tf.data service cross-trainer cache requires a non-empty trainer ID.\"\n )\n self.trainer_id = trainer_id\n\n def _to_proto(self) -> data_service_pb2.CrossTrainerCacheOptions:\n return data_service_pb2.CrossTrainerCacheOptions(trainer_id=self.trainer_id)\n\n\ndef _get_validated_sharding_policy(processing_mode) -> ShardingPolicy:\n \"\"\"Validates `processing_mode` and converts it to ShardingPolicy.\"\"\"\n\n if isinstance(processing_mode, ShardingPolicy):\n return processing_mode\n if processing_mode == _PARALLEL_EPOCHS:\n return ShardingPolicy.OFF\n if processing_mode == _DISTRIBUTED_EPOCH:\n return ShardingPolicy.DYNAMIC\n\n raise ValueError(\"tf.data service processing mode should be a \"\n \"`tf.data.experimental.service.ShardingPolicy`, \"\n \"`\\\"parallel_epochs\\\"`, or `\\\"distributed_epoch\\\"`. Got \"\n f\"{processing_mode!r}.\")\n\n\ndef _validate_job_name(job_name) -> None:\n if job_name is None:\n return\n if not isinstance(job_name, str):\n raise ValueError(\"`job_name` must be a string, but `job_name` was of type \"\n f\"{type(job_name)}. job_name={job_name}\")\n if not job_name:\n raise ValueError(\"`job_name` must not be empty\")\n\n\ndef _validate_compression(compression) -> None:\n valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]\n if compression not in valid_compressions:\n raise ValueError(f\"Invalid `compression` argument: {compression}. 
\"\n f\"Must be one of {valid_compressions}.\")\n\n\ndef _get_compression_proto(\n compression) -> data_service_pb2.DataServiceMetadata.Compression:\n if compression == COMPRESSION_AUTO:\n return data_service_pb2.DataServiceMetadata.COMPRESSION_SNAPPY\n if compression == COMPRESSION_NONE:\n return data_service_pb2.DataServiceMetadata.COMPRESSION_OFF\n raise ValueError(f\"Invalid `compression` argument: {compression}. \"\n f\"Must be one of {[COMPRESSION_AUTO, COMPRESSION_NONE]}.\")\n\n\ndef _to_tensor(dataset_id) -> tensor.Tensor:\n \"\"\"Converts `dataset_id` to Tensor.\"\"\"\n\n if isinstance(dataset_id, tensor.Tensor):\n return dataset_id\n if isinstance(dataset_id, str) or isinstance(dataset_id, bytes):\n return ops.convert_to_tensor(\n dataset_id, dtype=dtypes.string, name=\"dataset_id\")\n return ops.convert_to_tensor(\n dataset_id, dtype=dtypes.int64, name=\"dataset_id\")\n\n\ndef _to_string(dataset_id) -> str:\n \"\"\"Converts `dataset_id` to string.\"\"\"\n\n if isinstance(dataset_id, tensor.Tensor):\n return (dataset_id if dataset_id.dtype == dtypes.string else\n string_ops.as_string(dataset_id))\n return (dataset_id.decode()\n if isinstance(dataset_id, bytes) else str(dataset_id))\n\n\nclass _DataServiceDatasetV2(dataset_ops.DatasetSource):\n \"\"\"A `Dataset` that reads elements from the tf.data service.\"\"\"\n\n def __init__(self,\n dataset_id,\n processing_mode,\n address,\n element_spec,\n protocol,\n data_transfer_protocol,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n task_refresh_interval_hint_ms=None,\n cross_trainer_cache=None,\n target_workers=\"AUTO\"):\n \"\"\"Constructs a _DataServiceDatasetV2.\n\n Args:\n dataset_id: The dataset id for the dataset to read from.\n processing_mode: A `tf.data.experimental.service.ShardingPolicy`\n specifying how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n address: The tf.data service address, e.g. \"localhost:5000\".\n element_spec: The dataset element spec for the dataset to read from.\n protocol: The protocol to use for communicating with the tf.data service,\n e.g. \"grpc\".\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using\n gRPC.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string or Tensor. This argument makes it possible for multiple\n datasets to share the same job. The default behavior is that the dataset\n creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from\n `0` to `num_consumers`. Must be specified alongside `num_consumers`.\n When specified, consumers will read from the job in a strict round-robin\n order, instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead\n of the default first-come-first-served order. 
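For context, the user-facing wrapper around this dataset is `tf.data.experimental.service.distribute`; a sketch of sharing one job between datasets, with a placeholder dispatcher address:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(100)
ds = ds.apply(
    tf.data.experimental.service.distribute(
        processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
        service="grpc://localhost:5000",   # placeholder dispatcher address
        job_name="shared_job"))            # same job_name => datasets share the job
```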
When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out\n of sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the\n amount of memory used, since `distribute` won't use more than\n `element_size` * `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query\n the dispatcher for task changes.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`,\n tf.data runtime decides which workers to read from. If `\"ANY\"`, reads\n from any tf.data service workers. If `\"LOCAL\"`, only reads from local\n in-processs tf.data service workers. `\"AUTO\"` works well for most cases,\n while users can specify other targets. For example, `\"LOCAL\"` helps\n avoid RPCs and data copy if every TF worker colocates with a tf.data\n service worker. Consumers of a shared job must use the same\n `target_workers`. Defaults to `\"AUTO\"`.\n \"\"\"\n if consumer_index is None != num_consumers is None:\n raise ValueError(\n \"Must either set both `consumer_index` and `num_consumers`, \"\n \"or neither. \",\n f\"consumer_index={consumer_index}, num_consumers={num_consumers}\")\n if num_consumers is not None and job_name is None:\n raise ValueError(\"`job_name` must be set when setting `num_consumers`. \"\n f\"num_consumers was set to {num_consumers}.\")\n\n processing_mode_def = data_service_pb2.ProcessingModeDef(\n sharding_policy=_get_validated_sharding_policy(\n processing_mode)._to_proto())\n if job_name is None:\n job_name = \"\"\n if max_outstanding_requests is None:\n max_outstanding_requests = dataset_ops.AUTOTUNE\n if task_refresh_interval_hint_ms is None:\n task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE\n\n self._dataset_id = _to_tensor(dataset_id)\n self._processing_mode = ops.convert_to_tensor(\n processing_mode_def.SerializeToString(),\n dtype=dtypes.string,\n name=\"processing_mode\")\n self._address = ops.convert_to_tensor(\n address, dtype=dtypes.string, name=\"address\")\n self._protocol = ops.convert_to_tensor(\n protocol, dtype=dtypes.string, name=\"protocol\")\n self._job_name = ops.convert_to_tensor(\n job_name, dtype=dtypes.string, name=\"job_name\")\n self._consumer_index = ops.convert_to_tensor(\n -1 if consumer_index is None else consumer_index,\n dtype=dtypes.int64,\n name=\"consumer_index\")\n self._num_consumers = ops.convert_to_tensor(\n -1 if num_consumers is None else num_consumers,\n dtype=dtypes.int64,\n name=\"num_consumers\")\n self._max_outstanding_requests = ops.convert_to_tensor(\n max_outstanding_requests,\n dtype=dtypes.int64,\n name=\"max_outstanding_requests\")\n self._element_spec = element_spec\n uncompress_func = structured_function.StructuredFunctionWrapper(\n lambda x: compression_ops.uncompress(x, output_spec=element_spec),\n transformation_name=\"DataServiceDataset.uncompress()\",\n input_structure=tensor.TensorSpec(shape=(), dtype=dtypes.variant))\n cross_trainer_cache_options = (\n cross_trainer_cache._to_proto().SerializeToString()\n if cross_trainer_cache else None)\n\n compat_kwargs = {}\n if 
data_transfer_protocol is not None:\n compat_kwargs[\"data_transfer_protocol\"] = data_transfer_protocol\n\n # If `uncompress` is `True`, the dataset will query the servers to find\n # out the actual compression used. It is always set to `True` the first\n # time the graph is built, and set to false when serializing, so we will\n # uncompress at most once.\n uncompress = True\n variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v4(\n dataset_id=self._dataset_id,\n processing_mode=self._processing_mode,\n address=self._address,\n protocol=self._protocol,\n job_name=self._job_name,\n consumer_index=self._consumer_index,\n num_consumers=self._num_consumers,\n max_outstanding_requests=self._max_outstanding_requests,\n task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n iteration_counter=(\n gen_experimental_dataset_ops.dummy_iteration_counter()),\n target_workers=target_workers,\n uncompress=uncompress,\n uncompress_fn=uncompress_func.function,\n cross_trainer_cache_options=cross_trainer_cache_options,\n **compat_kwargs,\n **self._flat_structure)\n super(_DataServiceDatasetV2, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._element_spec\n\n\nclass _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):\n \"\"\"A `Dataset` that executes its input through the tf.data service.\"\"\"\n\n @functools.wraps(_DataServiceDatasetV2.__init__)\n def __init__(self, dataset_id, processing_mode, address, element_spec,\n protocol, data_transfer_protocol, job_name, consumer_index,\n num_consumers, max_outstanding_requests,\n task_refresh_interval_hint_ms, cross_trainer_cache,\n target_workers):\n\n self._wrapped = _DataServiceDatasetV2(\n dataset_id=dataset_id,\n processing_mode=processing_mode,\n address=address,\n element_spec=element_spec,\n protocol=protocol,\n data_transfer_protocol=data_transfer_protocol,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n cross_trainer_cache=cross_trainer_cache,\n target_workers=target_workers)\n super(_DataServiceDatasetV1, self).__init__(self._wrapped)\n\n\nif tf2.enabled():\n _DataServiceDataset = _DataServiceDatasetV2\nelse:\n _DataServiceDataset = _DataServiceDatasetV1\n\n\ndef _parse_service(service) -> tuple[str, str]:\n \"\"\"Converts a tf.data service string into a (protocol, address) tuple.\n\n Args:\n service: A string in the format \"protocol://address\" or just \"address\". If\n the string is only an address, the default protocol will be used.\n\n Returns:\n The (protocol, address) tuple\n \"\"\"\n if not isinstance(service, str):\n raise ValueError(\"`service` must be a string, but `service` was of type \"\n f\"{type(service)}. 
service={service}\")\n if not service:\n raise ValueError(\"`service` must not be empty\")\n parts = service.split(\"://\")\n if len(parts) == 2:\n protocol, address = parts\n elif len(parts) == 1:\n address = parts[0]\n protocol = _pywrap_utils.TF_DATA_DefaultProtocol()\n else:\n raise ValueError(\"Malformed `service` string has multiple '://': \"\n f\"{service}.\")\n # TODO(aaudibert): Considering validating reachability of address here.\n return (protocol, address)\n\n\ndef _distribute(\n processing_mode,\n service,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n task_refresh_interval_hint_ms=None,\n data_transfer_protocol=None,\n compression=\"AUTO\",\n cross_trainer_cache=None,\n target_workers=\"AUTO\",\n) -> Callable[dataset_ops.Dataset, dataset_ops.Dataset]:\n \"\"\"A transformation that moves dataset processing to the tf.data service.\n\n This transformation is similar to `distribute`, but supports additional\n parameters which we do not yet want to add to the public Python API.\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[://]
<address>`, where `<address>
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible for multiple datasets to\n share the same job. The default behavior is that the dataset creates\n anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the\n dispatcher for task changes.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using gRPC.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. 
Defaults to\n `\"AUTO\"`.\n\n Returns:\n Dataset: A `Dataset` of the elements produced by the data service.\n \"\"\"\n processing_mode = _get_validated_sharding_policy(processing_mode)\n _validate_compression(compression)\n\n def _apply_fn(dataset) -> dataset_ops.Dataset: # pylint: disable=missing-docstring\n dataset_id = _register_dataset(service, dataset, compression=compression)\n return _from_dataset_id(\n processing_mode,\n service,\n dataset_id,\n dataset.element_spec,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n data_transfer_protocol=data_transfer_protocol,\n cross_trainer_cache=cross_trainer_cache,\n target_workers=target_workers)\n\n return _apply_fn\n\n\n@tf_export(\"data.experimental.service.distribute\")\ndef distribute(\n processing_mode,\n service,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n data_transfer_protocol=None,\n compression=\"AUTO\",\n cross_trainer_cache=None,\n target_workers=\"AUTO\",\n) -> Callable[dataset_ops.Dataset, dataset_ops.Dataset]:\n \"\"\"A transformation that moves dataset processing to the tf.data service.\n\n When you iterate over a dataset containing the `distribute` transformation,\n the tf.data service creates a \"job\" which produces data for the dataset\n iteration.\n\n The tf.data service uses a cluster of workers to prepare data for training\n your model.\n The `processing_mode` argument to `tf.data.experimental.service.distribute`\n describes how to leverage multiple workers to process the input dataset.\n Currently, there are two processing modes to choose from: \"distributed_epoch\"\n and \"parallel_epochs\".\n\n \"distributed_epoch\" means that the dataset will be split across all tf.data\n service workers.\n The dispatcher produces \"splits\" for the dataset and sends them to workers for\n further processing. For example, if a dataset begins with a list of filenames,\n the dispatcher will iterate through the filenames and send the filenames to\n tf.data workers, which will perform the rest of the dataset transformations on\n those files. \"distributed_epoch\" is useful when your model needs to see each\n element of the dataset exactly once, or if it needs to see the data in a\n generally-sequential order. \"distributed_epoch\" only works for datasets with\n splittable sources, such as `Dataset.from_tensor_slices`,\n `Dataset.list_files`, or `Dataset.range`.\n\n \"parallel_epochs\" means that the entire input dataset will be processed\n independently by each of the tf.data service workers.\n For this reason, it is important to shuffle data (e.g. filenames)\n non-deterministically, so that each worker will process the elements of the\n dataset in a different order. \"parallel_epochs\" can be used to distribute\n datasets that aren't splittable.\n\n With two workers, \"parallel_epochs\" will produce every element of the dataset\n twice:\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> # Start two workers\n >>> workers = [\n ... tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address)) for _ in range(2)\n ... ]\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.apply(tf.data.experimental.service.distribute(\n ... 
processing_mode=\"parallel_epochs\", service=dispatcher.target))\n >>> print(sorted(list(dataset.as_numpy_iterator())))\n [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]\n\n \"distributed_epoch\", on the other hand, will still produce each element once:\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> workers = [\n ... tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address)) for _ in range(2)\n ... ]\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.apply(tf.data.experimental.service.distribute(\n ... processing_mode=\"distributed_epoch\", service=dispatcher.target))\n >>> print(sorted(list(dataset.as_numpy_iterator())))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n When using `apply(tf.data.experimental.service.distribute(...))`, the dataset\n before the `apply` transformation executes within the tf.data service, while\n the operations after `apply` happen within the local process.\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> workers = [\n ... tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address)) for _ in range(2)\n ... ]\n >>> dataset = tf.data.Dataset.range(5)\n >>> dataset = dataset.map(lambda x: x*x)\n >>> dataset = dataset.apply(\n ... tf.data.experimental.service.distribute(\"parallel_epochs\",\n ... dispatcher.target))\n >>> dataset = dataset.map(lambda x: x+1)\n >>> print(sorted(list(dataset.as_numpy_iterator())))\n [1, 1, 2, 2, 5, 5, 10, 10, 17, 17]\n\n In the above example, the dataset operations (before applying the `distribute`\n function on the elements) will be executed on the tf.data workers,\n and the elements are provided over RPC. The remaining transformations\n (after the call to `distribute`) will be executed locally. The dispatcher\n and the workers will bind to usused free ports (which are chosen at random),\n in order to communicate with each other. However, to bind them to specific\n ports, the `port` parameter can be passed.\n\n The `job_name` argument allows jobs to be shared across multiple\n datasets. Instead of each dataset creating its own job, all\n datasets with the same `job_name` will consume from the same job. A new job\n will be created for each iteration of the dataset (with each repetition of\n `Dataset.repeat` counting as a new iteration). Suppose the `DispatchServer`\n is serving on `localhost:5000` and two training workers (in either a single\n client or multi-client setup) iterate over the below dataset, and there is a\n single tf.data worker:\n\n ```\n range5_dataset = tf.data.Dataset.range(5)\n dataset = range5_dataset.apply(tf.data.experimental.service.distribute(\n \"parallel_epochs\", \"localhost:5000\", job_name=\"my_job_name\"))\n for iteration in range(3):\n print(list(dataset))\n ```\n\n The elements of each job will be split between the two processes, with\n elements being consumed by the processes on a first-come first-served basis.\n One possible result is that process 1 prints\n\n ```\n [0, 2, 4]\n [0, 1, 3]\n [1]\n ```\n\n and process 2 prints\n\n ```\n [1, 3]\n [2, 4]\n [0, 2, 3, 4]\n ```\n\n Job names must not be re-used across different training jobs within the\n lifetime of the tf.data service. 
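A minimal sketch of the pitfall described below (illustrative only; it assumes\n  the same `localhost:5000` dispatcher as above):\n\n  ```\n  # One training job reads a job named \"job\" until end of input.\n  dataset = tf.data.Dataset.range(5).apply(\n      tf.data.experimental.service.distribute(\n          \"parallel_epochs\", \"localhost:5000\", job_name=\"job\"))\n  print(sorted(dataset.as_numpy_iterator()))  # [0, 1, 2, 3, 4]\n\n  # A later, independent training job re-uses job_name=\"job\" against the same\n  # tf.data service and immediately receives end of input.\n  dataset = tf.data.Dataset.range(5).apply(\n      tf.data.experimental.service.distribute(\n          \"parallel_epochs\", \"localhost:5000\", job_name=\"job\"))\n  print(list(dataset.as_numpy_iterator()))  # []\n  ```\n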
In general, the tf.data service is expected\n to live for the duration of a single training job.\n To use the tf.data service with multiple training jobs, make sure to use\n different job names to avoid conflicts. For example, suppose a training job\n calls `distribute` with `job_name=\"job\"` and reads until end of input. If\n another independent job connects to the same tf.data service and tries to read\n from `job_name=\"job\"`, it will immediately receive end of input, without\n getting any data.\n\n **Coordinated data read**\n\n By default, when multiple consumers read from the same job, they receive data\n on a first-come first-served basis. In some use cases, it is advantageous to\n coordinate the consumers. At each step, consumers read data from the same\n worker.\n\n For example, the tf.data service can be used to coordinate example sizes\n across a cluster during synchronous training, so that during each step all\n replicas train on similar-sized elements. To achieve this, define a dataset\n which generates rounds of `num_consumers` consecutive similar-sized batches,\n then enable coordinated reads by setting `consumer_index` and `num_consumers`.\n\n NOTE: To keep consumers in sync, round robin data consumption requires that\n the dataset have infinite cardinality. You can get this by adding `.repeat()`\n at the end of the dataset definition.\n\n **Keras and Distribution Strategies**\n\n The dataset produced by the `distribute` transformation can be passed to\n Keras' `Model.fit` or Distribution Strategy's\n `tf.distribute.Strategy.experimental_distribute_dataset` like any other\n `tf.data.Dataset`. We recommend setting a `job_name` on the call to\n `distribute` so that if there are multiple workers, they read data from the\n same job. Note that the autosharding normally performed by\n `experimental_distribute_dataset` will be disabled when setting a `job_name`,\n since sharing the job already results in splitting data across the workers.\n When using a shared job, data will be dynamically balanced across workers, so\n that they reach end of input about the same time. This results in better\n worker utilization than with autosharding, where each worker processes an\n independent set of files, and some workers may run out of data earlier than\n others.\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[://]
<address>`, where `<address>
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible for multiple datasets to\n share the same job. The default behavior is that the dataset creates\n anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using gRPC.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. Defaults to\n `\"AUTO\"`.\n\n Returns:\n Dataset: A `Dataset` of the elements produced by the data service.\n \"\"\"\n _validate_job_name(job_name)\n return _distribute(\n processing_mode=processing_mode,\n service=service,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n data_transfer_protocol=data_transfer_protocol,\n compression=compression,\n cross_trainer_cache=cross_trainer_cache,\n target_workers=target_workers)\n\n\ndef _register_dataset(\n service, dataset, compression, dataset_id=None) -> tensor.Tensor:\n \"\"\"Registers a dataset with the tf.data service.\n\n This transformation is similar to `register_dataset`, but supports additional\n parameters which we do not yet want to add to the public Python API.\n\n Args:\n service: A string or a tuple indicating how to connect to the tf.data\n service. 
If it's a string, it should be in the format\n      `[<protocol>://]<address>`, where `<address>
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset: A `tf.data.Dataset` to register with the tf.data service.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress.\n dataset_id: (Optional.) By default, tf.data service generates a unique\n (string) ID for each registered dataset. If a `dataset_id` is provided, it\n will use the specified ID. If a dataset with a matching ID already exists,\n no new dataset is registered. This is useful if multiple training jobs\n want to (re)use the same dataset for training. In this case, they can\n register the dataset with the same dataset ID.\n\n Returns:\n A scalar string tensor representing the dataset ID.\n \"\"\"\n _validate_compression(compression)\n\n if isinstance(service, tuple):\n protocol, address = service\n else:\n protocol, address = _parse_service(service)\n external_state_policy = dataset.options().experimental_external_state_policy\n if external_state_policy is None:\n external_state_policy = ExternalStatePolicy.WARN\n\n encoded_spec = None\n if context.executing_eagerly():\n encoded_spec = nested_structure_coder.encode_structure(\n dataset.element_spec).SerializeToString()\n\n if compression == COMPRESSION_AUTO:\n dataset = dataset.map(\n lambda *x: compression_ops.compress(x),\n num_parallel_calls=dataset_ops.AUTOTUNE)\n dataset = dataset._apply_debug_options() # pylint: disable=protected-access\n\n metadata = data_service_pb2.DataServiceMetadata(\n element_spec=encoded_spec,\n compression=_get_compression_proto(compression))\n\n return gen_experimental_dataset_ops.register_dataset_v2(\n dataset._variant_tensor, # pylint: disable=protected-access\n address=address,\n protocol=protocol,\n external_state_policy=external_state_policy.value,\n requested_dataset_id=dataset_id,\n metadata=metadata.SerializeToString())\n\n\n@tf_export(\"data.experimental.service.register_dataset\")\ndef register_dataset(\n service, dataset, compression=\"AUTO\", dataset_id=None) -> tensor.Tensor:\n \"\"\"Registers a dataset with the tf.data service.\n\n `register_dataset` registers a dataset with the tf.data service so that\n datasets can be created later with\n `tf.data.experimental.service.from_dataset_id`. This is useful when the\n dataset\n is registered by one process, then used in another process. When the same\n process is both registering and reading from the dataset, it is simpler to use\n `tf.data.experimental.service.distribute` instead.\n\n If the dataset is already registered with the tf.data service,\n `register_dataset` returns the already-registered dataset's id.\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> worker = tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address))\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset_id = tf.data.experimental.service.register_dataset(\n ... dispatcher.target, dataset)\n >>> dataset = tf.data.experimental.service.from_dataset_id(\n ... processing_mode=\"parallel_epochs\",\n ... service=dispatcher.target,\n ... dataset_id=dataset_id,\n ... 
element_spec=dataset.element_spec)\n >>> print(list(dataset.as_numpy_iterator()))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Args:\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[://]
<address>`, where `<address>
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset: A `tf.data.Dataset` to register with the tf.data service.\n compression: (Optional.) How to compress the dataset's elements before\n transferring them over the network. \"AUTO\" leaves the decision of how to\n compress up to the tf.data service runtime. `None` indicates not to\n compress.\n dataset_id: (Optional.) By default, tf.data service generates a unique\n (string) ID for each registered dataset. If a `dataset_id` is provided, it\n will use the specified ID. If a dataset with a matching ID already exists,\n no new dataset is registered. This is useful if multiple training jobs\n want to (re)use the same dataset for training. In this case, they can\n register the dataset with the same dataset ID.\n\n Returns:\n A scalar string tensor representing the dataset ID.\n \"\"\"\n return _register_dataset(service, dataset, compression, dataset_id)\n\n\ndef _from_dataset_id(processing_mode,\n service,\n dataset_id,\n element_spec,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n task_refresh_interval_hint_ms=None,\n data_transfer_protocol=None,\n cross_trainer_cache=None,\n target_workers=\"AUTO\") -> dataset_ops.Dataset:\n \"\"\"Creates a dataset which reads data from the tf.data service.\n\n This transformation is similar to `from_dataset_id`, but supports additional\n parameters which we do not yet want to add to the public Python API.\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[://]
<address>`, where `<address>
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset_id: The id of the dataset to read from. This id is returned by\n `register_dataset` when the dataset is registered with the tf.data\n service.\n element_spec: A nested structure of `tf.TypeSpec`s representing the type of\n elements produced by the dataset. This argument is only required inside a\n tf.function. Use `tf.data.Dataset.element_spec` to get the element spec\n for a given dataset.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string or tensor. This argument makes it possible for multiple\n datasets to share the same job. The default behavior is that the dataset\n creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the\n dispatcher for task changes.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using gRPC.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. Defaults to\n `\"AUTO\"`.\n\n Returns:\n A `tf.data.Dataset` which reads from the tf.data service.\n \"\"\"\n def _get_element_spec():\n \"\"\"Fetches the element spec from the server.\"\"\"\n data_service_metadata = None\n dataset_id_val = tensor_util.constant_value(dataset_id)\n try:\n data_service_metadata = (\n _pywrap_server_lib.TF_DATA_GetDataServiceMetadataByID(\n dataset_id_val, address, protocol\n )\n )\n except NotImplementedError as err:\n raise ValueError(\n \"The tf.data service is running an earlier version of TensorFlow \"\n \"that requires specifying `element_spec` as an argument to \"\n \"`from_dataset_id`. 
Please either supply an element spec or update \"\n \"the tf.data service to the latest version.\") from err\n except RuntimeError:\n # This error results from dataset ID not found. A more appropriate error\n # will be raised when the dataset is created.\n pass\n\n if not data_service_metadata or not data_service_metadata.element_spec:\n dataset_id_val = tensor_util.constant_value(dataset_id)\n raise ValueError(\n f\"Failed to fetch element spec for dataset id {dataset_id_val} from \"\n \"tf.data service. If the dataset was registered in graph mode or \"\n \"inside a tf.function, the `element_spec` must be specified as an \"\n \"argument to `from_dataset_id`.\")\n\n struct_pb = nested_structure_coder.struct_pb2.StructuredValue()\n struct_pb.ParseFromString(data_service_metadata.element_spec)\n return nested_structure_coder.decode_proto(struct_pb)\n\n processing_mode = _get_validated_sharding_policy(processing_mode)\n if isinstance(service, tuple):\n protocol, address = service\n else:\n protocol, address = _parse_service(service)\n if job_name is not None:\n if not isinstance(job_name, str) and not isinstance(\n job_name, tensor.Tensor):\n raise ValueError(\n \"`job_name` must be a string or Tensor, but `job_name` was of type \"\n f\"{type(job_name)}. job_name={job_name}.\")\n\n if not element_spec:\n if not context.executing_eagerly():\n raise ValueError(\n \"In graph mode `element_spec` must be provided manually.\")\n element_spec = _get_element_spec()\n\n dataset = _DataServiceDataset(\n dataset_id=dataset_id,\n processing_mode=processing_mode,\n address=address,\n element_spec=element_spec,\n protocol=protocol,\n data_transfer_protocol=data_transfer_protocol,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n cross_trainer_cache=cross_trainer_cache,\n target_workers=target_workers)\n\n # Disable autosharding for shared jobs.\n if job_name is not None:\n options = options_lib.Options()\n options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF\n dataset = dataset.with_options(options)\n return dataset\n\n\n@tf_export(\"data.experimental.service.from_dataset_id\")\ndef from_dataset_id(processing_mode,\n service,\n dataset_id,\n element_spec=None,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n data_transfer_protocol=None,\n cross_trainer_cache=None,\n target_workers=\"AUTO\") -> dataset_ops.Dataset:\n \"\"\"Creates a dataset which reads data from the tf.data service.\n\n This is useful when the dataset is registered by one process, then used in\n another process. When the same process is both registering and reading from\n the dataset, it is simpler to use `tf.data.experimental.service.distribute`\n instead.\n\n Before using `from_dataset_id`, the dataset must have been registered with the\n tf.data service using `tf.data.experimental.service.register_dataset`.\n `register_dataset` returns a dataset id for the registered dataset. That is\n the `dataset_id` which should be passed to `from_dataset_id`.\n\n The `element_spec` argument indicates the `tf.TypeSpec`s for the elements\n produced by the dataset. Currently `element_spec` must be explicitly\n specified, and match the dataset registered under `dataset_id`. 
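For instance, when the registering and the consuming programs are separate\n  processes, the consumer has to be handed a matching spec. A sketch of that\n  pattern (the `\"grpc://dispatcher:5000\"` address, the fixed `dataset_id`, and\n  the hand-off of the spec between processes are illustrative assumptions, not\n  part of the API):\n\n  ```\n  # Process A registers the dataset under a well-known id and records its\n  # element spec somewhere process B can read it.\n  dataset = tf.data.Dataset.range(10)\n  tf.data.experimental.service.register_dataset(\n      \"grpc://dispatcher:5000\", dataset, dataset_id=\"my_dataset\")\n  spec = dataset.element_spec\n\n  # Process B reads from the registered dataset, passing the recorded spec\n  # (here `spec_from_process_a` is a hypothetical name for that value).\n  dataset = tf.data.experimental.service.from_dataset_id(\n      processing_mode=\"parallel_epochs\",\n      service=\"grpc://dispatcher:5000\",\n      dataset_id=\"my_dataset\",\n      element_spec=spec_from_process_a)\n  ```\n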
`element_spec`\n defaults to `None` so that in the future we can support automatically\n discovering the `element_spec` by querying the tf.data service.\n\n `tf.data.experimental.service.distribute` is a convenience method which\n combines `register_dataset` and `from_dataset_id` into a dataset\n transformation.\n See the documentation for `tf.data.experimental.service.distribute` for more\n detail about how `from_dataset_id` works.\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> worker = tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address))\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset_id = tf.data.experimental.service.register_dataset(\n ... dispatcher.target, dataset)\n >>> dataset = tf.data.experimental.service.from_dataset_id(\n ... processing_mode=\"parallel_epochs\",\n ... service=dispatcher.target,\n ... dataset_id=dataset_id,\n ... element_spec=dataset.element_spec)\n >>> print(list(dataset.as_numpy_iterator()))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[://]
<address>`, where `<address>
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset_id: The id of the dataset to read from. This id is returned by\n `register_dataset` when the dataset is registered with the tf.data\n service.\n element_spec: A nested structure of `tf.TypeSpec`s representing the type of\n elements produced by the dataset. This argument is only required inside a\n tf.function. Use `tf.data.Dataset.element_spec` to get the element spec\n for a given dataset.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible for multiple datasets to\n share the same job. The default behavior is that the dataset creates\n anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using gRPC.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. 
Defaults to\n `\"AUTO\"`.\n\n Returns:\n A `tf.data.Dataset` which reads from the tf.data service.\n \"\"\"\n _validate_job_name(job_name)\n if job_name is not None:\n job_name = string_ops.string_join(\n [\"dataset_id=\", _to_string(dataset_id), job_name], \"/\")\n\n return _from_dataset_id(\n processing_mode=processing_mode,\n service=service,\n dataset_id=dataset_id,\n element_spec=element_spec,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n data_transfer_protocol=data_transfer_protocol,\n cross_trainer_cache=cross_trainer_cache,\n target_workers=target_workers)\n", "output": ["_validate_job_name", "_distribute", "_parse_service", "_from_dataset_id", "_register_dataset", "_get_validated_sharding_policy", "_to_string", "_to_tensor", "_validate_compression", "_get_compression_proto", "_DataServiceDatasetV2", "ShardingPolicy", "CrossTrainerCache", "_DataServiceDatasetV1"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/data/experimental/ops/data_service_ops.py", "file_length": 15781, "symbol_dict": [{"symbol": "_from_dataset_id", "type": "mannual_defined_function", "byte_location": 41839, "location": 11873}, {"symbol": "_parse_service", "type": "mannual_defined_function", "byte_location": 17983, "location": 5236}, {"symbol": "_to_string", "type": "mannual_defined_function", "byte_location": 8739, "location": 2593}, {"symbol": "_to_tensor", "type": "mannual_defined_function", "byte_location": 8334, "location": 2450}, {"symbol": "_register_dataset", "type": "mannual_defined_function", "byte_location": 36392, "location": 10379}, {"symbol": "_distribute", "type": "mannual_defined_function", "byte_location": 18964, "location": 5523}, {"symbol": "_get_compression_proto", "type": "mannual_defined_function", "byte_location": 7879, "location": 2304}, {"symbol": "_get_validated_sharding_policy", "type": "mannual_defined_function", "byte_location": 6633, "location": 1908}, {"symbol": "_validate_job_name", "type": "mannual_defined_function", "byte_location": 7262, "location": 2101}, {"symbol": "_validate_compression", "type": "mannual_defined_function", "byte_location": 7591, "location": 2214}, {"symbol": "_DataServiceDatasetV1", "type": "mannual_defined_class", "byte_location": 16736, "location": 4850}, {"symbol": "ShardingPolicy", "type": "mannual_defined_class", "byte_location": 2073, "location": 562}, {"symbol": "_DataServiceDatasetV2", "type": "mannual_defined_class", "byte_location": 9068, "location": 2703}, {"symbol": "CrossTrainerCache", "type": "mannual_defined_class", "byte_location": 5273, "location": 1471}]}} {"input": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport functools\nimport gc\nimport weakref\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python import pywrap_tfe\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import forwardprop\nfrom tensorflow.python.eager import forwardprop_util\nfrom tensorflow.python.eager import record\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import array_ops_stack\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.parallel_for import control_flow_ops\nfrom tensorflow.python.ops.unconnected_gradients import UnconnectedGradients\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import nest\n\n_X11_35_DERIVATIVES = [\n 1.1**3.5, 3.5 * 1.1**2.5, 3.5 * 2.5 * 1.1**1.5, 3.5 * 2.5 * 1.5 * 1.1**0.5\n]\n\n\n# TODO(allenl): Move this somewhere useful once forward gradients are stable.\ndef _jvp(f, primals, tangents):\n \"\"\"Compute the jacobian of `f` at `primals` multiplied by `tangents`.\"\"\"\n with forwardprop.ForwardAccumulator(primals, tangents) as acc:\n primals_out = f(*primals)\n return primals_out, acc.jvp(\n primals_out, unconnected_gradients=UnconnectedGradients.ZERO)\n\n\ndef _jacfwd(f, primals):\n \"\"\"Compute the jacobian of `f` at `primals` using forward-mode autodiff.\"\"\"\n jac_flat = []\n flat_primals = nest.flatten(primals)\n tangent_mask = [\n array_ops.zeros_like(primal, dtype=primal.dtype)\n for primal in flat_primals\n ]\n for primal_index, primal in enumerate(flat_primals):\n primal_vector = array_ops.reshape(primal, [-1])\n primal_vector_length = array_ops.size(primal_vector)\n jac_columns = []\n for element_index in math_ops.range(primal_vector_length):\n mask = array_ops.one_hot(\n element_index, primal_vector_length, dtype=primal.dtype)\n tangent_mask[primal_index] = array_ops.reshape(mask,\n array_ops.shape(primal))\n jac_columns.append(\n nest.map_structure(\n functools.partial(array_ops.reshape, shape=[-1]),\n _jvp(f, primals, nest.pack_sequence_as(primals,\n tangent_mask))[1]))\n jac_flat.append(array_ops_stack.stack(jac_columns, axis=1))\n tangent_mask[primal_index] = array_ops.zeros_like(primal)\n return nest.pack_sequence_as(primals, 
jac_flat)\n\n\ndef _jvp_batch(f, primal, tangents):\n tf_function = def_function.function(f)\n\n return control_flow_ops.vectorized_map(\n functools.partial(_jvp, tf_function, primal), tangents)\n\n\ndef _jvp_batch_matmul(f, primals, tangent_batch):\n \"\"\"Compute the jacobian of `f` at `primals` multiplied by `tangents`.\"\"\"\n jac_fwd = _jacfwd(f, primals)\n\n def jac_mul(tangent):\n flat_tangent = array_ops.reshape(tangent, shape=[-1])\n tangent_vector = array_ops.expand_dims(flat_tangent, 1)\n jvp_vector = math_ops.matmul(jac_fwd, tangent_vector)\n return array_ops.reshape(jvp_vector, tangent.shape)\n\n return control_flow_ops.vectorized_map(jac_mul, tangent_batch)\n\n\ndef _grad(f, argnums=0):\n \"\"\"Return a function which computes the gradient of `f`.\"\"\"\n\n def _f(*params):\n with backprop.GradientTape() as tape:\n tape.watch(params)\n primals_out = f(*params)\n return tape.gradient(\n primals_out,\n params[argnums],\n unconnected_gradients=UnconnectedGradients.ZERO)\n\n return _f\n\n\ndef _gradfwd(f, argnums=0, f_out_dtypes=dtypes.float32):\n \"\"\"Return a function which computes the gradient of `f` in forward mode.\"\"\"\n\n def _f(*params):\n\n def _single_jvp(param_mask):\n with forwardprop.ForwardAccumulator(\n primals=[params[argnums]], tangents=param_mask) as acc:\n primals_out = f(*params)\n return acc.jvp(primals_out)\n\n # Building up a function to run with pfor takes a bit too long since we're\n # only running it a handful of times.\n return _vectorize_parameters(\n _single_jvp, [params[argnums]], use_pfor=False, dtype=f_out_dtypes)\n\n return _f\n\n\ndef _hvp(f, primals, tangents):\n \"\"\"Compute a forward-over-back Hessian-vector product.\"\"\"\n with forwardprop.ForwardAccumulator(primals, tangents) as acc:\n with backprop.GradientTape() as tape:\n tape.watch(primals)\n f_out = f(*primals)\n f_out.shape.assert_is_compatible_with([])\n return acc.jvp(tape.gradient(f_out, primals))\n\n\ndef _vectorize_parameters(f, params, use_pfor, dtype):\n \"\"\"Loop over `params`, providing a one-hot mask to `f` for each.\"\"\"\n parameter_sizes = [array_ops.size(param) for param in params]\n total_size = math_ops.add_n(parameter_sizes)\n\n def _wrapper(index):\n full_onehot = array_ops.one_hot(index, total_size)\n split_onehot = array_ops.split(full_onehot, parameter_sizes)\n tangents = [\n array_ops.reshape(v, array_ops.shape(param))\n for param, v in zip(params, split_onehot)\n ]\n return f(tangents)\n\n if use_pfor:\n return control_flow_ops.vectorized_map(_wrapper, math_ops.range(total_size))\n\n return map_fn.map_fn(_wrapper, math_ops.range(total_size), dtype)\n\n\ndef _forward_over_back_hessian(f, params, use_pfor, dtype=None):\n \"\"\"Computes the full Hessian matrix for the scalar-valued f(*params).\n\n Args:\n f: A function taking `params` and returning a scalar.\n params: A possibly nested structure of tensors.\n use_pfor: If true, uses `tf.vectorized_map` calls instead of looping.\n dtype: Required if `use_pfor=False`. A possibly nested structure of dtypes\n (e.g. `tf.float32`) matching the structure of `f`'s returns.\n\n Returns:\n A possibly nested structure of matrix slices corresponding to `params`. Each\n slice has shape [P, p_s] where `p_s` is the number of parameters (`tf.size`)\n in the corresponding element of `params` and `P` is the total number of\n parameters (`sum_s(p_s)`). 
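For example (illustrative): with `params = [w, b]` where `tf.size(w) == 6` and\n    `tf.size(b) == 2`, the result is two slices with shapes `[8, 6]` and\n    `[8, 2]`.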
The full matrix can be obtained by concatenating\n along the second axis.\n \"\"\"\n return _vectorize_parameters(\n functools.partial(_hvp, f, params),\n params,\n use_pfor=use_pfor,\n dtype=dtype)\n\n\ndef _test_gradients(testcase,\n f,\n primals,\n order,\n delta=1e-3,\n rtol=1e-2,\n atol=1e-6,\n srtol=1e-6,\n satol=1e-6):\n \"\"\"Tests forward/backward jacobians of `f`'s [0, `order`)-order gradients.\"\"\"\n if order < 1:\n raise ValueError(\n \"`order` should be a positive integer, got '{}'.\".format(order))\n if order > 1:\n _test_gradients(\n testcase=testcase,\n f=_grad(f),\n primals=primals,\n order=order - 1,\n delta=delta,\n rtol=rtol,\n atol=atol,\n srtol=srtol,\n satol=satol)\n sym_jac_back, num_jac = gradient_checker_v2.compute_gradient(\n f, primals, delta=delta)\n testcase.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol)\n sym_jac_fwd = _jacfwd(f, primals)\n testcase.assertAllClose(num_jac, sym_jac_fwd, rtol=rtol, atol=atol)\n # And the symbolic computations should be much closer.\n testcase.assertAllClose(sym_jac_back, sym_jac_fwd, rtol=srtol, atol=satol)\n\n\n@test_util.with_eager_op_as_function\nclass ForwardpropTest(test.TestCase, parameterized.TestCase):\n\n def testJVPFunction(self):\n add_outputs = (constant_op.constant(4.),)\n vp, = forwardprop._jvp_dispatch(\n op_name=\"Add\",\n attr_tuple=(),\n inputs=(constant_op.constant(1.), constant_op.constant(3.)),\n outputs=add_outputs,\n tangents=(\n constant_op.constant(1.),\n constant_op.constant(5.),\n ))\n self.assertAllClose(1. + 5., self.evaluate(vp))\n\n mul_outputs = (constant_op.constant([20.]),)\n vp, = forwardprop._jvp_dispatch(\n op_name=\"Mul\",\n attr_tuple=(),\n inputs=(constant_op.constant([4.]), constant_op.constant([5.])),\n outputs=mul_outputs,\n tangents=(\n constant_op.constant([2.]),\n constant_op.constant([3.]),\n ))\n self.assertAllClose([2. * 5. + 3. * 4.], self.evaluate(vp))\n\n def testJVPFunctionWithBatchOfTangents(self):\n add_outputs = (constant_op.constant(4.),)\n jvp_flat = forwardprop._jvp_dispatch(\n op_name=\"Add\",\n attr_tuple=(),\n inputs=(constant_op.constant(1.), constant_op.constant(3.)),\n outputs=add_outputs,\n tangents=(\n constant_op.constant([1., 2., 3.]),\n constant_op.constant([4., 5., 6.]),\n ),\n use_batch=True)\n\n # Using evaluate and asserting with just a list works too\n # but the output is more explicit this way\n self.assertAllClose([constant_op.constant([1. + 4., 2. + 5., 3. + 6.])],\n jvp_flat)\n\n mul_outputs = (constant_op.constant([20.]),)\n jvp_flat = forwardprop._jvp_dispatch(\n op_name=\"Mul\",\n attr_tuple=(),\n inputs=(constant_op.constant([4.]), constant_op.constant([5.])),\n outputs=mul_outputs,\n tangents=(\n constant_op.constant([[1.], [0.], [1.]]),\n constant_op.constant([[0.], [1.], [1.]]),\n ),\n use_batch=True)\n self.assertAllClose([constant_op.constant([[5.], [4.], [5. + 4.]])],\n jvp_flat)\n\n def testJVPFunctionRaisesError(self):\n sum_outputs = (constant_op.constant(6.),)\n\n with self.assertRaisesRegex(ValueError, r\".*was expected to be of shape*\"):\n forwardprop._jvp_dispatch(\n op_name=\"Add\",\n attr_tuple=(),\n inputs=(constant_op.constant(2.), constant_op.constant(4.)),\n outputs=sum_outputs,\n tangents=(constant_op.constant([1., 2.]),\n constant_op.constant([[1.], [2.]])),\n use_batch=True)\n\n def testNonDifferentiableOpWithInputTangent(self):\n x = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(x, 2.) as acc1:\n with forwardprop.ForwardAccumulator(x, 2.) 
as acc2:\n y = array_ops.zeros_like(x)\n self.assertIsNone(acc1.jvp(y))\n self.assertIsNone(acc2.jvp(y))\n\n def testRunFunctionsEagerly(self):\n try:\n original_setting = def_function.functions_run_eagerly()\n def_function.run_functions_eagerly(True)\n x = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(x, 2.) as acc:\n y = x * 3.\n self.assertAllClose(6., acc.jvp(y))\n finally:\n def_function.run_functions_eagerly(original_setting)\n\n def testJVPFunctionUsedByAccumulatorForOps(self):\n previous_fn = forwardprop._jvp_dispatch\n try:\n x = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(x, 2.) as acc:\n y = x + x\n pywrap_tfe.TFE_Py_RegisterJVPFunction(\n lambda *args, **kwargs: [constant_op.constant(-15.)])\n z = x + x\n self.assertAllClose(4., acc.jvp(y))\n self.assertAllClose(-15., acc.jvp(z))\n finally:\n pywrap_tfe.TFE_Py_RegisterJVPFunction(previous_fn)\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testFunctionCacheLimited(self):\n # Every time this loop is executed, it will create a slightly larger Tensor\n # and push it through Add's gradient.\n # We run TRACE_COUNT_LIMIT x 2 so that it is tested with both\n # experimental_relax_shapes on and off.\n for execution_count in range(forwardprop._TRACE_COUNT_LIMIT*2):\n x = array_ops.zeros([execution_count])\n with forwardprop.ForwardAccumulator(x, array_ops.ones_like(x)) as acc:\n y = x + x\n self.assertAllClose(2. * array_ops.ones_like(x), acc.jvp(y))\n\n def testVariableUnwatchedZero(self):\n v = variables.Variable([[1.]])\n x = constant_op.constant(1.)\n xt = constant_op.constant(2.)\n with forwardprop.ForwardAccumulator(x, xt) as acc:\n pass\n self.assertIsNone(acc.jvp(v))\n self.assertAllClose([[0.]], acc.jvp(v, unconnected_gradients=\"zero\"))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testFunctionReturnsResource(self):\n v = variables.Variable([[1.]])\n x = constant_op.constant(1.)\n xt = constant_op.constant(2.)\n\n @def_function.function\n def f(a):\n return a, v.handle\n\n with forwardprop.ForwardAccumulator(x, xt) as acc:\n y, _ = f(x)\n self.assertAllClose(2., acc.jvp(y))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testMultipleWatchesAdd(self):\n x = constant_op.constant(-2.)\n with self.assertRaisesRegex(ValueError, \"multiple times\"):\n with forwardprop.ForwardAccumulator([x, x], [1., 2.]):\n pass\n with forwardprop.ForwardAccumulator([x], [3.]) as acc:\n self.assertAllClose(3., acc.jvp(x))\n acc._watch(x, constant_op.constant(10.))\n self.assertAllClose(13., acc.jvp(x))\n acc._watch(x, constant_op.constant(11.))\n self.assertAllClose(24., acc.jvp(x))\n y = constant_op.constant(3.) * x\n self.assertAllClose(24., acc.jvp(x))\n self.assertAllClose(24. * 3., acc.jvp(y))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testReenter(self):\n x = constant_op.constant(-2.)\n with forwardprop.ForwardAccumulator(x, 1.5) as acc:\n self.assertAllClose(1.5, acc.jvp(x))\n y = 4. * x\n self.assertAllClose(6., acc.jvp(y))\n with self.assertRaisesRegex(ValueError, \"already recording\"):\n with acc:\n pass\n z = 4. * x\n self.assertIsNone(acc.jvp(z))\n with acc:\n yy = y * y\n self.assertAllClose(6. * -8. 
* 2., acc.jvp(yy))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testDeadTensorsJVPCleared(self):\n x = array_ops.ones([100])\n x_weak = weakref.ref(x)\n grad_tensor = constant_op.constant(array_ops.zeros([100]))\n grad_tensor_weak = weakref.ref(grad_tensor)\n with forwardprop.ForwardAccumulator(x, grad_tensor) as acc:\n derived_tensor = constant_op.constant(2.) * x\n del grad_tensor\n self.assertAllClose(array_ops.zeros([100]), acc.jvp(x))\n del x\n self.assertIsNone(x_weak())\n self.assertIsNone(grad_tensor_weak())\n derived_tensor_weak = weakref.ref(derived_tensor)\n derived_tensor_grad = acc.jvp(derived_tensor)\n derived_tensor_grad_weak = weakref.ref(derived_tensor_grad)\n del derived_tensor\n del derived_tensor_grad\n self.assertIsNone(derived_tensor_weak())\n self.assertIsNone(derived_tensor_grad_weak())\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testJVPManual(self):\n primal, tangent = _jvp(math_ops.sin, (constant_op.constant(0.1),),\n (constant_op.constant(0.2),))\n self.assertAllClose(math_ops.sin(0.1), primal)\n self.assertAllClose(math_ops.cos(0.1) * 0.2, tangent)\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testNumericHigherOrder(self):\n\n def f(x):\n pointwise = math_ops.sin(x) * math_ops.tan(x)\n return math_ops.reduce_prod(\n pointwise + math_ops.reduce_sum(pointwise), axis=1)\n\n _test_gradients(\n self,\n f,\n [constant_op.constant([[2.0, 3.0], [1.0, 4.0]])],\n order=3,\n srtol=1e-6,\n satol=1e-3,\n )\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testNumericHigherOrderFloat64(self):\n\n def f(x):\n pointwise = math_ops.sin(x) * math_ops.tan(x)\n return math_ops.reduce_prod(\n pointwise + math_ops.reduce_sum(pointwise), axis=1)\n\n _test_gradients(\n self,\n f,\n [constant_op.constant([[2.0, 3.0], [1.0, 4.0]], dtype=dtypes.float64)],\n order=3)\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testCustomGradient(self):\n\n @custom_gradient.custom_gradient\n def f(x):\n\n def grad(dy):\n return dy * math_ops.cos(x)\n\n return np.sin(x.numpy()), grad\n\n _test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)\n\n # TODO(allenl): investigate why assert_no_new_pyobjects_executing_eagerly()\n # fails around this test?\n def testExceptionCustomGradientRecomputeGradForward(self):\n\n @custom_gradient.recompute_grad\n def f(x):\n return math_ops.reduce_prod(math_ops.tanh(x)**2)\n\n with self.assertRaisesRegex(NotImplementedError,\n \"recompute_grad tried to transpose\"):\n primals = [constant_op.constant([1.])]\n sym_jac_fwd = _jacfwd(f, primals)\n\n def testExceptionInCustomGradientNotSwallowed(self):\n\n @custom_gradient.custom_gradient\n def f(unused_x):\n\n def grad(unused_dy):\n raise ValueError(\"test_error_string\")\n\n return 1., grad\n\n c = constant_op.constant(1.)\n d = constant_op.constant(2.)\n with forwardprop.ForwardAccumulator(c, d):\n with self.assertRaisesRegex(ValueError, \"test_error_string\"):\n f(c)\n\n @parameterized.named_parameters([(\"EluM5\", -0.5, nn_ops.elu),\n (\"EluP5\", [0.5], nn_ops.elu),\n (\"SwishP5\", 0.5, nn_impl.swish),\n (\"SwishM5\", [-0.5], nn_impl.swish)])\n def testElementwiseNNOps(self, value, op_fn):\n _test_gradients(self, op_fn, [constant_op.constant(value)], order=3)\n\n def testFusedBatchNormGradsInference(self):\n\n x_shape = [4, 10, 10, 2]\n increment = 3. 
/ math_ops.reduce_prod(\n constant_op.constant(x_shape, dtype=dtypes.float32))\n x = array_ops.reshape(math_ops.range(-2., 1., increment), x_shape)\n scale = constant_op.constant([1., 1.1])\n offset = constant_op.constant([-0.5, -0.6])\n mean = constant_op.constant([-1.3, 1.4])\n variance = constant_op.constant([0.7, 0.9])\n epsilon = 0.001\n\n def _bn_fused(x_arg, scale_arg, offset_arg):\n return nn_impl.fused_batch_norm(\n x_arg,\n scale_arg,\n offset_arg,\n mean,\n variance,\n epsilon=epsilon,\n is_training=False)[0]\n\n _test_gradients(self, _bn_fused, [x, scale, offset], order=2, atol=1e-2)\n\n def testPushPopAccumulatorState(self):\n # Note that this example is somewhat contrived. push_forwardprop_state is\n # probably only useful in practice for building functions that compute jvps\n # alongside their usual outputs.\n c = constant_op.constant(1.)\n d = constant_op.constant(2.)\n with forwardprop.ForwardAccumulator(c, d) as acc:\n\n @custom_gradient.custom_gradient\n def f(x):\n y = math_ops.sin(x.numpy())\n\n def grad(dy):\n with forwardprop_util.push_forwardprop_state():\n x_copy = constant_op.constant(x.numpy())\n acc._watch(x_copy, dy)\n y_copy = math_ops.sin(x_copy)\n return dy * acc.jvp(y_copy)\n\n return y, grad\n\n output = f(c)\n self.assertAllClose(d * math_ops.cos(c), acc.jvp(output))\n\n @parameterized.named_parameters([\n (\"Order{}\".format(order), order, expected)\n for order, expected in enumerate(_X11_35_DERIVATIVES)\n ])\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testHigherOrderPureForward(self, order, expected):\n\n def _forwardgrad(f):\n\n def _compute_forwardgrad(primal):\n tangent = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(primal, tangent) as acc:\n primal_out = f(primal)\n return acc.jvp(primal_out)\n\n return _compute_forwardgrad\n\n def _forward(x):\n return x**3.5\n\n f = _forward\n primal = constant_op.constant(1.1)\n for _ in range(order):\n f = _forwardgrad(f)\n self.assertAllClose(expected, f(primal))\n\n @parameterized.named_parameters([(\"Function\", def_function.function),\n (\"NoFunction\", lambda f: f)])\n def testGradPureForward(self, decorator):\n\n @decorator\n def f(x):\n return x**3.5\n\n primal = constant_op.constant(1.1)\n with forwardprop.ForwardAccumulator(primal,\n constant_op.constant(1.)) as outer_acc:\n with forwardprop.ForwardAccumulator(primal,\n constant_op.constant(1.)) as acc:\n primal_out = f(primal)\n inner_jvp = acc.jvp(primal_out)\n outer_jvp = outer_acc.jvp(inner_jvp)\n self.assertAllClose(1.1**3.5, primal_out)\n self.assertAllClose(3.5 * 1.1**2.5, inner_jvp)\n self.assertAllClose(3.5 * 2.5 * 1.1**1.5, outer_jvp)\n self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testJVPPacking(self):\n two = constant_op.constant(2.)\n primal_in = constant_op.constant(1.)\n inner_jvp = constant_op.constant(3.)\n with forwardprop.ForwardAccumulator(\n [primal_in, inner_jvp],\n [constant_op.constant(2.),\n constant_op.constant(4.)]) as outer_acc:\n with forwardprop.ForwardAccumulator(primal_in, inner_jvp) as inner_acc:\n packed_input_indices, packed_input_tangents = (\n forwardprop_util.pack_tangents([primal_in]))\n self.assertAllClose([3., 2., 4.], packed_input_tangents)\n expected_indices = (\n # inner_acc watches primal_in\n (\n (0, 1),),\n # outer_acc watches primal_in and inner_jvp\n ((0, 2), (1, 3)))\n self.assertAllEqual(expected_indices, packed_input_indices)\n primal_out = primal_in * two\n self.assertAllClose(6., 
inner_acc.jvp(primal_out))\n self.assertAllClose(4., outer_acc.jvp(primal_out))\n self.assertAllClose(8., outer_acc.jvp(inner_acc.jvp(primal_out)))\n packed_output_indices, packed_output_tangents = (\n forwardprop_util.pack_tangents([primal_out]))\n self.assertAllClose([6., 4., 8.], packed_output_tangents)\n self.assertAllEqual(expected_indices, packed_output_indices)\n\n def testFunctionGradInFunctionPureForward(self):\n\n @def_function.function\n def take_gradients():\n\n @def_function.function\n def f(x):\n return x**3.5\n\n primal = constant_op.constant(1.1)\n with forwardprop.ForwardAccumulator(\n primal, constant_op.constant(1.)) as outer_acc:\n with forwardprop.ForwardAccumulator(primal,\n constant_op.constant(1.)) as acc:\n primal_out = f(primal)\n inner_jvp = acc.jvp(primal_out)\n outer_jvp = outer_acc.jvp(inner_jvp)\n self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out)))\n return primal_out, inner_jvp, outer_jvp\n\n primal_out, inner_jvp, outer_jvp = take_gradients()\n self.assertAllClose(1.1**3.5, primal_out)\n self.assertAllClose(3.5 * 1.1**2.5, inner_jvp)\n self.assertAllClose(3.5 * 2.5 * 1.1**1.5, outer_jvp)\n\n def testFunctionGrad(self):\n\n @def_function.function\n def f(x):\n return math_ops.reduce_prod(math_ops.tanh(x)**2)\n\n _test_gradients(self, f, [constant_op.constant([1., 2.])], order=3)\n\n def testReusingJVP(self):\n m1 = random_ops.random_uniform((256, 2096))\n m2 = array_ops.identity(m1)\n tangent1 = random_ops.random_uniform((256, 2096))\n tangent2 = random_ops.random_uniform((256, 2096))\n matmul = def_function.function(math_ops.matmul)\n\n with forwardprop.ForwardAccumulator(\n primals=[m1, m2], tangents=[tangent1, tangent2]) as acc:\n result1 = matmul(m1, m1, transpose_b=True)\n result2 = matmul(m2, m2, transpose_b=True)\n\n def _expected(mat, tangent):\n return (math_ops.matmul(tangent, mat, transpose_b=True) +\n math_ops.matmul(mat, tangent, transpose_b=True))\n\n self.assertAllClose(result1, result2)\n self.assertAllClose(_expected(m1, tangent1), acc.jvp(result1))\n self.assertAllClose(_expected(m2, tangent2), acc.jvp(result2))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testHVPMemory(self):\n\n def fun(x):\n return math_ops.reduce_prod(math_ops.tanh(x)**2)\n\n primals = constant_op.constant([1., 2., 3.])\n tangents = constant_op.constant([3., 4., 5.])\n _hvp(fun, (primals,), (tangents,))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testHVPCorrectness(self):\n\n def fun(x):\n return math_ops.reduce_prod(math_ops.tanh(x)**2)\n\n primals = constant_op.constant([1., 2., 3.])\n tangents = constant_op.constant([3., 4., 5.])\n forwardback_hvp_eager, = _hvp(fun, (primals,), (tangents,))\n forwardback_hvp_function, = def_function.function(_hvp)(fun, (primals,),\n (tangents,))\n\n with backprop.GradientTape(persistent=True) as g:\n g.watch(primals)\n with backprop.GradientTape() as gg:\n gg.watch(primals)\n out = fun(primals)\n grad = array_ops_stack.unstack(gg.gradient(out, primals))\n hessian = []\n for i in range(3):\n hessian.append(g.gradient(grad[i], primals))\n hessian = array_ops_stack.stack(hessian, axis=0)\n backback_hvp = math_ops.tensordot(hessian, tangents, axes=1)\n\n self.assertAllClose(backback_hvp, forwardback_hvp_eager)\n self.assertAllClose(backback_hvp, forwardback_hvp_function)\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testShouldRecordAndStopRecord(self):\n c = constant_op.constant(1.)\n c_tangent = constant_op.constant(2.)\n with forwardprop.ForwardAccumulator(c, c_tangent) as 
acc:\n with backprop.GradientTape() as tape:\n self.assertFalse(record.should_record_backprop([c]))\n self.assertEqual(1, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))\n tape.watch(c)\n self.assertEqual(2, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))\n self.assertTrue(record.should_record_backprop([c]))\n with record.stop_recording():\n self.assertEqual(0,\n pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))\n self.assertFalse(record.should_record_backprop([c]))\n d = c * 2.\n self.assertEqual(2, pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes([c]))\n self.assertTrue(record.should_record_backprop([c]))\n self.assertFalse(record.should_record_backprop([d]))\n self.assertIsNone(acc.jvp(d))\n self.assertIsNone(tape.gradient(d, c))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testRecordingSelectively(self):\n c = constant_op.constant(1.)\n c_tangent = constant_op.constant(2.)\n with forwardprop.ForwardAccumulator(c, c_tangent) as acc:\n with backprop.GradientTape(persistent=True) as tape:\n tape.watch(c)\n with record.stop_recording():\n two = constant_op.constant(2.)\n d = c * two\n three = constant_op.constant(3.)\n e = c * three\n self.assertIsNone(acc.jvp(d))\n self.assertIsNone(acc.jvp(e))\n self.assertIsNone(tape.gradient(d, c))\n self.assertIsNone(tape.gradient(e, c))\n record.record_operation_forwardprop_only(\n \"CustomForwardMul\", [d], [c, two], lambda dd: (two * dd, c * dd),\n None)\n record.record_operation_backprop_only(\"CustomBackwardMul\", [e],\n [c, three], lambda de:\n (three * de, c * de))\n self.assertAllClose(4., acc.jvp(d))\n self.assertIsNone(acc.jvp(e))\n self.assertIsNone(tape.gradient(d, c))\n self.assertAllClose(3., tape.gradient(e, c))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testOpWithNoTrainableOutputs(self):\n v = variables.Variable(1.)\n with forwardprop.ForwardAccumulator(v, 11.):\n v.assign_sub(0.5)\n self.assertAllClose(0.5, self.evaluate(v))\n\n # TODO(b/141025187): Add a no_new_pyobjects decorator.\n def testVariableReadInFunction(self):\n v = variables.Variable(1.)\n with forwardprop.ForwardAccumulator(v, 11.) as acc:\n\n @def_function.function\n def f():\n return v.read_value(), 2. 
* v.read_value()\n\n result = f()\n self.assertAllClose((1.0, 2.), result)\n self.assertAllClose((11., 22.), acc.jvp(result))\n\n @parameterized.named_parameters([(\"ForwardPropFirst\", True),\n (\"TapeFirst\", False)])\n def testForwardOverBackwardMemoryEfficiency(self, forward_prop_first):\n # Watching depends on nesting, not creation order\n c = constant_op.constant(1.)\n if forward_prop_first:\n forward_accumulator = forwardprop.ForwardAccumulator(c, .1)\n gradient_tape = backprop.GradientTape()\n else:\n gradient_tape = backprop.GradientTape()\n forward_accumulator = forwardprop.ForwardAccumulator(c, .1)\n try:\n gc.disable()\n with gradient_tape as tape:\n # Adding and removing the tape multiple times in different nesting\n # patterns does not affect watch ordering.\n pass\n with forward_accumulator as acc:\n with gradient_tape as tape:\n tape.watch(c)\n d = math_ops.cos(c)\n self.assertFalse(record.should_record_backprop((acc.jvp(d),)))\n e = math_ops.cos(acc.jvp(d))\n math_ops.cos(e)\n weak_e = weakref.ref(e)\n del e\n self.assertIsNone(weak_e())\n self.assertIsNone(tape.gradient(acc.jvp(d), c))\n finally:\n gc.enable()\n\n @parameterized.named_parameters([(\"ForwardPropFirst\", True),\n (\"TapeFirst\", False)])\n def testBackwardOverForward(self, forward_prop_first):\n c = constant_op.constant(1.)\n # Watching depends on nesting, not creation order\n if forward_prop_first:\n forward_accumulator = forwardprop.ForwardAccumulator(c, .1)\n gradient_tape = backprop.GradientTape()\n else:\n gradient_tape = backprop.GradientTape()\n forward_accumulator = forwardprop.ForwardAccumulator(c, .1)\n with gradient_tape as tape:\n with forward_accumulator as acc:\n tape.watch(c)\n d = math_ops.cos(c)\n self.assertTrue(record.should_record_backprop((acc.jvp(d),)))\n self.assertAllClose(-.1 * math_ops.cos(1.), tape.gradient(acc.jvp(d), c))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testRecordingWithJVPIndices(self):\n c = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(c, 10.) as acc:\n packed_input_tangents = forwardprop_util.pack_tangents([c]).tangents\n self.assertAllClose([10.], packed_input_tangents)\n d = constant_op.constant(2.)\n d_tangent = constant_op.constant(3.)\n record.record_operation_forwardprop_only(\"FunctionWithInlineJVPs\",\n [d] + [d_tangent],\n [c] + packed_input_tangents,\n None, (((0, 1),),))\n self.assertAllClose(3., acc.jvp(d))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testSpecialForwardFunctionUsed(self):\n c = constant_op.constant(1.)\n d = constant_op.constant(2.)\n e = constant_op.constant(3.)\n with forwardprop.ForwardAccumulator(c, 10.) as acc:\n record.record_operation(\"ForwardIsSpecial\", [d], [c], None,\n lambda jvp: [-2. 
* jvp])\n self.assertAllClose(-20., acc.jvp(d))\n record.record_operation(\"ForwardIsSpecial2\", [], [], None, lambda: [])\n record.record_operation(\"ForwardIsSpecial3\", [e], [d], None,\n lambda x: [x])\n self.assertAllClose(-20., acc.jvp(e))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testVariableWatched(self):\n v = variables.Variable([1., 2., 3.])\n with forwardprop.ForwardAccumulator(v, constant_op.constant([.1, -.2,\n .3])) as acc:\n self.assertAllClose([.1, -.2, .3], acc.jvp(v))\n x = v * 2.\n self.assertAllClose([.2, -.4, .6], acc.jvp(x))\n x2 = v + .1\n self.assertAllClose([.1, -.2, .3], acc.jvp(x2))\n\n def testUnconnectedGradients(self):\n x = constant_op.constant(-1.)\n with forwardprop.ForwardAccumulator(x, 0.1) as acc:\n self.assertAllClose(0.1, acc.jvp(x, unconnected_gradients=\"zero\"))\n self.assertAllClose(0.1, acc.jvp(x, unconnected_gradients=\"none\"))\n y = constant_op.constant(-2.)\n self.assertAllClose(0.0, acc.jvp(y, unconnected_gradients=\"zero\"))\n self.assertIsNone(acc.jvp(y, unconnected_gradients=\"none\"))\n\n # TODO(kkb): One weakref instance is created with warmup_iters=2,\n # investigate.\n @test_util.assert_no_new_pyobjects_executing_eagerly(warmup_iters=3)\n def testVariableWatchedFunction(self):\n\n class _Model(module.Module):\n\n def __init__(self):\n self._v = None\n\n @def_function.function\n def compute_jvps(self):\n if self._v is None:\n self._v = variables.Variable([1., 2., 3.])\n with forwardprop.ForwardAccumulator(self._v,\n constant_op.constant([.1, -.2,\n .3])) as acc:\n x = self._v * 2.\n x2 = self._v + .1\n return acc.jvp((self._v, x, x2))\n\n model = _Model()\n v_jvp, x_jvp, x2_jvp = model.compute_jvps()\n self.assertAllClose([.1, -.2, .3], v_jvp)\n self.assertAllClose([.2, -.4, .6], x_jvp)\n self.assertAllClose([.1, -.2, .3], x2_jvp)\n\n def testIndexSlicesGrad(self):\n x = constant_op.constant([1.])\n\n with forwardprop.ForwardAccumulator(x, constant_op.constant([3.])) as acc:\n y = array_ops.gather(x, 0)\n self.assertAllClose(3., acc.jvp(y))\n\n def testIndexSlicesGradInFunction(self):\n\n @def_function.function\n def f(a):\n return array_ops.gather(a, 0)\n\n x = constant_op.constant([1.])\n\n with forwardprop.ForwardAccumulator(x, constant_op.constant([3.])) as acc:\n y = f(x)\n self.assertAllClose(3., acc.jvp(y))\n\n # NOTE: assert_no_new_pyobjects_executing_eagerly fails flakily on this\n # test... could be something wrong with the test decorator, or some sort of\n # nondeterministic caching.\n def testMirroredVariableWatched(self):\n\n def _replicated(input_tangent):\n with forwardprop.ForwardAccumulator(v, input_tangent) as acc:\n self.assertAllClose([.1, -.2, .3], acc.jvp(v))\n x = v * 2.\n self.assertAllClose([.2, -.4, .6], acc.jvp(x))\n x2 = v + .1\n self.assertAllClose([.1, -.2, .3], acc.jvp(x2))\n\n strategy = mirrored_strategy.MirroredStrategy()\n with strategy.scope():\n v = variables.Variable([1., 2., 3.])\n strategy.run(_replicated, args=(constant_op.constant([.1, -.2, .3]),))\n\n # TODO(b/141025187): Add a no_new_pyobjects decorator.\n def testArgumentUnused(self):\n v = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(v, 11.) 
as acc:\n\n @def_function.function\n def _f(x):\n del x\n return constant_op.constant(1.)\n\n result = _f(v)\n self.assertAllClose(1.0, result)\n self.assertIsNone(acc.jvp(result))\n\n\n@def_function.function\ndef _has_loop(iters, y):\n ret = 0.\n for i in math_ops.range(iters):\n ret += y * math_ops.cast(i, dtypes.float32)\n return ret\n\n\n@def_function.function\ndef _has_cond(k, y):\n if k > 1:\n ret = 3. * y\n else:\n ret = 0.\n return ret\n\n\n@def_function.function\ndef _fprop_while(iters, y):\n with forwardprop.ForwardAccumulator(y, 1.) as acc:\n ret = 0.\n for i in math_ops.range(iters):\n ret += y * math_ops.cast(i, dtypes.float32)\n return acc.jvp(ret)\n\n\n@def_function.function\ndef _fprop_cond(k, y):\n with forwardprop.ForwardAccumulator(y, 1.) as acc:\n if k > 1:\n ret = 3. * y\n else:\n ret = 0.\n return acc.jvp(ret)\n\n\nclass ControlFlowTests(test.TestCase):\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testOfFunctionWhile(self):\n y = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(y, 1.) as acc:\n self.assertAllClose(10., acc.jvp(_has_loop(constant_op.constant(5), y)))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testOfFunctionCond(self):\n y = constant_op.constant(1.)\n with forwardprop.ForwardAccumulator(y, 1.) as acc:\n self.assertAllClose(3., acc.jvp(_has_cond(constant_op.constant(5), y)))\n self.assertAllClose(0., acc.jvp(_has_cond(constant_op.constant(0), y)))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testInFunctionWhile(self):\n self.assertAllClose(\n 10., _fprop_while(constant_op.constant(5), constant_op.constant(1.)))\n\n @test_util.assert_no_new_pyobjects_executing_eagerly()\n def testInFunctionCond(self):\n self.assertAllClose(\n 3., _fprop_cond(constant_op.constant(5), constant_op.constant(1.)))\n self.assertAllClose(\n 0., _fprop_cond(constant_op.constant(0), constant_op.constant(1.)))\n\n\nclass HessianTests(test.TestCase, parameterized.TestCase):\n\n def testHessian1D(self):\n # Note: stolen from ops/gradients_test.py\n m = 4\n rng = np.random.RandomState([1, 2, 3])\n mat_value = rng.randn(m, m).astype(\"float32\")\n x_value = rng.randn(m).astype(\"float32\")\n hess_value = mat_value + mat_value.T\n mat = variables.Variable(mat_value)\n\n def _f(x):\n return math_ops.reduce_sum(x[:, None] * mat * x[None, :])\n\n hessian_eager, = _forward_over_back_hessian(\n _f, [constant_op.constant(x_value)],\n use_pfor=False,\n dtype=[dtypes.float32])\n self.assertAllClose(hess_value, hessian_eager)\n hessian_function, = def_function.function(_forward_over_back_hessian)(\n _f, [constant_op.constant(x_value)],\n use_pfor=False,\n dtype=[dtypes.float32])\n self.assertAllClose(hess_value, hessian_function)\n hessian_pfor, = def_function.function(_forward_over_back_hessian)(\n _f, [constant_op.constant(x_value)],\n use_pfor=True,\n dtype=[dtypes.float32])\n self.assertAllClose(hess_value, hessian_pfor)\n\n\nclass BatchTests(test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters([(math_ops.sin, (2, 3), 5),\n (math_ops.sin, (2, 3, 4), 10)])\n def testJVPBatchCorrectness(self, f, primal_shape, batch_size):\n primals = [random_ops.random_uniform(primal_shape)]\n tangent_batch = [random_ops.random_uniform([batch_size, *primal_shape])]\n self.assertAllClose(\n _jvp_batch(f, primals, tangent_batch)[1],\n _jvp_batch_matmul(f, primals, *tangent_batch))\n\n def testBatchCorrectness(self):\n x = constant_op.constant(2.0)\n y = constant_op.constant(5.0)\n tangents = (\n constant_op.constant([1., 
0., 1.]),\n constant_op.constant([0., 1., 1.]),\n )\n with forwardprop.ForwardAccumulator._batch_accumulator((x, y),\n tangents) as acc:\n z = x * y\n self.assertAllClose(acc.jvp(z), constant_op.constant([5.0, 2.0, 7.0]))\n\n @parameterized.named_parameters([(\"ForwardPropFirst\", True),\n (\"TapeFirst\", False)])\n def testBatchBackwardOverForward(self, forward_prop_first):\n x = constant_op.constant(1.)\n tangents = random_ops.random_normal(shape=[10], seed=1)\n expected = [-t * math_ops.cos(1.) for t in tangents]\n if forward_prop_first:\n batch_acc = forwardprop.ForwardAccumulator._batch_accumulator(x, tangents)\n gradient_tape = backprop.GradientTape(persistent=True)\n else:\n gradient_tape = backprop.GradientTape(persistent=True)\n batch_acc = forwardprop.ForwardAccumulator._batch_accumulator(x, tangents)\n with gradient_tape as tape:\n with batch_acc as acc:\n tape.watch(x)\n y = math_ops.cos(x)\n self.assertTrue(record.should_record_backprop((acc.jvp(y),)))\n jvps = acc.jvp(y)\n d2y_dx2 = [tape.gradient(dy_dx, x) for dy_dx in jvps]\n self.assertAllClose(expected, d2y_dx2)\n\n\nif __name__ == \"__main__\":\n # TODO(allenl): Also test with 1.x-style graph mode.\n ops.enable_eager_execution()\n test.main()\n", "output": ["_grad", "_vectorize_parameters", "_hvp", "_forward_over_back_hessian", "_test_gradients", "_gradfwd", "_jvp_batch", "_jvp_batch_matmul", "_jacfwd", "_jvp", "ControlFlowTests", "ForwardpropTest", "BatchTests", "HessianTests", "_Model"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/eager/forwardprop_test.py", "file_length": 14615, "symbol_dict": [{"symbol": "_jacfwd", "type": "mannual_defined_function", "byte_location": 2535, "location": 739}, {"symbol": "_jvp_batch_matmul", "type": "mannual_defined_function", "byte_location": 3906, "location": 1216}, {"symbol": "_test_gradients", "type": "mannual_defined_function", "byte_location": 7370, "location": 2439}, {"symbol": "_jvp", "type": "mannual_defined_function", "byte_location": 2232, "location": 624}, {"symbol": "_vectorize_parameters", "type": "mannual_defined_function", "byte_location": 5700, "location": 1889}, {"symbol": "_hvp", "type": "mannual_defined_function", "byte_location": 5349, "location": 1758}, {"symbol": "_forward_over_back_hessian", "type": "mannual_defined_function", "byte_location": 6397, "location": 2139}, {"symbol": "_gradfwd", "type": "mannual_defined_function", "byte_location": 4737, "location": 1544}, {"symbol": "_grad", "type": "mannual_defined_function", "byte_location": 4388, "location": 1422}, {"symbol": "_jvp_batch", "type": "mannual_defined_function", "byte_location": 3721, "location": 1146}, {"symbol": "ControlFlowTests", "type": "mannual_defined_class", "byte_location": 36600, "location": 13042}, {"symbol": "_Model", "type": "mannual_defined_class", "byte_location": 33561, "location": 11861}, {"symbol": "BatchTests", "type": "mannual_defined_class", "byte_location": 38816, "location": 13879}, {"symbol": "HessianTests", "type": "mannual_defined_class", "byte_location": 37719, "location": 13454}, {"symbol": "ForwardpropTest", "type": "mannual_defined_class", "byte_location": 8509, "location": 2866}]}} {"input": "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Framework of debug wrapper sessions.\n\nA debug wrapper session is a wrapper around a TensorFlow Python Session.\nThe wrapper preserves the Session interface, most importantly the run() method,\nwhile providing abilities to:\na) Intercept a run() call to a wrapped session and insert debug tensor watches\n according to externally-specified debug URLs.\n\nb) Release control to an external (i.e., non-Session) object before and after\n the run() call, so that the external object can perform actions such as\n launching a UI to let users inspect the intermediate tensors and partition\n graphs from the run() call.\n\nc) (To be implemented in a future CL) Enter an instruction loop to let an\n external object (e.g., remote client) launch run() and cont() calls\n remotely.\n\n*** The lifetime of a debug wrapper session: ***\n\n1) The wrapper session is created by calling the constructor with a\n wrapped (normal) session as the argument:\n wrapper = FooDebugWrapperSession(sess)\n wherein FooDebugWrapperSession is a concrete subclass implementing the\n abstract BaseDebugWrapperSession class below.\n\n2) Near the end of the constructor call, the on_session_init() callback is\n invoked, with a OnSessionInitRequest object as the argument. The object\n carries the wrapped (normal) session object.\n\n3) The callback handles the request and returns a OnSessionInitResponse\n object with an action field, directing the wrapper session what to do next.\n\nIf the action field in the OnSessionInitResponse is PROCEED, the constructor\nreturns. Control is released back to the caller of the constructor, which can\ninvoke run() method of wrapper session with the same syntax as a non-wrapped\nsession, e.g.,:\n wrapper.run(fetches, feed_dict=feeds, options=run_options)\n\nBelow, A1 - A2 is the lifetime of a wrapper run() call if the action is\nPROCEED:\n\nA1) Right at the start of each run() call, the on_run_start() callback is\n invoked, with an OnRunStartRequest object carrying information such as\n the fetches, the feed dict, the run options and run metadata used in\n this run call, along with a count of how many run calls has occurred\n on this wrapper session. 
The callback then returns an OnRunStartResponse\n object, of which the action field directs what the wrapper session\n actually will do of the run() call.\n\n If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,\n with the debug URLs supplied in the debug_urls field of the response.\n These can be file:// or grpc:// URLs, for example.\n\n If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.\n\nA2) Right before the run() returns, the on_run_end() callback is invoked,\n with an OnRunEndRequest object as the argument, which carries information\n including the actual action performed in the wrapper run() call and the\n run_metadata from the run() call.\n\nHowever, if the action field in OnSessionInitResponse is\nREMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop\nthat gives the control to a remote caller.\n\nIn the remote instruction loop, the following steps will happen:\n\nB1) Callback on_instr_start() is invoked. The callback will return an\n OnInstrStartResponse object with an action field which can order one of\n the following actions:\n i) a run() call with fetches, feeds and debug_urls specified.\n ii) exit the instruction loop.\n\nB2) The wrapper session carries out the action specified above.\n\nB3) If still in the instruction loop, the wrapper session invokes the\n on_instr_end() callback. After the on_instr_end() callback returns, jump\n back to B1.\n\nTODO(cais): Implemented the instruction loop in B1 - B3.\n\n\"\"\"\n\nimport abc\nimport re\nimport threading\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.lib import debug_utils\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import stack\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.compat import collections_abc\n\n\n# Helper function.\ndef _check_type(obj, expected_types):\n \"\"\"Check if an object is of the expected type.\n\n Args:\n obj: The object being checked.\n expected_types: (`type` or an iterable of `type`s) The expected `type`(s)\n of obj.\n\n Raises:\n TypeError: If obj is not an instance of expected_type.\n \"\"\"\n if not isinstance(obj, expected_types):\n raise TypeError(\"Expected type %s; got type %s\" %\n (expected_types, type(obj)))\n\n\nclass OnSessionInitRequest:\n \"\"\"Request to an on-session-init callback.\n\n This callback is invoked during the __init__ call to a debug-wrapper session.\n \"\"\"\n\n def __init__(self, sess):\n \"\"\"Constructor.\n\n Args:\n sess: A tensorflow Session object.\n \"\"\"\n\n _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n self.session = sess\n\n\nclass OnSessionInitAction:\n \"\"\"Enum-like values for possible action to take on session init.\"\"\"\n\n # Proceed, without special actions, in the wrapper session initialization.\n # What action the wrapper session performs next is determined by the caller\n # of the wrapper session. 
E.g., it can call run().\n PROCEED = \"proceed\"\n\n # Instead of letting the caller of the wrapper session determine what actions\n # the wrapper session will perform next, enter a loop to receive instructions\n # from a remote client.\n # For example, TensorBoard visual debugger can use this action so that it can\n # launch session.run() calls remotely.\n REMOTE_INSTR_LOOP = \"remote_instr_loop\"\n\n\nclass OnSessionInitResponse:\n \"\"\"Response from an on-session-init callback.\"\"\"\n\n def __init__(self, action):\n \"\"\"Constructor.\n\n Args:\n action: (`OnSessionInitAction`) Debugger action to take on session init.\n \"\"\"\n _check_type(action, str)\n self.action = action\n\n\nclass OnRunStartRequest:\n \"\"\"Request to an on-run-start callback.\n\n This callback is invoked during a run() call of the debug-wrapper\n session, immediately after the run() call counter is incremented.\n \"\"\"\n\n def __init__(self, fetches, feed_dict, run_options, run_metadata,\n run_call_count, is_callable_runner=False):\n \"\"\"Constructor of `OnRunStartRequest`.\n\n Args:\n fetches: Fetch targets of the run() call.\n feed_dict: The feed dictionary to the run() call.\n run_options: RunOptions input to the run() call.\n run_metadata: RunMetadata input to the run() call.\n The above four arguments are identical to the input arguments to the\n run() method of a non-wrapped TensorFlow session.\n run_call_count: 1-based count of how many run calls (including this one)\n has been invoked.\n is_callable_runner: (bool) whether a runner returned by\n Session.make_callable is being run.\n \"\"\"\n self.fetches = fetches\n self.feed_dict = feed_dict\n self.run_options = run_options\n self.run_metadata = run_metadata\n self.run_call_count = run_call_count\n self.is_callable_runner = is_callable_runner\n\n\nclass OnRunStartAction:\n \"\"\"Enum-like values for possible action to take on start of a run() call.\"\"\"\n\n # Run once with debug tensor-watching.\n DEBUG_RUN = \"debug_run\"\n\n # Run once with profiler.\n PROFILE_RUN = \"profile_run\"\n\n # Run without debug tensor-watching.\n NON_DEBUG_RUN = \"non_debug_run\"\n\n\n\nclass OnRunStartResponse:\n \"\"\"Request from an on-run-start callback.\n\n The caller of the callback can use this response object to specify what\n action the debug-wrapper session actually takes on the run() call.\n \"\"\"\n\n def __init__(self,\n action,\n debug_urls,\n debug_ops=\"DebugIdentity\",\n node_name_regex_allowlist=None,\n op_type_regex_allowlist=None,\n tensor_dtype_regex_allowlist=None,\n tolerate_debug_op_creation_failures=False):\n \"\"\"Constructor of `OnRunStartResponse`.\n\n Args:\n action: (`OnRunStartAction`) the action actually taken by the wrapped\n session for the run() call.\n debug_urls: (`list` of `str`) debug_urls used in watching the tensors\n during the run() call.\n debug_ops: (`str` or `list` of `str`) Debug op(s) to be used by the\n debugger.\n node_name_regex_allowlist: Regular-expression allowlist for node\n name.\n op_type_regex_allowlist: Regular-expression allowlist for op type.\n tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor\n dtype.\n tolerate_debug_op_creation_failures: Whether debug op creation failures\n are to be tolerated.\n \"\"\"\n\n _check_type(action, str)\n self.action = action\n\n _check_type(debug_urls, list)\n self.debug_urls = debug_urls\n\n self.debug_ops = debug_ops\n\n self.node_name_regex_allowlist = node_name_regex_allowlist\n self.op_type_regex_allowlist = op_type_regex_allowlist\n 
self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n self.tolerate_debug_op_creation_failures = (\n tolerate_debug_op_creation_failures)\n\n\nclass OnRunEndRequest:\n \"\"\"Request to an on-run-end callback.\n\n The callback is invoked immediately before the wrapped run() call ends.\n \"\"\"\n\n def __init__(self,\n performed_action,\n run_metadata=None,\n client_graph_def=None,\n tf_error=None):\n \"\"\"Constructor for `OnRunEndRequest`.\n\n Args:\n performed_action: (`OnRunStartAction`) Actually-performed action by the\n debug-wrapper session.\n run_metadata: run_metadata output from the run() call (if any).\n client_graph_def: (GraphDef) GraphDef from the client side, i.e., from\n the python front end of TensorFlow. Can be obtained with\n session.graph.as_graph_def().\n tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred\n during the run (if any).\n \"\"\"\n\n _check_type(performed_action, str)\n self.performed_action = performed_action\n\n if run_metadata is not None:\n _check_type(run_metadata, config_pb2.RunMetadata)\n self.run_metadata = run_metadata\n self.client_graph_def = client_graph_def\n self.tf_error = tf_error\n\n\nclass OnRunEndResponse:\n \"\"\"Response from an on-run-end callback.\"\"\"\n\n def __init__(self):\n\n # Currently only a placeholder.\n pass\n\n\nclass BaseDebugWrapperSession(session.SessionInterface, metaclass=abc.ABCMeta):\n \"\"\"Base class of debug-wrapper session classes.\n\n Concrete classes that inherit from this class need to implement the abstract\n methods such as on_session_init, on_run_start and on_run_end.\n \"\"\"\n\n def __init__(self, sess, thread_name_filter=None,\n pass_through_operrors=False):\n \"\"\"Constructor of `BaseDebugWrapperSession`.\n\n Args:\n sess: An (unwrapped) TensorFlow session instance. It should be a subtype\n of `BaseSession` or `tf.MonitoredSession`.\n thread_name_filter: Regular-expression filter (allowlist) for name(s) of\n thread(s) on which the wrapper session will be active. This regular\n expression is used in a start-anchored fashion on the thread name, i.e.,\n by applying the `match` method of the compiled pattern. The default\n `None` means that the wrapper session will be active on all threads.\n E.g., r\"MainThread$\", r\"QueueRunnerThread.*\".\n pass_through_operrors: If True, all captured OpErrors will be\n propagated. By default this captures all OpErrors.\n\n Raises:\n ValueError: On invalid `OnSessionInitAction` value.\n NotImplementedError: If a non-DirectSession sess object is received.\n \"\"\"\n\n _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n\n # The session being wrapped.\n self._sess = sess\n self._thread_name_filter_pattern = (re.compile(thread_name_filter)\n if thread_name_filter else None)\n # TODO(cais/kstevens): Unittest this pass through feature.\n self._pass_through_operrors = pass_through_operrors\n\n # Keeps track of number of run calls that have been performed on this\n # debug-wrapper session. 
The count can be used for purposes such as\n # displaying the state of the Session in a UI and determining a run\n # number-dependent debug URL.\n self._run_call_count = 0\n\n # Invoke on-session-init callback.\n response = self.on_session_init(OnSessionInitRequest(self._sess))\n _check_type(response, OnSessionInitResponse)\n\n if response.action == OnSessionInitAction.PROCEED:\n pass\n elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:\n # TODO(cais): Implement REMOTE_INSTR_LOOP\n raise NotImplementedError(\n \"OnSessionInitAction REMOTE_INSTR_LOOP has not been \"\n \"implemented.\")\n else:\n raise ValueError(\n \"Invalid OnSessionInitAction value: %s\" % response.action)\n\n self._default_session_context_manager = None\n\n # A cache for callables created from CallableOptions.\n self._cached_callables_from_options = {}\n\n @property\n def graph(self):\n return self._sess.graph\n\n @property\n def graph_def(self):\n return self._sess.graph_def\n\n @property\n def sess_str(self):\n return self._sess.sess_str\n\n @property\n def session(self):\n return self._sess\n\n def run(self,\n fetches,\n feed_dict=None,\n options=None,\n run_metadata=None,\n callable_runner=None,\n callable_runner_args=None,\n callable_options=None):\n \"\"\"Wrapper around Session.run() that inserts tensor watch options.\n\n Args:\n fetches: Same as the `fetches` arg to regular `Session.run()`.\n feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.\n options: Same as the `options` arg to regular `Session.run()`.\n run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.\n callable_runner: A `callable` returned by `Session.make_callable()`.\n If not `None`, `fetches` and `feed_dict` must both be `None`.\n Mutually exclusive with `callable_options`.\n callable_runner_args: An optional list of arguments to `callable_runner`\n or for `callable_options`.\n callable_options: An instance of `config_pb2.CallableOptions`, to be\n used with `Session._make_callable_from_options()`. Mutually exclusive\n with `callable_runner`.\n\n Returns:\n Simply forwards the output of the wrapped `Session.run()` call.\n\n Raises:\n ValueError: On invalid `OnRunStartAction` value. 
Or if `callable_runner`\n is not `None` and either or both of `fetches` and `feed_dict` is `None`.\n \"\"\"\n if callable_runner and callable_options:\n raise ValueError(\n \"callable_runner and callable_options are mutually exclusive, but \"\n \"are both specified in this call to BaseDebugWrapperSession.run().\")\n\n if callable_runner and (fetches or feed_dict):\n raise ValueError(\n \"callable_runner and fetches/feed_dict are mutually exclusive, \"\n \"but are used simultaneously.\")\n elif callable_options and (fetches or feed_dict):\n raise ValueError(\n \"callable_options and fetches/feed_dict are mutually exclusive, \"\n \"but are used simultaneously.\")\n\n self.increment_run_call_count()\n\n def is_empty(x):\n \"\"\"Check whether a possibly nested structure is empty.\"\"\"\n if not nest.is_nested(x):\n return False\n if isinstance(x, collections_abc.Mapping):\n return is_empty(list(x.values()))\n for item in x:\n if not is_empty(item):\n return False\n return True\n\n empty_fetches = is_empty(fetches)\n if empty_fetches:\n tf_logging.info(\n \"Due to empty fetches, tfdbg Session wrapper is letting a \"\n \"Session.run pass through without any debugging actions.\")\n if self._is_disabled_thread() or empty_fetches:\n if callable_runner:\n return callable_runner(*callable_runner_args)\n elif callable_options:\n # pylint:disable=protected-access\n return self._sess._make_callable_from_options(\n callable_options)(*callable_runner_args)\n # pylint:enable=protected-access\n else:\n return self._sess.run(fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n\n # Invoke on-run-start callback and obtain response.\n run_start_resp = self.on_run_start(\n OnRunStartRequest(fetches, feed_dict, options, run_metadata,\n self._run_call_count,\n is_callable_runner=bool(callable_runner)))\n _check_type(run_start_resp, OnRunStartResponse)\n\n if run_start_resp.action == OnRunStartAction.DEBUG_RUN:\n retvals, run_end_req = self._run_with_debugging(\n run_start_resp, fetches, feed_dict, options, run_metadata,\n callable_runner, callable_runner_args, callable_options)\n elif run_start_resp.action == OnRunStartAction.PROFILE_RUN:\n retvals, run_end_req = self._run_with_profiling(\n run_start_resp, fetches, feed_dict, options, run_metadata,\n callable_runner, callable_runner_args, callable_options)\n elif run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN:\n # Invoke run() method of the wrapped session.\n if callable_runner:\n retvals = callable_runner(*callable_runner_args)\n elif callable_options:\n # pylint:disable=protected-access\n callable_object = self._sess._make_callable_from_options(\n callable_options)\n # pylint:enable=protected-access\n retvals = callable_object(*callable_runner_args)\n else:\n retvals = self._sess.run(\n fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n\n # Prepare arg for the on-run-end callback.\n run_end_req = OnRunEndRequest(run_start_resp.action)\n else:\n raise ValueError(\n \"Invalid OnRunStartAction value: %s\" % run_start_resp.action)\n\n # Invoke on-run-end callback and obtain response.\n run_end_resp = self.on_run_end(run_end_req)\n _check_type(run_end_resp, OnRunEndResponse)\n # Currently run_end_resp is only a placeholder. 
No action is taken on it.\n\n return retvals\n\n def _run_with_debugging(self,\n run_start_resp,\n fetches,\n feed_dict,\n options,\n run_metadata,\n callable_runner,\n callable_runner_args,\n callable_options):\n \"\"\"Perform a session.run() or callable with debugging.\"\"\"\n # Decorate RunOption to fill in debugger tensor watch specifications.\n decorated_run_options = None\n if callable_options:\n callable_options_id = id(callable_options)\n if callable_options_id not in self._cached_callables_from_options:\n # Make a copy of callable_options to avoid mutating it.\n new_callable_options = config_pb2.CallableOptions()\n new_callable_options.CopyFrom(callable_options)\n decorated_run_options = new_callable_options.run_options\n else:\n decorated_run_options = options or config_pb2.RunOptions()\n\n run_metadata = run_metadata or config_pb2.RunMetadata()\n\n if decorated_run_options:\n self._decorate_run_options_for_debug(\n decorated_run_options,\n run_start_resp.debug_urls,\n debug_ops=run_start_resp.debug_ops,\n node_name_regex_allowlist=(run_start_resp.node_name_regex_allowlist),\n op_type_regex_allowlist=run_start_resp.op_type_regex_allowlist,\n tensor_dtype_regex_allowlist=(\n run_start_resp.tensor_dtype_regex_allowlist),\n tolerate_debug_op_creation_failures=(\n run_start_resp.tolerate_debug_op_creation_failures))\n\n # Invoke the run() method of the wrapped Session. Catch any TensorFlow\n # runtime errors.\n tf_error = None\n try:\n if callable_runner:\n retvals = callable_runner(*callable_runner_args,\n options=decorated_run_options,\n run_metadata=run_metadata)\n elif callable_options:\n # pylint:disable=protected-access\n if callable_options_id in self._cached_callables_from_options:\n callable_object = self._cached_callables_from_options[\n callable_options_id]\n else:\n callable_object = self._sess._make_callable_from_options(\n new_callable_options)\n self._cached_callables_from_options[\n callable_options_id] = callable_object\n # pylint:enable=protected-access\n retvals = callable_object(\n *callable_runner_args, run_metadata=run_metadata)\n else:\n retvals = self._sess.run(fetches,\n feed_dict=feed_dict,\n options=decorated_run_options,\n run_metadata=run_metadata)\n except errors.OpError as op_error:\n if self._pass_through_operrors:\n raise op_error\n tf_error = op_error\n retvals = op_error\n\n return retvals, OnRunEndRequest(\n run_start_resp.action,\n run_metadata=run_metadata,\n client_graph_def=self._sess.graph.as_graph_def(),\n tf_error=tf_error)\n\n def _run_with_profiling(self,\n run_start_resp,\n fetches,\n feed_dict,\n options,\n run_metadata,\n callable_runner,\n callable_runner_args,\n callable_options):\n \"\"\"Perform a session.run() or callable with profiling.\"\"\"\n # Decorate RunOption to fill in debugger tensor watch specifications.\n decorated_run_options = None\n if callable_options:\n callable_options_id = id(callable_options)\n if callable_options_id not in self._cached_callables_from_options:\n # Make a copy of callable_options to avoid mutating it.\n new_callable_options = config_pb2.CallableOptions()\n new_callable_options.CopyFrom(callable_options)\n decorated_run_options = new_callable_options.run_options\n else:\n decorated_run_options = options or config_pb2.RunOptions()\n self._decorate_run_options_for_profile(decorated_run_options)\n\n run_metadata = run_metadata or config_pb2.RunMetadata()\n if callable_runner:\n retvals = callable_runner(*callable_runner_args,\n options=decorated_run_options,\n run_metadata=run_metadata)\n elif 
callable_options:\n # pylint:disable=protected-access\n callable_object = self._sess._make_callable_from_options(\n new_callable_options)\n # pylint:enable=protected-access\n retvals = callable_object(\n *callable_runner_args, run_metadata=run_metadata)\n else:\n retvals = self._sess.run(fetches,\n feed_dict=feed_dict,\n options=decorated_run_options,\n run_metadata=run_metadata)\n return retvals, OnRunEndRequest(\n run_start_resp.action,\n run_metadata=run_metadata,\n client_graph_def=self._sess.graph.as_graph_def())\n\n def _is_disabled_thread(self):\n thread_name = threading.current_thread().name or \"\"\n return (self._thread_name_filter_pattern and\n not self._thread_name_filter_pattern.match(thread_name))\n\n def run_step_fn(self, step_fn):\n return step_fn(\n monitored_session.MonitoredSession.StepContext(self._sess, self.run))\n\n def partial_run_setup(self, fetches, feeds=None):\n \"\"\"Sets up the feeds and fetches for partial runs in the session.\"\"\"\n raise NotImplementedError(\n \"partial_run_setup is not implemented for debug-wrapper sessions.\")\n\n def partial_run(self, handle, fetches, feed_dict=None):\n raise NotImplementedError(\n \"partial_run is not implemented for debug-wrapper sessions.\")\n\n def list_devices(self, *args, **kwargs):\n return self._sess.list_devices(*args, **kwargs)\n\n def reset(self, *args, **kwargs):\n return self._sess.reset(*args, **kwargs)\n\n def make_callable(self,\n fetches,\n feed_list=None,\n accept_options=False):\n runner = self._sess.make_callable(\n fetches, feed_list=feed_list, accept_options=True)\n def wrapped_runner(*runner_args, **kwargs):\n return self.run(None,\n feed_dict=None,\n options=kwargs.get(\"options\", None),\n run_metadata=kwargs.get(\"run_metadata\", None),\n callable_runner=runner,\n callable_runner_args=runner_args)\n return wrapped_runner\n\n def _make_callable_from_options(self, callable_options):\n def wrapped_runner(*feed_values, **kwargs):\n return self.run(None,\n run_metadata=kwargs.get(\"run_metadata\", None),\n callable_options=callable_options,\n callable_runner_args=feed_values)\n return wrapped_runner\n\n @property\n def run_call_count(self):\n return self._run_call_count\n\n def increment_run_call_count(self):\n self._run_call_count += 1\n\n def _is_disk_usage_reset_each_run(self):\n \"\"\"Indicates whether disk usage is reset after each Session.run.\n\n Subclasses that clean up the disk usage after every run should\n override this protected method.\n\n Returns:\n (`bool`) Whether the disk usage amount is reset to zero after\n each Session.run.\n \"\"\"\n return False\n\n def _decorate_run_options_for_debug(\n self,\n run_options,\n debug_urls,\n debug_ops=\"DebugIdentity\",\n node_name_regex_allowlist=None,\n op_type_regex_allowlist=None,\n tensor_dtype_regex_allowlist=None,\n tolerate_debug_op_creation_failures=False):\n \"\"\"Modify a RunOptions object for debug tensor watching.\n\n Specifies request for outputting partition graphs. 
Adds\n debug_tensor_watch_opts with proper debug URLs.\n\n Args:\n run_options: (RunOptions) the modified RunOptions object.\n debug_urls: (list of str) debug URLs to be entered in run_options.\n debug_tensor_watch_opts.\n debug_ops: (str or list of str) debug op(s) to be used by the debugger.\n node_name_regex_allowlist: Regular-expression allowlist for node\n name.\n op_type_regex_allowlist: Regular-expression allowlist for op type.\n tensor_dtype_regex_allowlist: Regular-expression allowlist for tensor\n dtype.\n tolerate_debug_op_creation_failures: Whether debug op creation failures\n are to be tolerated.\n \"\"\"\n\n run_options.output_partition_graphs = True\n debug_utils.watch_graph(\n run_options,\n self._sess.graph,\n debug_urls=debug_urls,\n debug_ops=debug_ops,\n node_name_regex_allowlist=node_name_regex_allowlist,\n op_type_regex_allowlist=op_type_regex_allowlist,\n tensor_dtype_regex_allowlist=tensor_dtype_regex_allowlist,\n tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,\n reset_disk_byte_usage=(self._run_call_count == 1 or\n self._is_disk_usage_reset_each_run()))\n\n def _decorate_run_options_for_profile(self, run_options):\n \"\"\"Modify a RunOptions object for profiling TensorFlow graph execution.\n\n Args:\n run_options: (RunOptions) the modified RunOptions object.\n \"\"\"\n\n run_options.trace_level = config_pb2.RunOptions.FULL_TRACE\n\n @abc.abstractmethod\n def on_session_init(self, request):\n \"\"\"Callback invoked during construction of the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the constructor ends.\n\n Args:\n request: (`OnSessionInitRequest`) callback request carrying information\n such as the session being wrapped.\n\n Returns:\n An instance of `OnSessionInitResponse`.\n \"\"\"\n\n @abc.abstractmethod\n def on_run_start(self, request):\n \"\"\"Callback invoked on run() calls to the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens after the wrapper's run() call is entered,\n after an increment of run call counter.\n\n Args:\n request: (`OnRunStartRequest`) callback request object carrying\n information about the run call such as the fetches, feed dict, run\n options, run metadata, and how many `run()` calls to this wrapper\n session have occurred.\n\n Returns:\n An instance of `OnRunStartResponse`, carrying information to\n debug URLs used to watch the tensors.\n \"\"\"\n\n @abc.abstractmethod\n def on_run_end(self, request):\n \"\"\"Callback invoked on run() calls to the debug-wrapper session.\n\n This is a blocking callback.\n The invocation happens right before the wrapper exits its run() call.\n\n Args:\n request: (`OnRunEndRequest`) callback request object carrying information\n such as the actual action performed by the session wrapper for the\n run() call.\n\n Returns:\n An instance of `OnRunStartResponse`.\n \"\"\"\n\n def as_default(self):\n return stack.default_session(self)\n\n def __enter__(self):\n if self._default_session_context_manager is None:\n self._default_session_context_manager = self.as_default()\n return self._default_session_context_manager.__enter__()\n\n def __exit__(self, exec_type, exec_value, exec_tb):\n self._default_session_context_manager.__exit__(\n exec_type, exec_value, exec_tb)\n\n def __del__(self):\n if hasattr(self._sess, \"__del__\"):\n self._sess.__del__()\n\n def close(self):\n self._sess.close()\n\n # TODO(cais): Add _node_name_regex_allowlist and\n # _node_op_type_regex_allowlist.\n\n def 
should_stop(self):\n if hasattr(self._sess, \"should_stop\"):\n return self._sess.should_stop()\n else:\n raise ValueError(\n \"The wrapped session %r does not have a method called 'should_stop'. \"\n \"Do you intend to wrap a tf.MonitoredSession instead?\" % self._sess)\n\n\nclass WatchOptions:\n \"\"\"Type for return values of watch_fn.\"\"\"\n\n def __init__(self,\n debug_ops=None,\n node_name_regex_allowlist=None,\n op_type_regex_allowlist=None,\n tensor_dtype_regex_allowlist=None,\n tolerate_debug_op_creation_failures=False):\n \"\"\"Constructor of WatchOptions: Debug watch options.\n\n Used as return values of `watch_fn`s.\n\n Args:\n debug_ops: (`str` or `list of str`) Debug ops to be used.\n node_name_regex_allowlist: Regular-expression allowlist for node_name,\n e.g., `\"(weight_[0-9]+|bias_.*)\"`\n op_type_regex_allowlist: Regular-expression allowlist for the op type of\n nodes, e.g., `\"(Variable|Add)\"`.\n If both `node_name_regex_allowlist` and `op_type_regex_allowlist`\n are set, the two filtering operations will occur in a logical `AND`\n relation. In other words, a node will be included if and only if it\n hits both allowlists.\n tensor_dtype_regex_allowlist: Regular-expression allowlist for Tensor\n data type, e.g., `\"^int.*\"`.\n This allowlist operates in logical `AND` relations to the two allowlists\n above.\n tolerate_debug_op_creation_failures: (`bool`) whether debug op creation\n failures (e.g., due to dtype incompatibility) are to be tolerated by not\n throwing exceptions.\n \"\"\"\n if debug_ops:\n self.debug_ops = debug_ops\n else:\n self.debug_ops = [\"DebugIdentity\"]\n self.node_name_regex_allowlist = node_name_regex_allowlist\n self.op_type_regex_allowlist = op_type_regex_allowlist\n self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n self.tolerate_debug_op_creation_failures = (\n tolerate_debug_op_creation_failures)\n\n def __repr__(self):\n return (\"WatchOptions(debug_ops=%r, node_name_regex_allowlist=%r, \"\n \"op_type_regex_allowlist=%r, tensor_dtype_regex_allowlist=%r, \"\n \"tolerate_debug_op_creation_failures=%r)\" %\n (self.debug_ops, self.node_name_regex_allowlist,\n self.op_type_regex_allowlist, self.tensor_dtype_regex_allowlist,\n self.tolerate_debug_op_creation_failures))\n\n\nclass NonInteractiveDebugWrapperSession(BaseDebugWrapperSession):\n \"\"\"Base class for non-interactive (i.e., non-CLI) debug wrapper sessions.\"\"\"\n\n def __init__(self, sess, watch_fn=None, thread_name_filter=None,\n pass_through_operrors=False):\n \"\"\"Constructor of NonInteractiveDebugWrapperSession.\n\n Args:\n sess: The TensorFlow `Session` object being wrapped.\n watch_fn: (`Callable`) A Callable that maps the fetches and feeds of a\n debugged `Session.run()` call to `WatchOptions.`\n * Args:\n * `fetches`: the fetches to the `Session.run()` call.\n * `feeds`: the feeds to the `Session.run()` call.\n\n * Returns:\n (`tf_debug.WatchOptions`) An object containing debug options including\n the debug ops to use, the node names, op types and/or tensor data\n types to watch, etc. See the documentation of `tf_debug.WatchOptions`\n for more details.\n thread_name_filter: Regular-expression white list for threads on which the\n wrapper session will be active. See doc of `BaseDebugWrapperSession` for\n more details.\n pass_through_operrors: If true, all captured OpErrors will be\n propagated. 
By default this captures all OpErrors.\n Raises:\n TypeError: If a non-None `watch_fn` is specified and it is not callable.\n \"\"\"\n\n BaseDebugWrapperSession.__init__(\n self, sess, thread_name_filter=thread_name_filter,\n pass_through_operrors=pass_through_operrors)\n\n self._watch_fn = None\n if watch_fn is not None:\n if not callable(watch_fn):\n raise TypeError(\"watch_fn is not callable\")\n self._watch_fn = watch_fn\n\n def on_session_init(self, request):\n \"\"\"See doc of BaseDebugWrapperSession.on_run_start.\"\"\"\n\n return OnSessionInitResponse(OnSessionInitAction.PROCEED)\n\n @abc.abstractmethod\n def prepare_run_debug_urls(self, fetches, feed_dict):\n \"\"\"Abstract method to be implemented by concrete subclasses.\n\n This method prepares the run-specific debug URL(s).\n\n Args:\n fetches: Same as the `fetches` argument to `Session.run()`\n feed_dict: Same as the `feed_dict` argument to `Session.run()`\n\n Returns:\n debug_urls: (`str` or `list` of `str`) Debug URLs to be used in\n this `Session.run()` call.\n \"\"\"\n\n def on_run_start(self, request):\n \"\"\"See doc of BaseDebugWrapperSession.on_run_start.\"\"\"\n\n debug_urls, watch_opts = self._prepare_run_watch_config(\n request.fetches, request.feed_dict)\n\n return OnRunStartResponse(\n OnRunStartAction.DEBUG_RUN,\n debug_urls,\n debug_ops=watch_opts.debug_ops,\n node_name_regex_allowlist=watch_opts.node_name_regex_allowlist,\n op_type_regex_allowlist=watch_opts.op_type_regex_allowlist,\n tensor_dtype_regex_allowlist=watch_opts.tensor_dtype_regex_allowlist,\n tolerate_debug_op_creation_failures=(\n watch_opts.tolerate_debug_op_creation_failures))\n\n def _prepare_run_watch_config(self, fetches, feed_dict):\n \"\"\"Get the debug_urls, and node/op allowlists for the current run() call.\n\n Args:\n fetches: Same as the `fetches` argument to `Session.run()`.\n feed_dict: Same as the `feed_dict argument` to `Session.run()`.\n\n Returns:\n debug_urls: (str or list of str) Debug URLs for the current run() call.\n Currently, the list consists of only one URL that is a file:// URL.\n watch_options: (WatchOptions) The return value of a watch_fn, containing\n options including debug_ops, and allowlists.\n \"\"\"\n\n debug_urls = self.prepare_run_debug_urls(fetches, feed_dict)\n if self._watch_fn is None:\n watch_options = WatchOptions()\n else:\n watch_options = self._watch_fn(fetches, feed_dict)\n if isinstance(watch_options, tuple):\n # For legacy return type (tuples).\n watch_options = WatchOptions(*watch_options)\n\n return debug_urls, watch_options\n\n def on_run_end(self, request):\n \"\"\"See doc of BaseDebugWrapperSession.on_run_end.\"\"\"\n\n return OnRunEndResponse()\n", "output": ["_check_type", "OnRunEndRequest", "BaseDebugWrapperSession", "OnSessionInitRequest", "WatchOptions", "OnSessionInitAction", "OnSessionInitResponse", "NonInteractiveDebugWrapperSession", "OnRunEndResponse", "OnRunStartRequest", "OnRunStartResponse", "OnRunStartAction"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/debug/wrappers/framework.py", "file_length": 10715, "symbol_dict": [{"symbol": "_check_type", "type": "mannual_defined_function", "byte_location": 4894, "location": 1254}, {"symbol": "OnRunStartRequest", "type": "mannual_defined_class", "byte_location": 6684, "location": 1772}, {"symbol": "OnSessionInitResponse", "type": "mannual_defined_class", "byte_location": 6399, "location": 1687}, {"symbol": "OnRunStartAction", "type": "mannual_defined_class", "byte_location": 7867, "location": 2112}, {"symbol": 
"BaseDebugWrapperSession", "type": "mannual_defined_class", "byte_location": 11141, "location": 3088}, {"symbol": "OnRunStartResponse", "type": "mannual_defined_class", "byte_location": 8174, "location": 2220}, {"symbol": "OnRunEndRequest", "type": "mannual_defined_class", "byte_location": 9882, "location": 2722}, {"symbol": "OnSessionInitAction", "type": "mannual_defined_class", "byte_location": 5719, "location": 1509}, {"symbol": "WatchOptions", "type": "mannual_defined_class", "byte_location": 31079, "location": 8830}, {"symbol": "OnRunEndResponse", "type": "mannual_defined_class", "byte_location": 11000, "location": 3044}, {"symbol": "OnSessionInitRequest", "type": "mannual_defined_class", "byte_location": 5342, "location": 1396}, {"symbol": "NonInteractiveDebugWrapperSession", "type": "mannual_defined_class", "byte_location": 33283, "location": 9517}]}} {"input": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Building Blocks of the TensorFlow Debugger CLI.\"\"\"\nimport os\nimport stat\nimport tempfile\n\nimport numpy as np\n\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.debug.cli import debugger_cli_common\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import googletest\n\n\nclass CommandLineExitTest(test_util.TensorFlowTestCase):\n\n def testConstructionWithoutToken(self):\n exit_exc = debugger_cli_common.CommandLineExit()\n\n self.assertTrue(isinstance(exit_exc, Exception))\n\n def testConstructionWithToken(self):\n exit_exc = debugger_cli_common.CommandLineExit(exit_token={\"foo\": \"bar\"})\n\n self.assertTrue(isinstance(exit_exc, Exception))\n self.assertEqual({\"foo\": \"bar\"}, exit_exc.exit_token)\n\n\nclass RichTextLinesTest(test_util.TensorFlowTestCase):\n\n def testRichTextLinesConstructorComplete(self):\n # Test RichTextLines constructor.\n screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]},\n annotations={0: \"longer wavelength\",\n 1: \"shorter wavelength\"})\n\n self.assertEqual(2, len(screen_output.lines))\n self.assertEqual(2, len(screen_output.font_attr_segs))\n self.assertEqual(1, len(screen_output.font_attr_segs[0]))\n self.assertEqual(1, len(screen_output.font_attr_segs[1]))\n self.assertEqual(2, len(screen_output.annotations))\n\n self.assertEqual(2, screen_output.num_lines())\n\n def testRichTextLinesConstructorWithInvalidType(self):\n with self.assertRaisesRegex(ValueError, \"Unexpected type in lines\"):\n debugger_cli_common.RichTextLines(123)\n\n def testRichTextLinesConstructorWithString(self):\n # Test constructing a RichTextLines object with a string, instead of a list\n # of strings.\n screen_output = debugger_cli_common.RichTextLines(\n \"Roses are red\",\n 
font_attr_segs={0: [(0, 5, \"red\")]},\n annotations={0: \"longer wavelength\"})\n\n self.assertEqual(1, len(screen_output.lines))\n self.assertEqual(1, len(screen_output.font_attr_segs))\n self.assertEqual(1, len(screen_output.font_attr_segs[0]))\n self.assertEqual(1, len(screen_output.annotations))\n\n def testRichLinesAppendRichLine(self):\n rtl = debugger_cli_common.RichTextLines(\n \"Roses are red\",\n font_attr_segs={0: [(0, 5, \"red\")]})\n rtl.append_rich_line(debugger_cli_common.RichLine(\"Violets are \") +\n debugger_cli_common.RichLine(\"blue\", \"blue\"))\n self.assertEqual(2, len(rtl.lines))\n self.assertEqual(2, len(rtl.font_attr_segs))\n self.assertEqual(1, len(rtl.font_attr_segs[0]))\n self.assertEqual(1, len(rtl.font_attr_segs[1]))\n\n def testRichLineLenMethodWorks(self):\n self.assertEqual(0, len(debugger_cli_common.RichLine()))\n self.assertEqual(0, len(debugger_cli_common.RichLine(\"\")))\n self.assertEqual(1, len(debugger_cli_common.RichLine(\"x\")))\n self.assertEqual(6, len(debugger_cli_common.RichLine(\"x y z \", \"blue\")))\n\n def testRichTextLinesConstructorIncomplete(self):\n # Test RichTextLines constructor, with incomplete keyword arguments.\n screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]})\n\n self.assertEqual(2, len(screen_output.lines))\n self.assertEqual(2, len(screen_output.font_attr_segs))\n self.assertEqual(1, len(screen_output.font_attr_segs[0]))\n self.assertEqual(1, len(screen_output.font_attr_segs[1]))\n self.assertEqual({}, screen_output.annotations)\n\n def testModifyRichTextLinesObject(self):\n screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"])\n\n self.assertEqual(2, len(screen_output.lines))\n\n screen_output.lines.append(\"Sugar is sweet\")\n self.assertEqual(3, len(screen_output.lines))\n\n def testMergeRichTextLines(self):\n screen_output_1 = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]},\n annotations={0: \"longer wavelength\",\n 1: \"shorter wavelength\"})\n screen_output_2 = debugger_cli_common.RichTextLines(\n [\"Lilies are white\", \"Sunflowers are yellow\"],\n font_attr_segs={0: [(0, 6, \"white\")],\n 1: [(0, 7, \"yellow\")]},\n annotations={\n \"metadata\": \"foo\",\n 0: \"full spectrum\",\n 1: \"medium wavelength\"\n })\n\n screen_output_1.extend(screen_output_2)\n\n self.assertEqual(4, screen_output_1.num_lines())\n self.assertEqual([\n \"Roses are red\", \"Violets are blue\", \"Lilies are white\",\n \"Sunflowers are yellow\"\n ], screen_output_1.lines)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n 2: [(0, 6, \"white\")],\n 3: [(0, 7, \"yellow\")]\n }, screen_output_1.font_attr_segs)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n 2: [(0, 6, \"white\")],\n 3: [(0, 7, \"yellow\")]\n }, screen_output_1.font_attr_segs)\n self.assertEqual({\n \"metadata\": \"foo\",\n 0: \"longer wavelength\",\n 1: \"shorter wavelength\",\n 2: \"full spectrum\",\n 3: \"medium wavelength\"\n }, screen_output_1.annotations)\n\n def testMergeRichTextLinesEmptyOther(self):\n screen_output_1 = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]},\n annotations={0: \"longer wavelength\",\n 1: \"shorter wavelength\"})\n screen_output_2 = 
debugger_cli_common.RichTextLines([])\n\n screen_output_1.extend(screen_output_2)\n\n self.assertEqual(2, screen_output_1.num_lines())\n self.assertEqual([\"Roses are red\", \"Violets are blue\"],\n screen_output_1.lines)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n }, screen_output_1.font_attr_segs)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n }, screen_output_1.font_attr_segs)\n self.assertEqual({\n 0: \"longer wavelength\",\n 1: \"shorter wavelength\",\n }, screen_output_1.annotations)\n\n def testMergeRichTextLinesEmptySelf(self):\n screen_output_1 = debugger_cli_common.RichTextLines([])\n screen_output_2 = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]},\n annotations={0: \"longer wavelength\",\n 1: \"shorter wavelength\"})\n\n screen_output_1.extend(screen_output_2)\n\n self.assertEqual(2, screen_output_1.num_lines())\n self.assertEqual([\"Roses are red\", \"Violets are blue\"],\n screen_output_1.lines)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n }, screen_output_1.font_attr_segs)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n }, screen_output_1.font_attr_segs)\n self.assertEqual({\n 0: \"longer wavelength\",\n 1: \"shorter wavelength\",\n }, screen_output_1.annotations)\n\n def testAppendALineWithAttributeSegmentsWorks(self):\n screen_output_1 = debugger_cli_common.RichTextLines(\n [\"Roses are red\"],\n font_attr_segs={0: [(0, 5, \"red\")]},\n annotations={0: \"longer wavelength\"})\n\n screen_output_1.append(\"Violets are blue\", [(0, 7, \"blue\")])\n\n self.assertEqual([\"Roses are red\", \"Violets are blue\"],\n screen_output_1.lines)\n self.assertEqual({\n 0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")],\n }, screen_output_1.font_attr_segs)\n\n def testPrependALineWithAttributeSegmentsWorks(self):\n screen_output_1 = debugger_cli_common.RichTextLines(\n [\"Roses are red\"],\n font_attr_segs={0: [(0, 5, \"red\")]},\n annotations={0: \"longer wavelength\"})\n\n screen_output_1.prepend(\"Violets are blue\", font_attr_segs=[(0, 7, \"blue\")])\n\n self.assertEqual([\"Violets are blue\", \"Roses are red\"],\n screen_output_1.lines)\n self.assertEqual({\n 0: [(0, 7, \"blue\")],\n 1: [(0, 5, \"red\")],\n }, screen_output_1.font_attr_segs)\n\n def testWriteToFileSucceeds(self):\n screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]})\n\n fd, file_path = tempfile.mkstemp()\n os.close(fd) # file opened exclusively, so we need to close this\n # a better fix would be to make the API take a fd\n screen_output.write_to_file(file_path)\n\n with gfile.Open(file_path, \"r\") as f:\n self.assertEqual(\"Roses are red\\nViolets are blue\\n\", f.read())\n\n # Clean up.\n gfile.Remove(file_path)\n\n def testAttemptToWriteToADirectoryFails(self):\n screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]})\n\n with self.assertRaises(Exception):\n screen_output.write_to_file(\"/\")\n\n def testAttemptToWriteToFileInNonexistentDirectoryFails(self):\n screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]})\n\n file_path = os.path.join(tempfile.mkdtemp(), \"foo\", \"bar.txt\")\n with 
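Distilled from the RichTextLines tests in this record, a compact usage sketch of the constructor, `append`, and `extend`; every call below appears in the test file itself.

from tensorflow.python.debug.cli import debugger_cli_common

poem = debugger_cli_common.RichTextLines(
    ["Roses are red", "Violets are blue"],
    font_attr_segs={0: [(0, 5, "red")], 1: [(0, 7, "blue")]},
    annotations={0: "longer wavelength", 1: "shorter wavelength"})

# Append a line together with its own font attribute segments.
poem.append("Sugar is sweet", [(0, 5, "yellow")])

# extend() shifts the incoming line indices past the existing lines.
poem.extend(debugger_cli_common.RichTextLines(["Lilies are white"]))

print(poem.num_lines())  # 4
print(poem.lines[-1])    # Lilies are white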
self.assertRaises(Exception):\n screen_output.write_to_file(file_path)\n\n\nclass CommandHandlerRegistryTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._intentional_error_msg = \"Intentionally raised exception\"\n\n def _noop_handler(self, argv, screen_info=None):\n # A handler that does nothing other than returning \"Done.\"\n return debugger_cli_common.RichTextLines([\"Done.\"])\n\n def _handler_raising_exception(self, argv, screen_info=None):\n # A handler that intentionally raises an exception.\n raise RuntimeError(self._intentional_error_msg)\n\n def _handler_returning_wrong_type(self, argv, screen_info=None):\n # A handler that returns a wrong type, instead of the correct type\n # (RichTextLines).\n return \"Hello\"\n\n def _echo_screen_cols(self, argv, screen_info=None):\n # A handler that uses screen_info.\n return debugger_cli_common.RichTextLines(\n [\"cols = %d\" % screen_info[\"cols\"]])\n\n def _exiting_handler(self, argv, screen_info=None):\n \"\"\"A handler that exits with an exit token.\"\"\"\n\n if argv:\n exit_token = argv[0]\n else:\n exit_token = None\n\n raise debugger_cli_common.CommandLineExit(exit_token=exit_token)\n\n def testRegisterEmptyCommandPrefix(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n\n # Attempt to register an empty-string as a command prefix should trigger\n # an exception.\n with self.assertRaisesRegex(ValueError, \"Empty command prefix\"):\n registry.register_command_handler(\"\", self._noop_handler, \"\")\n\n def testRegisterAndInvokeHandler(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\"noop\", self._noop_handler, \"\")\n\n self.assertTrue(registry.is_registered(\"noop\"))\n self.assertFalse(registry.is_registered(\"beep\"))\n\n cmd_output = registry.dispatch_command(\"noop\", [])\n self.assertEqual([\"Done.\"], cmd_output.lines)\n\n # Attempt to invoke an unregistered command prefix should trigger an\n # exception.\n with self.assertRaisesRegex(ValueError, \"No handler is registered\"):\n registry.dispatch_command(\"beep\", [])\n\n # Empty command prefix should trigger an exception.\n with self.assertRaisesRegex(ValueError, \"Prefix is empty\"):\n registry.dispatch_command(\"\", [])\n\n def testExitingHandler(self):\n \"\"\"Test that exit exception is correctly raised.\"\"\"\n\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\"exit\", self._exiting_handler, \"\")\n\n self.assertTrue(registry.is_registered(\"exit\"))\n\n exit_token = None\n try:\n registry.dispatch_command(\"exit\", [\"foo\"])\n except debugger_cli_common.CommandLineExit as e:\n exit_token = e.exit_token\n\n self.assertEqual(\"foo\", exit_token)\n\n def testInvokeHandlerWithScreenInfo(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n\n # Register and invoke a command handler that uses screen_info.\n registry.register_command_handler(\"cols\", self._echo_screen_cols, \"\")\n\n cmd_output = registry.dispatch_command(\n \"cols\", [], screen_info={\"cols\": 100})\n self.assertEqual([\"cols = 100\"], cmd_output.lines)\n\n def testRegisterAndInvokeHandlerWithAliases(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\n \"noop\", self._noop_handler, \"\", prefix_aliases=[\"n\", \"NOOP\"])\n\n # is_registered() should work for full prefix and aliases.\n self.assertTrue(registry.is_registered(\"noop\"))\n self.assertTrue(registry.is_registered(\"n\"))\n 
self.assertTrue(registry.is_registered(\"NOOP\"))\n\n cmd_output = registry.dispatch_command(\"n\", [])\n self.assertEqual([\"Done.\"], cmd_output.lines)\n\n cmd_output = registry.dispatch_command(\"NOOP\", [])\n self.assertEqual([\"Done.\"], cmd_output.lines)\n\n def testHandlerWithWrongReturnType(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\"wrong_return\",\n self._handler_returning_wrong_type, \"\")\n\n # If the command handler fails to return a RichTextLines instance, an error\n # should be triggered.\n with self.assertRaisesRegex(\n ValueError,\n \"Return value from command handler.*is not None or a RichTextLines \"\n \"instance\"):\n registry.dispatch_command(\"wrong_return\", [])\n\n def testRegisterDuplicateHandlers(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\"noop\", self._noop_handler, \"\")\n\n # Registering the same command prefix more than once should trigger an\n # exception.\n with self.assertRaisesRegex(\n ValueError, \"A handler is already registered for command prefix\"):\n registry.register_command_handler(\"noop\", self._noop_handler, \"\")\n\n cmd_output = registry.dispatch_command(\"noop\", [])\n self.assertEqual([\"Done.\"], cmd_output.lines)\n\n def testRegisterDuplicateAliases(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\n \"noop\", self._noop_handler, \"\", prefix_aliases=[\"n\"])\n\n # Clash with existing alias.\n with self.assertRaisesRegex(ValueError,\n \"clashes with existing prefixes or aliases\"):\n registry.register_command_handler(\n \"cols\", self._echo_screen_cols, \"\", prefix_aliases=[\"n\"])\n\n # The name clash should have prevent the handler from being registered.\n self.assertFalse(registry.is_registered(\"cols\"))\n\n # Aliases can also clash with command prefixes.\n with self.assertRaisesRegex(ValueError,\n \"clashes with existing prefixes or aliases\"):\n registry.register_command_handler(\n \"cols\", self._echo_screen_cols, \"\", prefix_aliases=[\"noop\"])\n\n self.assertFalse(registry.is_registered(\"cols\"))\n\n def testDispatchHandlerRaisingException(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\"raise_exception\",\n self._handler_raising_exception, \"\")\n\n # The registry should catch and wrap exceptions that occur during command\n # handling.\n cmd_output = registry.dispatch_command(\"raise_exception\", [])\n # The error output contains a stack trace.\n # So the line count should be >= 2.\n self.assertGreater(len(cmd_output.lines), 2)\n self.assertTrue(cmd_output.lines[0].startswith(\n \"Error occurred during handling of command\"))\n self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))\n\n def testRegisterNonCallableHandler(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n\n # Attempt to register a non-callable handler should fail.\n with self.assertRaisesRegex(ValueError, \"handler is not callable\"):\n registry.register_command_handler(\"non_callable\", 1, \"\")\n\n def testRegisterHandlerWithInvalidHelpInfoType(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n\n with self.assertRaisesRegex(ValueError, \"help_info is not a str\"):\n registry.register_command_handler(\"noop\", self._noop_handler, [\"foo\"])\n\n def testGetHelpFull(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\n \"noop\",\n 
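A condensed sketch of the register/dispatch flow exercised by these tests, using only calls shown here; a handler must return a RichTextLines (or None), otherwise dispatch_command raises ValueError as in testHandlerWithWrongReturnType.

from tensorflow.python.debug.cli import debugger_cli_common

def echo_cols(argv, screen_info=None):
    # Echo the screen width passed in via screen_info.
    return debugger_cli_common.RichTextLines(
        ["cols = %d" % screen_info["cols"]])

registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
    "cols", echo_cols, "Show screen width in number of columns.",
    prefix_aliases=["c"])

# Dispatching via the alias works the same as via the full prefix.
out = registry.dispatch_command("c", [], screen_info={"cols": 100})
print(out.lines)  # ['cols = 100']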
self._noop_handler,\n \"No operation.\\nI.e., do nothing.\",\n prefix_aliases=[\"n\", \"NOOP\"])\n registry.register_command_handler(\n \"cols\",\n self._echo_screen_cols,\n \"Show screen width in number of columns.\",\n prefix_aliases=[\"c\"])\n\n help_lines = registry.get_help().lines\n\n # The help info should list commands in alphabetically sorted order,\n # regardless of order in which the commands are registered.\n self.assertEqual(\"cols\", help_lines[0])\n self.assertTrue(help_lines[1].endswith(\"Aliases: c\"))\n self.assertFalse(help_lines[2])\n self.assertTrue(help_lines[3].endswith(\n \"Show screen width in number of columns.\"))\n\n self.assertFalse(help_lines[4])\n self.assertFalse(help_lines[5])\n\n # The default help command should appear in the help output.\n self.assertEqual(\"help\", help_lines[6])\n\n self.assertEqual(\"noop\", help_lines[12])\n self.assertTrue(help_lines[13].endswith(\"Aliases: n, NOOP\"))\n self.assertFalse(help_lines[14])\n self.assertTrue(help_lines[15].endswith(\"No operation.\"))\n self.assertTrue(help_lines[16].endswith(\"I.e., do nothing.\"))\n\n def testGetHelpSingleCommand(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\n \"noop\",\n self._noop_handler,\n \"No operation.\\nI.e., do nothing.\",\n prefix_aliases=[\"n\", \"NOOP\"])\n registry.register_command_handler(\n \"cols\",\n self._echo_screen_cols,\n \"Show screen width in number of columns.\",\n prefix_aliases=[\"c\"])\n\n # Get help info for one of the two commands, using full prefix.\n help_lines = registry.get_help(\"cols\").lines\n\n self.assertTrue(help_lines[0].endswith(\"cols\"))\n self.assertTrue(help_lines[1].endswith(\"Aliases: c\"))\n self.assertFalse(help_lines[2])\n self.assertTrue(help_lines[3].endswith(\n \"Show screen width in number of columns.\"))\n\n # Get help info for one of the two commands, using alias.\n help_lines = registry.get_help(\"c\").lines\n\n self.assertTrue(help_lines[0].endswith(\"cols\"))\n self.assertTrue(help_lines[1].endswith(\"Aliases: c\"))\n self.assertFalse(help_lines[2])\n self.assertTrue(help_lines[3].endswith(\n \"Show screen width in number of columns.\"))\n\n # Get help info for a nonexistent command.\n help_lines = registry.get_help(\"foo\").lines\n\n self.assertEqual(\"Invalid command prefix: \\\"foo\\\"\", help_lines[0])\n\n def testHelpCommandWithoutIntro(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\n \"noop\",\n self._noop_handler,\n \"No operation.\\nI.e., do nothing.\",\n prefix_aliases=[\"n\", \"NOOP\"])\n registry.register_command_handler(\n \"cols\",\n self._echo_screen_cols,\n \"Show screen width in number of columns.\",\n prefix_aliases=[\"c\"])\n\n # Get help for all commands.\n output = registry.dispatch_command(\"help\", [])\n self.assertEqual([\"cols\", \" Aliases: c\", \"\",\n \" Show screen width in number of columns.\", \"\", \"\",\n \"help\", \" Aliases: h\", \"\", \" Print this help message.\",\n \"\", \"\", \"noop\", \" Aliases: n, NOOP\", \"\",\n \" No operation.\", \" I.e., do nothing.\", \"\", \"\",\n \"version\", \" Aliases: ver\", \"\",\n \" Print the versions of TensorFlow and its key \"\n \"dependencies.\", \"\", \"\"],\n output.lines)\n\n # Get help for one specific command prefix.\n output = registry.dispatch_command(\"help\", [\"noop\"])\n self.assertEqual([\"noop\", \" Aliases: n, NOOP\", \"\", \" No operation.\",\n \" I.e., do nothing.\"], output.lines)\n\n # Get help for a nonexistent command 
prefix.\n output = registry.dispatch_command(\"help\", [\"foo\"])\n self.assertEqual([\"Invalid command prefix: \\\"foo\\\"\"], output.lines)\n\n def testHelpCommandWithIntro(self):\n registry = debugger_cli_common.CommandHandlerRegistry()\n registry.register_command_handler(\n \"noop\",\n self._noop_handler,\n \"No operation.\\nI.e., do nothing.\",\n prefix_aliases=[\"n\", \"NOOP\"])\n\n help_intro = debugger_cli_common.RichTextLines(\n [\"Introductory comments.\", \"\"])\n registry.set_help_intro(help_intro)\n\n output = registry.dispatch_command(\"help\", [])\n self.assertEqual(help_intro.lines + [\n \"help\", \" Aliases: h\", \"\", \" Print this help message.\", \"\", \"\",\n \"noop\", \" Aliases: n, NOOP\", \"\", \" No operation.\",\n \" I.e., do nothing.\", \"\", \"\",\n \"version\", \" Aliases: ver\", \"\",\n \" Print the versions of TensorFlow and its key dependencies.\", \"\", \"\"\n ], output.lines)\n\n\nclass RegexFindTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._orig_screen_output = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"])\n\n def testRegexFindWithoutExistingFontAttrSegs(self):\n new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,\n \"are\", \"yellow\")\n\n self.assertEqual(2, len(new_screen_output.font_attr_segs))\n self.assertEqual([(6, 9, \"yellow\")], new_screen_output.font_attr_segs[0])\n self.assertEqual([(8, 11, \"yellow\")], new_screen_output.font_attr_segs[1])\n\n # Check field in annotations carrying a list of matching line indices.\n self.assertEqual([0, 1], new_screen_output.annotations[\n debugger_cli_common.REGEX_MATCH_LINES_KEY])\n\n def testRegexFindWithExistingFontAttrSegs(self):\n # Add a font attribute segment first.\n self._orig_screen_output.font_attr_segs[0] = [(9, 12, \"red\")]\n self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))\n\n new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,\n \"are\", \"yellow\")\n self.assertEqual(2, len(new_screen_output.font_attr_segs))\n\n self.assertEqual([(6, 9, \"yellow\"), (9, 12, \"red\")],\n new_screen_output.font_attr_segs[0])\n\n self.assertEqual([0, 1], new_screen_output.annotations[\n debugger_cli_common.REGEX_MATCH_LINES_KEY])\n\n def testRegexFindWithNoMatches(self):\n new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,\n \"infrared\", \"yellow\")\n\n self.assertEqual({}, new_screen_output.font_attr_segs)\n self.assertEqual([], new_screen_output.annotations[\n debugger_cli_common.REGEX_MATCH_LINES_KEY])\n\n def testInvalidRegex(self):\n with self.assertRaisesRegex(ValueError, \"Invalid regular expression\"):\n debugger_cli_common.regex_find(self._orig_screen_output, \"[\", \"yellow\")\n\n def testRegexFindOnPrependedLinesWorks(self):\n rich_lines = debugger_cli_common.RichTextLines([\"Violets are blue\"])\n rich_lines.prepend([\"Roses are red\"])\n searched_rich_lines = debugger_cli_common.regex_find(\n rich_lines, \"red\", \"bold\")\n self.assertEqual(\n {0: [(10, 13, \"bold\")]}, searched_rich_lines.font_attr_segs)\n\n rich_lines = debugger_cli_common.RichTextLines([\"Violets are blue\"])\n rich_lines.prepend([\"A poem\"], font_attr_segs=[(0, 1, \"underline\")])\n searched_rich_lines = debugger_cli_common.regex_find(\n rich_lines, \"poem\", \"italic\")\n self.assertEqual(\n {0: [(0, 1, \"underline\"), (2, 6, \"italic\")]},\n searched_rich_lines.font_attr_segs)\n\n\nclass WrapScreenOutputTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n 
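The regex_find tests above boil down to the following sketch: the returned copy carries the highlight segments in font_attr_segs plus the matched line indices under REGEX_MATCH_LINES_KEY.

from tensorflow.python.debug.cli import debugger_cli_common

lines = debugger_cli_common.RichTextLines(
    ["Roses are red", "Violets are blue"])
found = debugger_cli_common.regex_find(lines, "are", "yellow")

print(found.font_attr_segs)
# {0: [(6, 9, 'yellow')], 1: [(8, 11, 'yellow')]}
print(found.annotations[debugger_cli_common.REGEX_MATCH_LINES_KEY])
# [0, 1]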
self._orig_screen_output = debugger_cli_common.RichTextLines(\n [\"Folk song:\", \"Roses are red\", \"Violets are blue\"],\n font_attr_segs={1: [(0, 5, \"red\"), (6, 9, \"gray\"), (10, 12, \"red\"),\n (12, 13, \"crimson\")],\n 2: [(0, 7, \"blue\"), (8, 11, \"gray\"), (12, 14, \"blue\"),\n (14, 16, \"indigo\")]},\n annotations={1: \"longer wavelength\",\n 2: \"shorter wavelength\"})\n\n def testNoActualWrapping(self):\n # Large column limit should lead to no actual wrapping.\n out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(\n self._orig_screen_output, 100)\n\n self.assertEqual(self._orig_screen_output.lines, out.lines)\n self.assertEqual(self._orig_screen_output.font_attr_segs,\n out.font_attr_segs)\n self.assertEqual(self._orig_screen_output.annotations, out.annotations)\n self.assertEqual(new_line_indices, [0, 1, 2])\n\n def testWrappingWithAttrCutoff(self):\n out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(\n self._orig_screen_output, 11)\n\n # Add non-row-index field to out.\n out.annotations[\"metadata\"] = \"foo\"\n\n # Check wrapped text.\n self.assertEqual(5, len(out.lines))\n self.assertEqual(\"Folk song:\", out.lines[0])\n self.assertEqual(\"Roses are r\", out.lines[1])\n self.assertEqual(\"ed\", out.lines[2])\n self.assertEqual(\"Violets are\", out.lines[3])\n self.assertEqual(\" blue\", out.lines[4])\n\n # Check wrapped font_attr_segs.\n self.assertFalse(0 in out.font_attr_segs)\n self.assertEqual([(0, 5, \"red\"), (6, 9, \"gray\"), (10, 11, \"red\")],\n out.font_attr_segs[1])\n self.assertEqual([(0, 1, \"red\"), (1, 2, \"crimson\")], out.font_attr_segs[2])\n self.assertEqual([(0, 7, \"blue\"), (8, 11, \"gray\")], out.font_attr_segs[3])\n self.assertEqual([(1, 3, \"blue\"), (3, 5, \"indigo\")], out.font_attr_segs[4])\n\n # Check annotations.\n self.assertFalse(0 in out.annotations)\n self.assertEqual(\"longer wavelength\", out.annotations[1])\n self.assertFalse(2 in out.annotations)\n self.assertEqual(\"shorter wavelength\", out.annotations[3])\n self.assertFalse(4 in out.annotations)\n\n # Check that the non-row-index field is present in output.\n self.assertEqual(\"foo\", out.annotations[\"metadata\"])\n\n self.assertEqual(new_line_indices, [0, 1, 3])\n\n def testWrappingWithMultipleAttrCutoff(self):\n self._orig_screen_output = debugger_cli_common.RichTextLines(\n [\"Folk song:\", \"Roses are red\", \"Violets are blue\"],\n font_attr_segs={1: [(0, 12, \"red\")],\n 2: [(1, 16, \"blue\")]},\n annotations={1: \"longer wavelength\",\n 2: \"shorter wavelength\"})\n\n out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(\n self._orig_screen_output, 5)\n\n # Check wrapped text.\n self.assertEqual(9, len(out.lines))\n self.assertEqual(\"Folk \", out.lines[0])\n self.assertEqual(\"song:\", out.lines[1])\n self.assertEqual(\"Roses\", out.lines[2])\n self.assertEqual(\" are \", out.lines[3])\n self.assertEqual(\"red\", out.lines[4])\n self.assertEqual(\"Viole\", out.lines[5])\n self.assertEqual(\"ts ar\", out.lines[6])\n self.assertEqual(\"e blu\", out.lines[7])\n self.assertEqual(\"e\", out.lines[8])\n\n # Check wrapped font_attr_segs.\n self.assertFalse(0 in out.font_attr_segs)\n self.assertFalse(1 in out.font_attr_segs)\n self.assertEqual([(0, 5, \"red\")], out.font_attr_segs[2])\n self.assertEqual([(0, 5, \"red\")], out.font_attr_segs[3])\n self.assertEqual([(0, 2, \"red\")], out.font_attr_segs[4])\n self.assertEqual([(1, 5, \"blue\")], out.font_attr_segs[5])\n self.assertEqual([(0, 5, \"blue\")], out.font_attr_segs[6])\n 
self.assertEqual([(0, 5, \"blue\")], out.font_attr_segs[7])\n self.assertEqual([(0, 1, \"blue\")], out.font_attr_segs[8])\n\n # Check annotations.\n self.assertFalse(0 in out.annotations)\n self.assertFalse(1 in out.annotations)\n self.assertEqual(\"longer wavelength\", out.annotations[2])\n self.assertFalse(3 in out.annotations)\n self.assertFalse(4 in out.annotations)\n self.assertEqual(\"shorter wavelength\", out.annotations[5])\n self.assertFalse(6 in out.annotations)\n self.assertFalse(7 in out.annotations)\n self.assertFalse(8 in out.annotations)\n\n self.assertEqual(new_line_indices, [0, 2, 5])\n\n def testWrappingInvalidArguments(self):\n with self.assertRaisesRegex(ValueError,\n \"Invalid type of input screen_output\"):\n debugger_cli_common.wrap_rich_text_lines(\"foo\", 12)\n\n with self.assertRaisesRegex(ValueError, \"Invalid type of input cols\"):\n debugger_cli_common.wrap_rich_text_lines(\n debugger_cli_common.RichTextLines([\"foo\", \"bar\"]), \"12\")\n\n def testWrappingEmptyInput(self):\n out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(\n debugger_cli_common.RichTextLines([]), 10)\n\n self.assertEqual([], out.lines)\n self.assertEqual([], new_line_indices)\n\n\nclass SliceRichTextLinesTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._original = debugger_cli_common.RichTextLines(\n [\"Roses are red\", \"Violets are blue\"],\n font_attr_segs={0: [(0, 5, \"red\")],\n 1: [(0, 7, \"blue\")]},\n annotations={\n 0: \"longer wavelength\",\n 1: \"shorter wavelength\",\n \"foo_metadata\": \"bar\"\n })\n\n def testSliceBeginning(self):\n sliced = self._original.slice(0, 1)\n\n self.assertEqual([\"Roses are red\"], sliced.lines)\n self.assertEqual({0: [(0, 5, \"red\")]}, sliced.font_attr_segs)\n\n # Non-line-number metadata should be preserved.\n self.assertEqual({\n 0: \"longer wavelength\",\n \"foo_metadata\": \"bar\"\n }, sliced.annotations)\n\n self.assertEqual(1, sliced.num_lines())\n\n def testSliceEnd(self):\n sliced = self._original.slice(1, 2)\n\n self.assertEqual([\"Violets are blue\"], sliced.lines)\n\n # The line index should have changed from 1 to 0.\n self.assertEqual({0: [(0, 7, \"blue\")]}, sliced.font_attr_segs)\n self.assertEqual({\n 0: \"shorter wavelength\",\n \"foo_metadata\": \"bar\"\n }, sliced.annotations)\n\n self.assertEqual(1, sliced.num_lines())\n\n def testAttemptSliceWithNegativeIndex(self):\n with self.assertRaisesRegex(ValueError, \"Encountered negative index\"):\n self._original.slice(0, -1)\n\n\nclass TabCompletionRegistryTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._tc_reg = debugger_cli_common.TabCompletionRegistry()\n\n # Register the items in an unsorted order deliberately, to test the sorted\n # output from get_completions().\n self._tc_reg.register_tab_comp_context(\n [\"print_tensor\", \"pt\"],\n [\"node_b:1\", \"node_b:2\", \"node_a:1\", \"node_a:2\"])\n self._tc_reg.register_tab_comp_context([\"node_info\"],\n [\"node_c\", \"node_b\", \"node_a\"])\n\n def testTabCompletion(self):\n # The returned completions should have sorted order.\n self.assertEqual(\n ([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n\n self.assertEqual(([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"],\n \"node_\"), self._tc_reg.get_completions(\"pt\", \"\"))\n\n self.assertEqual(([\"node_a:1\", \"node_a:2\"], \"node_a:\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_a\"))\n\n self.assertEqual(([\"node_a:1\"], 
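The wrapping behaviour verified above can be summarised in a short sketch; wrap_rich_text_lines returns both the wrapped RichTextLines and the new index of each original line, and splits font attribute segments across the wrapped rows.

from tensorflow.python.debug.cli import debugger_cli_common

original = debugger_cli_common.RichTextLines(
    ["Folk song:", "Roses are red"], font_attr_segs={1: [(0, 12, "red")]})
wrapped, new_line_indices = debugger_cli_common.wrap_rich_text_lines(original, 5)

print(wrapped.lines)              # ['Folk ', 'song:', 'Roses', ' are ', 'red']
print(new_line_indices)           # [0, 2]
print(wrapped.font_attr_segs[2])  # [(0, 5, 'red')]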
\"node_a:1\"),\n self._tc_reg.get_completions(\"pt\", \"node_a:1\"))\n\n self.assertEqual(([], \"\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_a:3\"))\n\n self.assertEqual((None, None), self._tc_reg.get_completions(\"foo\", \"node_\"))\n\n def testExtendCompletionItems(self):\n self.assertEqual(\n ([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n self.assertEqual(([\"node_a\", \"node_b\", \"node_c\"], \"node_\"),\n self._tc_reg.get_completions(\"node_info\", \"node_\"))\n\n self._tc_reg.extend_comp_items(\"print_tensor\", [\"node_A:1\", \"node_A:2\"])\n\n self.assertEqual(([\"node_A:1\", \"node_A:2\", \"node_a:1\", \"node_a:2\",\n \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n\n # Extending the completions for one of the context's context words should\n # have taken effect on other context words of the same context as well.\n self.assertEqual(([\"node_A:1\", \"node_A:2\", \"node_a:1\", \"node_a:2\",\n \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"pt\", \"node_\"))\n self.assertEqual(([\"node_a\", \"node_b\", \"node_c\"], \"node_\"),\n self._tc_reg.get_completions(\"node_info\", \"node_\"))\n\n def testExtendCompletionItemsNonexistentContext(self):\n with self.assertRaisesRegex(KeyError,\n \"Context word \\\"foo\\\" has not been registered\"):\n self._tc_reg.extend_comp_items(\"foo\", [\"node_A:1\", \"node_A:2\"])\n\n def testRemoveCompletionItems(self):\n self.assertEqual(\n ([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n self.assertEqual(([\"node_a\", \"node_b\", \"node_c\"], \"node_\"),\n self._tc_reg.get_completions(\"node_info\", \"node_\"))\n\n self._tc_reg.remove_comp_items(\"pt\", [\"node_a:1\", \"node_a:2\"])\n\n self.assertEqual(([\"node_b:1\", \"node_b:2\"], \"node_b:\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n self.assertEqual(([\"node_a\", \"node_b\", \"node_c\"], \"node_\"),\n self._tc_reg.get_completions(\"node_info\", \"node_\"))\n\n def testRemoveCompletionItemsNonexistentContext(self):\n with self.assertRaisesRegex(KeyError,\n \"Context word \\\"foo\\\" has not been registered\"):\n self._tc_reg.remove_comp_items(\"foo\", [\"node_a:1\", \"node_a:2\"])\n\n def testDeregisterContext(self):\n self.assertEqual(\n ([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n self.assertEqual(([\"node_a\", \"node_b\", \"node_c\"], \"node_\"),\n self._tc_reg.get_completions(\"node_info\", \"node_\"))\n\n self._tc_reg.deregister_context([\"print_tensor\"])\n\n self.assertEqual((None, None),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n\n # The alternative context word should be unaffected.\n self.assertEqual(\n ([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"pt\", \"node_\"))\n\n def testDeregisterNonexistentContext(self):\n self.assertEqual(\n ([\"node_a:1\", \"node_a:2\", \"node_b:1\", \"node_b:2\"], \"node_\"),\n self._tc_reg.get_completions(\"print_tensor\", \"node_\"))\n self.assertEqual(([\"node_a\", \"node_b\", \"node_c\"], \"node_\"),\n self._tc_reg.get_completions(\"node_info\", \"node_\"))\n\n self._tc_reg.deregister_context([\"print_tensor\"])\n\n with self.assertRaisesRegex(\n KeyError,\n \"Cannot deregister unregistered context word 
\\\"print_tensor\\\"\"):\n self._tc_reg.deregister_context([\"print_tensor\"])\n\n\nclass CommandHistoryTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._fd, self._history_file_path = tempfile.mkstemp()\n self._cmd_hist = debugger_cli_common.CommandHistory(\n limit=3, history_file_path=self._history_file_path)\n\n def tearDown(self):\n if os.path.isfile(self._history_file_path):\n os.close(self._fd)\n os.remove(self._history_file_path)\n\n def _restoreFileReadWritePermissions(self, file_path):\n os.chmod(file_path,\n (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR |\n stat.S_IWGRP | stat.S_IWOTH))\n\n def testLookUpMostRecent(self):\n self.assertEqual([], self._cmd_hist.most_recent_n(3))\n\n self._cmd_hist.add_command(\"list_tensors\")\n self._cmd_hist.add_command(\"node_info node_a\")\n\n self.assertEqual([\"node_info node_a\"], self._cmd_hist.most_recent_n(1))\n self.assertEqual([\"list_tensors\", \"node_info node_a\"],\n self._cmd_hist.most_recent_n(2))\n self.assertEqual([\"list_tensors\", \"node_info node_a\"],\n self._cmd_hist.most_recent_n(3))\n\n self._cmd_hist.add_command(\"node_info node_b\")\n\n self.assertEqual([\"node_info node_b\"], self._cmd_hist.most_recent_n(1))\n self.assertEqual([\"node_info node_a\", \"node_info node_b\"],\n self._cmd_hist.most_recent_n(2))\n self.assertEqual([\"list_tensors\", \"node_info node_a\", \"node_info node_b\"],\n self._cmd_hist.most_recent_n(3))\n self.assertEqual([\"list_tensors\", \"node_info node_a\", \"node_info node_b\"],\n self._cmd_hist.most_recent_n(4))\n\n # Go over the limit.\n self._cmd_hist.add_command(\"node_info node_a\")\n\n self.assertEqual([\"node_info node_a\"], self._cmd_hist.most_recent_n(1))\n self.assertEqual([\"node_info node_b\", \"node_info node_a\"],\n self._cmd_hist.most_recent_n(2))\n self.assertEqual(\n [\"node_info node_a\", \"node_info node_b\", \"node_info node_a\"],\n self._cmd_hist.most_recent_n(3))\n self.assertEqual(\n [\"node_info node_a\", \"node_info node_b\", \"node_info node_a\"],\n self._cmd_hist.most_recent_n(4))\n\n def testLookUpPrefix(self):\n self._cmd_hist.add_command(\"node_info node_b\")\n self._cmd_hist.add_command(\"list_tensors\")\n self._cmd_hist.add_command(\"node_info node_a\")\n\n self.assertEqual([\"node_info node_b\", \"node_info node_a\"],\n self._cmd_hist.lookup_prefix(\"node_info\", 10))\n\n self.assertEqual([\"node_info node_a\"], self._cmd_hist.lookup_prefix(\n \"node_info\", 1))\n\n self.assertEqual([], self._cmd_hist.lookup_prefix(\"print_tensor\", 10))\n\n def testAddNonStrCommand(self):\n with self.assertRaisesRegex(\n TypeError, \"Attempt to enter non-str entry to command history\"):\n self._cmd_hist.add_command([\"print_tensor node_a:0\"])\n\n def testRepeatingCommandsDoNotGetLoggedRepeatedly(self):\n self._cmd_hist.add_command(\"help\")\n self._cmd_hist.add_command(\"help\")\n\n self.assertEqual([\"help\"], self._cmd_hist.most_recent_n(2))\n\n def testLoadingCommandHistoryFileObeysLimit(self):\n self._cmd_hist.add_command(\"help 1\")\n self._cmd_hist.add_command(\"help 2\")\n self._cmd_hist.add_command(\"help 3\")\n self._cmd_hist.add_command(\"help 4\")\n\n cmd_hist_2 = debugger_cli_common.CommandHistory(\n limit=3, history_file_path=self._history_file_path)\n self.assertEqual([\"help 2\", \"help 3\", \"help 4\"],\n cmd_hist_2.most_recent_n(3))\n\n with open(self._history_file_path, \"rt\") as f:\n self.assertEqual(\n [\"help 2\\n\", \"help 3\\n\", \"help 4\\n\"], f.readlines())\n\n def testCommandHistoryHandlesReadingIOErrorGraciously(self):\n 
with open(self._history_file_path, \"wt\") as f:\n f.write(\"help\\n\")\n\n # Change file to not readable by anyone.\n os.chmod(self._history_file_path, 0)\n\n # The creation of a CommandHistory object should not error out.\n debugger_cli_common.CommandHistory(\n limit=3, history_file_path=self._history_file_path)\n\n self._restoreFileReadWritePermissions(self._history_file_path)\n\n\nclass MenuNodeTest(test_util.TensorFlowTestCase):\n\n def testCommandTypeConstructorSucceeds(self):\n menu_node = debugger_cli_common.MenuItem(\"water flower\", \"water_flower\")\n\n self.assertEqual(\"water flower\", menu_node.caption)\n self.assertEqual(\"water_flower\", menu_node.content)\n\n def testDisableWorks(self):\n menu_node = debugger_cli_common.MenuItem(\"water flower\", \"water_flower\")\n self.assertTrue(menu_node.is_enabled())\n\n menu_node.disable()\n self.assertFalse(menu_node.is_enabled())\n menu_node.enable()\n self.assertTrue(menu_node.is_enabled())\n\n def testConstructAsDisabledWorks(self):\n menu_node = debugger_cli_common.MenuItem(\n \"water flower\", \"water_flower\", enabled=False)\n self.assertFalse(menu_node.is_enabled())\n\n menu_node.enable()\n self.assertTrue(menu_node.is_enabled())\n\n\nclass MenuTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self.menu = debugger_cli_common.Menu()\n self.assertEqual(0, self.menu.num_items())\n\n self.node1 = debugger_cli_common.MenuItem(\"water flower\", \"water_flower\")\n self.node2 = debugger_cli_common.MenuItem(\n \"measure wavelength\", \"measure_wavelength\")\n self.menu.append(self.node1)\n self.menu.append(self.node2)\n self.assertEqual(2, self.menu.num_items())\n\n def testFormatAsSingleLineWithStrItemAttrsWorks(self):\n output = self.menu.format_as_single_line(\n prefix=\"Menu: \", divider=\", \", enabled_item_attrs=\"underline\")\n self.assertEqual([\"Menu: water flower, measure wavelength, \"], output.lines)\n self.assertEqual((6, 18, [self.node1, \"underline\"]),\n output.font_attr_segs[0][0])\n self.assertEqual((20, 38, [self.node2, \"underline\"]),\n output.font_attr_segs[0][1])\n self.assertEqual({}, output.annotations)\n\n def testFormatAsSingleLineWithListItemAttrsWorks(self):\n output = self.menu.format_as_single_line(\n prefix=\"Menu: \", divider=\", \", enabled_item_attrs=[\"underline\", \"bold\"])\n self.assertEqual([\"Menu: water flower, measure wavelength, \"], output.lines)\n self.assertEqual((6, 18, [self.node1, \"underline\", \"bold\"]),\n output.font_attr_segs[0][0])\n self.assertEqual((20, 38, [self.node2, \"underline\", \"bold\"]),\n output.font_attr_segs[0][1])\n self.assertEqual({}, output.annotations)\n\n def testFormatAsSingleLineWithNoneItemAttrsWorks(self):\n output = self.menu.format_as_single_line(prefix=\"Menu: \", divider=\", \")\n self.assertEqual([\"Menu: water flower, measure wavelength, \"], output.lines)\n self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])\n self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])\n self.assertEqual({}, output.annotations)\n\n def testInsertNode(self):\n self.assertEqual([\"water flower\", \"measure wavelength\"],\n self.menu.captions())\n\n node2 = debugger_cli_common.MenuItem(\"write poem\", \"write_poem\")\n self.menu.insert(1, node2)\n self.assertEqual([\"water flower\", \"write poem\", \"measure wavelength\"],\n self.menu.captions())\n\n output = self.menu.format_as_single_line(prefix=\"Menu: \", divider=\", \")\n self.assertEqual([\"Menu: water flower, write poem, measure wavelength, \"],\n output.lines)\n\n def 
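A short sketch of the menu classes exercised in these tests; MenuItem takes a caption, a content string, and an optional enabled flag, and format_as_single_line renders the whole menu as one RichTextLines row.

from tensorflow.python.debug.cli import debugger_cli_common

menu = debugger_cli_common.Menu()
menu.append(debugger_cli_common.MenuItem("water flower", "water_flower"))
menu.append(debugger_cli_common.MenuItem(
    "measure wavelength", "measure_wavelength", enabled=False))

line = menu.format_as_single_line(
    prefix="Menu: ", divider=", ", enabled_item_attrs="underline")
print(line.lines)       # ['Menu: water flower, measure wavelength, ']
print(menu.captions())  # ['water flower', 'measure wavelength']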
testFormatAsSingleLineWithDisabledNode(self):\n node2 = debugger_cli_common.MenuItem(\n \"write poem\", \"write_poem\", enabled=False)\n self.menu.append(node2)\n\n output = self.menu.format_as_single_line(\n prefix=\"Menu: \", divider=\", \", disabled_item_attrs=\"bold\")\n self.assertEqual([\"Menu: water flower, measure wavelength, write poem, \"],\n output.lines)\n self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])\n self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])\n self.assertEqual((40, 50, [\"bold\"]), output.font_attr_segs[0][2])\n\n\nclass GetTensorFlowVersionLinesTest(test_util.TensorFlowTestCase):\n\n def testGetVersionWithoutDependencies(self):\n out = debugger_cli_common.get_tensorflow_version_lines()\n self.assertEqual(2, len(out.lines))\n self.assertEqual(\"TensorFlow version: %s\" % pywrap_tf_session.__version__,\n out.lines[0])\n\n def testGetVersionWithDependencies(self):\n out = debugger_cli_common.get_tensorflow_version_lines(True)\n self.assertIn(\"TensorFlow version: %s\" % pywrap_tf_session.__version__,\n out.lines)\n self.assertIn(\" numpy: %s\" % np.__version__, out.lines)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "output": ["WrapScreenOutputTest", "MenuNodeTest", "CommandLineExitTest", "MenuTest", "CommandHistoryTest", "CommandHandlerRegistryTest", "SliceRichTextLinesTest", "RichTextLinesTest", "RegexFindTest", "GetTensorFlowVersionLinesTest", "TabCompletionRegistryTest"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/debug/cli/debugger_cli_common_test.py", "file_length": 15134, "symbol_dict": [{"symbol": "RichTextLinesTest", "type": "mannual_defined_class", "byte_location": 1513, "location": 432}, {"symbol": "TabCompletionRegistryTest", "type": "mannual_defined_class", "byte_location": 31691, "location": 10358}, {"symbol": "WrapScreenOutputTest", "type": "mannual_defined_class", "byte_location": 25206, "location": 8009}, {"symbol": "SliceRichTextLinesTest", "type": "mannual_defined_class", "byte_location": 30310, "location": 9868}, {"symbol": "MenuNodeTest", "type": "mannual_defined_class", "byte_location": 40671, "location": 13612}, {"symbol": "RegexFindTest", "type": "mannual_defined_class", "byte_location": 22465, "location": 7084}, {"symbol": "CommandHandlerRegistryTest", "type": "mannual_defined_class", "byte_location": 10488, "location": 3572}, {"symbol": "MenuTest", "type": "mannual_defined_class", "byte_location": 41508, "location": 13884}, {"symbol": "CommandLineExitTest", "type": "mannual_defined_class", "byte_location": 1074, "location": 285}, {"symbol": "GetTensorFlowVersionLinesTest", "type": "mannual_defined_class", "byte_location": 44586, "location": 14920}, {"symbol": "CommandHistoryTest", "type": "mannual_defined_class", "byte_location": 36608, "location": 12107}]}} {"input": "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Building Blocks of TensorFlow Debugger Command-Line Interface.\"\"\"\nimport copy\nimport os\nimport re\nimport traceback\n\nimport numpy as np\n\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.platform import gfile\n\nHELP_INDENT = \" \"\n\nEXPLICIT_USER_EXIT = \"explicit_user_exit\"\nREGEX_MATCH_LINES_KEY = \"regex_match_lines\"\nINIT_SCROLL_POS_KEY = \"init_scroll_pos\"\n\nMAIN_MENU_KEY = \"mm:\"\n\n\nclass CommandLineExit(Exception):\n\n def __init__(self, exit_token=None):\n Exception.__init__(self)\n self._exit_token = exit_token\n\n @property\n def exit_token(self):\n return self._exit_token\n\n\nclass RichLine:\n \"\"\"Rich single-line text.\n\n Attributes:\n text: A plain string, the raw text represented by this object. Should not\n contain newlines.\n font_attr_segs: A list of (start, end, font attribute) triples, representing\n richness information applied to substrings of text.\n \"\"\"\n\n def __init__(self, text=\"\", font_attr=None):\n \"\"\"Construct a RichLine with no rich attributes or a single attribute.\n\n Args:\n text: Raw text string\n font_attr: If specified, a single font attribute to be applied to the\n entire text. Extending this object via concatenation allows creation\n of text with varying attributes.\n \"\"\"\n # TODO(ebreck) Make .text and .font_attr protected members when we no\n # longer need public access.\n self.text = text\n if font_attr:\n self.font_attr_segs = [(0, len(text), font_attr)]\n else:\n self.font_attr_segs = []\n\n def __add__(self, other):\n \"\"\"Concatenate two chunks of maybe rich text to make a longer rich line.\n\n Does not modify self.\n\n Args:\n other: Another piece of text to concatenate with this one.\n If it is a plain str, it will be appended to this string with no\n attributes. 
If it is a RichLine, it will be appended to this string\n with its attributes preserved.\n\n Returns:\n A new RichLine comprising both chunks of text, with appropriate\n attributes applied to the corresponding substrings.\n \"\"\"\n ret = RichLine()\n if isinstance(other, str):\n ret.text = self.text + other\n ret.font_attr_segs = self.font_attr_segs[:]\n return ret\n elif isinstance(other, RichLine):\n ret.text = self.text + other.text\n ret.font_attr_segs = self.font_attr_segs[:]\n old_len = len(self.text)\n for start, end, font_attr in other.font_attr_segs:\n ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))\n return ret\n else:\n raise TypeError(\"%r cannot be concatenated with a RichLine\" % other)\n\n def __len__(self):\n return len(self.text)\n\n\ndef rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):\n \"\"\"Convert a list of RichLine objects or strings to a RichTextLines object.\n\n Args:\n rich_text_list: a list of RichLine objects or strings\n annotations: annotations for the resultant RichTextLines object.\n\n Returns:\n A corresponding RichTextLines object.\n \"\"\"\n lines = []\n font_attr_segs = {}\n for i, rl in enumerate(rich_text_list):\n if isinstance(rl, RichLine):\n lines.append(rl.text)\n if rl.font_attr_segs:\n font_attr_segs[i] = rl.font_attr_segs\n else:\n lines.append(rl)\n return RichTextLines(lines, font_attr_segs, annotations=annotations)\n\n\ndef get_tensorflow_version_lines(include_dependency_versions=False):\n \"\"\"Generate RichTextLines with TensorFlow version info.\n\n Args:\n include_dependency_versions: Include the version of TensorFlow's key\n dependencies, such as numpy.\n\n Returns:\n A formatted, multi-line `RichTextLines` object.\n \"\"\"\n lines = [\"TensorFlow version: %s\" % pywrap_tf_session.__version__]\n lines.append(\"\")\n if include_dependency_versions:\n lines.append(\"Dependency version(s):\")\n lines.append(\" numpy: %s\" % np.__version__)\n lines.append(\"\")\n return RichTextLines(lines)\n\n\nclass RichTextLines:\n \"\"\"Rich multi-line text.\n\n Line-by-line text output, with font attributes (e.g., color) and annotations\n (e.g., indices in a multi-dimensional tensor). Used as the text output of CLI\n commands. Can be rendered on terminal environments such as curses.\n\n This is not to be confused with Rich Text Format (RTF). This class is for text\n lines only.\n \"\"\"\n\n def __init__(self, lines, font_attr_segs=None, annotations=None):\n \"\"\"Constructor of RichTextLines.\n\n Args:\n lines: A list of str or a single str, representing text output to\n screen. The latter case is for convenience when the text output is\n single-line.\n font_attr_segs: A map from 0-based row index to a list of 3-tuples.\n It lists segments in each row that have special font attributes, such\n as colors, that are not the default attribute. For example:\n {1: [(0, 3, \"red\"), (4, 7, \"green\")], 2: [(10, 20, \"yellow\")]}\n\n In each tuple, the 1st element is the start index of the segment. The\n 2nd element is the end index, in an \"open interval\" fashion. The 3rd\n element is an object or a list of objects that represents the font\n attribute. Colors are represented as strings as in the examples above.\n annotations: A map from 0-based row index to any object for annotating\n the row. A typical use example is annotating rows of the output as\n indices in a multi-dimensional tensor. 
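The concatenation semantics described in __add__ above, as a tiny sketch: attribute segments from the right-hand operand are shifted by the length of the left-hand text, and a list of RichLine objects or plain strings can then be folded into a RichTextLines.

from tensorflow.python.debug.cli import debugger_cli_common

line = (debugger_cli_common.RichLine("Roses are ") +
        debugger_cli_common.RichLine("red", "red"))
print(line.text)            # Roses are red
print(line.font_attr_segs)  # [(10, 13, 'red')]

screen = debugger_cli_common.rich_text_lines_from_rich_line_list(
    [line, "Violets are blue"])
print(screen.lines)           # ['Roses are red', 'Violets are blue']
print(screen.font_attr_segs)  # {0: [(10, 13, 'red')]}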
For example, consider the\n following text representation of a 3x2x2 tensor:\n [[[0, 0], [0, 0]],\n [[0, 0], [0, 0]],\n [[0, 0], [0, 0]]]\n The annotation can indicate the indices of the first element shown in\n each row, i.e.,\n {0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]}\n This information can make display of tensors on screen clearer and can\n help the user navigate (scroll) to the desired location in a large\n tensor.\n\n Raises:\n ValueError: If lines is of invalid type.\n \"\"\"\n if isinstance(lines, list):\n self._lines = lines\n elif isinstance(lines, str):\n self._lines = [lines]\n else:\n raise ValueError(\"Unexpected type in lines: %s\" % type(lines))\n\n self._font_attr_segs = font_attr_segs\n if not self._font_attr_segs:\n self._font_attr_segs = {}\n # TODO(cais): Refactor to collections.defaultdict(list) to simplify code.\n\n self._annotations = annotations\n if not self._annotations:\n self._annotations = {}\n # TODO(cais): Refactor to collections.defaultdict(list) to simplify code.\n\n @property\n def lines(self):\n return self._lines\n\n @property\n def font_attr_segs(self):\n return self._font_attr_segs\n\n @property\n def annotations(self):\n return self._annotations\n\n def num_lines(self):\n return len(self._lines)\n\n def slice(self, begin, end):\n \"\"\"Slice a RichTextLines object.\n\n The object itself is not changed. A sliced instance is returned.\n\n Args:\n begin: (int) Beginning line index (inclusive). Must be >= 0.\n end: (int) Ending line index (exclusive). Must be >= 0.\n\n Returns:\n (RichTextLines) Sliced output instance of RichTextLines.\n\n Raises:\n ValueError: If begin or end is negative.\n \"\"\"\n\n if begin < 0 or end < 0:\n raise ValueError(\"Encountered negative index.\")\n\n # Copy lines.\n lines = self.lines[begin:end]\n\n # Slice font attribute segments.\n font_attr_segs = {}\n for key in self.font_attr_segs:\n if key >= begin and key < end:\n font_attr_segs[key - begin] = self.font_attr_segs[key]\n\n # Slice annotations.\n annotations = {}\n for key in self.annotations:\n if not isinstance(key, int):\n # Annotations can contain keys that are not line numbers.\n annotations[key] = self.annotations[key]\n elif key >= begin and key < end:\n annotations[key - begin] = self.annotations[key]\n\n return RichTextLines(\n lines, font_attr_segs=font_attr_segs, annotations=annotations)\n\n def extend(self, other):\n \"\"\"Extend this instance of RichTextLines with another instance.\n\n The extension takes effect on the text lines, the font attribute segments,\n as well as the annotations. The line indices in the font attribute\n segments and the annotations are adjusted to account for the existing\n lines. 
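The slice() index handling documented above, in a brief sketch: line-keyed font attributes and annotations are re-based to the slice, while non-line-number annotation keys are carried over unchanged.

from tensorflow.python.debug.cli import debugger_cli_common

screen = debugger_cli_common.RichTextLines(
    ["Roses are red", "Violets are blue"],
    font_attr_segs={1: [(0, 7, "blue")]},
    annotations={1: "shorter wavelength", "foo_metadata": "bar"})

tail = screen.slice(1, 2)
print(tail.lines)           # ['Violets are blue']
print(tail.font_attr_segs)  # {0: [(0, 7, 'blue')]}
print(tail.annotations)     # {0: 'shorter wavelength', 'foo_metadata': 'bar'}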
If there are duplicate, non-line-index fields in the annotations,\n the value from the input argument \"other\" will override that in this\n instance.\n\n Args:\n other: (RichTextLines) The other RichTextLines instance to be appended at\n the end of this instance.\n \"\"\"\n\n orig_num_lines = self.num_lines() # Record original number of lines.\n\n # Merge the lines.\n self._lines.extend(other.lines)\n\n # Merge the font_attr_segs.\n for line_index in other.font_attr_segs:\n self._font_attr_segs[orig_num_lines + line_index] = (\n other.font_attr_segs[line_index])\n\n # Merge the annotations.\n for key in other.annotations:\n if isinstance(key, int):\n self._annotations[orig_num_lines + key] = (other.annotations[key])\n else:\n self._annotations[key] = other.annotations[key]\n\n def _extend_before(self, other):\n \"\"\"Add another RichTextLines object to the front.\n\n Args:\n other: (RichTextLines) The other object to add to the front to this\n object.\n \"\"\"\n\n other_num_lines = other.num_lines() # Record original number of lines.\n\n # Merge the lines.\n self._lines = other.lines + self._lines\n\n # Merge the font_attr_segs.\n new_font_attr_segs = {}\n for line_index in self.font_attr_segs:\n new_font_attr_segs[other_num_lines + line_index] = (\n self.font_attr_segs[line_index])\n new_font_attr_segs.update(other.font_attr_segs)\n self._font_attr_segs = new_font_attr_segs\n\n # Merge the annotations.\n new_annotations = {}\n for key in self._annotations:\n if isinstance(key, int):\n new_annotations[other_num_lines + key] = (self.annotations[key])\n else:\n new_annotations[key] = other.annotations[key]\n\n new_annotations.update(other.annotations)\n self._annotations = new_annotations\n\n def append(self, line, font_attr_segs=None):\n \"\"\"Append a single line of text.\n\n Args:\n line: (str) The text to be added to the end.\n font_attr_segs: (list of tuples) Font attribute segments of the appended\n line.\n \"\"\"\n\n self._lines.append(line)\n if font_attr_segs:\n self._font_attr_segs[len(self._lines) - 1] = font_attr_segs\n\n def append_rich_line(self, rich_line):\n self.append(rich_line.text, rich_line.font_attr_segs)\n\n def prepend(self, line, font_attr_segs=None):\n \"\"\"Prepend (i.e., add to the front) a single line of text.\n\n Args:\n line: (str) The text to be added to the front.\n font_attr_segs: (list of tuples) Font attribute segments of the appended\n line.\n \"\"\"\n\n other = RichTextLines(line)\n if font_attr_segs:\n other.font_attr_segs[0] = font_attr_segs\n self._extend_before(other)\n\n def write_to_file(self, file_path):\n \"\"\"Write the object itself to file, in a plain format.\n\n The font_attr_segs and annotations are ignored.\n\n Args:\n file_path: (str) path of the file to write to.\n \"\"\"\n\n with gfile.Open(file_path, \"w\") as f:\n for line in self._lines:\n f.write(line + \"\\n\")\n\n # TODO(cais): Add a method to allow appending to a line in RichTextLines with\n # both text and font_attr_segs.\n\n\ndef regex_find(orig_screen_output, regex, font_attr):\n \"\"\"Perform regex match in rich text lines.\n\n Produces a new RichTextLines object with font_attr_segs containing highlighted\n regex matches.\n\n Example use cases include:\n 1) search for specific items in a large list of items, and\n 2) search for specific numerical values in a large tensor.\n\n Args:\n orig_screen_output: The original RichTextLines, in which the regex find\n is to be performed.\n regex: The regex used for matching.\n font_attr: Font attribute used for highlighting the found 
result.\n\n Returns:\n A modified copy of orig_screen_output.\n\n Raises:\n ValueError: If input str regex is not a valid regular expression.\n \"\"\"\n new_screen_output = RichTextLines(\n orig_screen_output.lines,\n font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs),\n annotations=orig_screen_output.annotations)\n\n try:\n re_prog = re.compile(regex)\n except re.error:\n raise ValueError(\"Invalid regular expression: \\\"%s\\\"\" % regex)\n\n regex_match_lines = []\n for i, line in enumerate(new_screen_output.lines):\n find_it = re_prog.finditer(line)\n\n match_segs = []\n for match in find_it:\n match_segs.append((match.start(), match.end(), font_attr))\n\n if match_segs:\n if i not in new_screen_output.font_attr_segs:\n new_screen_output.font_attr_segs[i] = match_segs\n else:\n new_screen_output.font_attr_segs[i].extend(match_segs)\n new_screen_output.font_attr_segs[i] = sorted(\n new_screen_output.font_attr_segs[i], key=lambda x: x[0])\n regex_match_lines.append(i)\n\n new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines\n return new_screen_output\n\n\ndef wrap_rich_text_lines(inp, cols):\n \"\"\"Wrap RichTextLines according to maximum number of columns.\n\n Produces a new RichTextLines object with the text lines, font_attr_segs and\n annotations properly wrapped. This ought to be used sparingly, as in most\n cases, command handlers producing RichTextLines outputs should know the\n screen/panel width via the screen_info kwarg and should produce properly\n length-limited lines in the output accordingly.\n\n Args:\n inp: Input RichTextLines object.\n cols: Number of columns, as an int.\n\n Returns:\n 1) A new instance of RichTextLines, with line lengths limited to cols.\n 2) A list of new (wrapped) line index. For example, if the original input\n consists of three lines and only the second line is wrapped, and it's\n wrapped into two lines, this return value will be: [0, 1, 3].\n Raises:\n ValueError: If inputs have invalid types.\n \"\"\"\n\n new_line_indices = []\n\n if not isinstance(inp, RichTextLines):\n raise ValueError(\"Invalid type of input screen_output\")\n\n if not isinstance(cols, int):\n raise ValueError(\"Invalid type of input cols\")\n\n out = RichTextLines([])\n\n row_counter = 0 # Counter for new row index\n for i, line in enumerate(inp.lines):\n new_line_indices.append(out.num_lines())\n\n if i in inp.annotations:\n out.annotations[row_counter] = inp.annotations[i]\n\n if len(line) <= cols:\n # No wrapping.\n out.lines.append(line)\n if i in inp.font_attr_segs:\n out.font_attr_segs[row_counter] = inp.font_attr_segs[i]\n\n row_counter += 1\n else:\n # Wrap.\n wlines = [] # Wrapped lines.\n\n osegs = []\n if i in inp.font_attr_segs:\n osegs = inp.font_attr_segs[i]\n\n idx = 0\n while idx < len(line):\n if idx + cols > len(line):\n rlim = len(line)\n else:\n rlim = idx + cols\n\n wlines.append(line[idx:rlim])\n for seg in osegs:\n if (seg[0] < rlim) and (seg[1] >= idx):\n # Calculate left bound within wrapped line.\n if seg[0] >= idx:\n lb = seg[0] - idx\n else:\n lb = 0\n\n # Calculate right bound within wrapped line.\n if seg[1] < rlim:\n rb = seg[1] - idx\n else:\n rb = rlim - idx\n\n if rb > lb: # Omit zero-length segments.\n wseg = (lb, rb, seg[2])\n if row_counter not in out.font_attr_segs:\n out.font_attr_segs[row_counter] = [wseg]\n else:\n out.font_attr_segs[row_counter].append(wseg)\n\n idx += cols\n row_counter += 1\n\n out.lines.extend(wlines)\n\n # Copy over keys of annotation that are not row indices.\n for key in 
inp.annotations:\n if not isinstance(key, int):\n out.annotations[key] = inp.annotations[key]\n\n return out, new_line_indices\n\n\nclass CommandHandlerRegistry:\n \"\"\"Registry of command handlers for CLI.\n\n Handler methods (callables) for user commands can be registered with this\n class, which then is able to dispatch commands to the correct handlers and\n retrieve the RichTextLines output.\n\n For example, suppose you have the following handler defined:\n def echo(argv, screen_info=None):\n return RichTextLines([\"arguments = %s\" % \" \".join(argv),\n \"screen_info = \" + repr(screen_info)])\n\n you can register the handler with the command prefix \"echo\" and alias \"e\":\n registry = CommandHandlerRegistry()\n registry.register_command_handler(\"echo\", echo,\n \"Echo arguments, along with screen info\", prefix_aliases=[\"e\"])\n\n then to invoke this command handler with some arguments and screen_info, do:\n registry.dispatch_command(\"echo\", [\"foo\", \"bar\"], screen_info={\"cols\": 80})\n\n or with the prefix alias:\n registry.dispatch_command(\"e\", [\"foo\", \"bar\"], screen_info={\"cols\": 80})\n\n The call will return a RichTextLines object which can be rendered by a CLI.\n \"\"\"\n\n HELP_COMMAND = \"help\"\n HELP_COMMAND_ALIASES = [\"h\"]\n VERSION_COMMAND = \"version\"\n VERSION_COMMAND_ALIASES = [\"ver\"]\n\n def __init__(self):\n # A dictionary from command prefix to handler.\n self._handlers = {}\n\n # A dictionary from prefix alias to prefix.\n self._alias_to_prefix = {}\n\n # A dictionary from prefix to aliases.\n self._prefix_to_aliases = {}\n\n # A dictionary from command prefix to help string.\n self._prefix_to_help = {}\n\n # Introductory text to help information.\n self._help_intro = None\n\n # Register a default handler for the command \"help\".\n self.register_command_handler(\n self.HELP_COMMAND,\n self._help_handler,\n \"Print this help message.\",\n prefix_aliases=self.HELP_COMMAND_ALIASES)\n\n # Register a default handler for the command \"version\".\n self.register_command_handler(\n self.VERSION_COMMAND,\n self._version_handler,\n \"Print the versions of TensorFlow and its key dependencies.\",\n prefix_aliases=self.VERSION_COMMAND_ALIASES)\n\n def register_command_handler(self,\n prefix,\n handler,\n help_info,\n prefix_aliases=None):\n \"\"\"Register a callable as a command handler.\n\n Args:\n prefix: Command prefix, i.e., the first word in a command, e.g.,\n \"print\" as in \"print tensor_1\".\n handler: A callable of the following signature:\n foo_handler(argv, screen_info=None),\n where argv is the argument vector (excluding the command prefix) and\n screen_info is a dictionary containing information about the screen,\n such as number of columns, e.g., {\"cols\": 100}.\n The callable should return:\n 1) a RichTextLines object representing the screen output.\n\n The callable can also raise an exception of the type CommandLineExit,\n which if caught by the command-line interface, will lead to its exit.\n The exception can optionally carry an exit token of arbitrary type.\n help_info: A help string.\n prefix_aliases: Aliases for the command prefix, as a list of str. 
E.g.,\n shorthands for the command prefix: [\"p\", \"pr\"]\n\n Raises:\n ValueError: If\n 1) the prefix is empty, or\n 2) handler is not callable, or\n 3) a handler is already registered for the prefix, or\n 4) elements in prefix_aliases clash with existing aliases.\n 5) help_info is not a str.\n \"\"\"\n\n if not prefix:\n raise ValueError(\"Empty command prefix\")\n\n if prefix in self._handlers:\n raise ValueError(\n \"A handler is already registered for command prefix \\\"%s\\\"\" % prefix)\n\n # Make sure handler is callable.\n if not callable(handler):\n raise ValueError(\"handler is not callable\")\n\n # Make sure that help info is a string.\n if not isinstance(help_info, str):\n raise ValueError(\"help_info is not a str\")\n\n # Process prefix aliases.\n if prefix_aliases:\n for alias in prefix_aliases:\n if self._resolve_prefix(alias):\n raise ValueError(\n \"The prefix alias \\\"%s\\\" clashes with existing prefixes or \"\n \"aliases.\" % alias)\n self._alias_to_prefix[alias] = prefix\n\n self._prefix_to_aliases[prefix] = prefix_aliases\n\n # Store handler.\n self._handlers[prefix] = handler\n\n # Store help info.\n self._prefix_to_help[prefix] = help_info\n\n def dispatch_command(self, prefix, argv, screen_info=None):\n \"\"\"Handles a command by dispatching it to a registered command handler.\n\n Args:\n prefix: Command prefix, as a str, e.g., \"print\".\n argv: Command argument vector, excluding the command prefix, represented\n as a list of str, e.g.,\n [\"tensor_1\"]\n screen_info: A dictionary containing screen info, e.g., {\"cols\": 100}.\n\n Returns:\n An instance of RichTextLines or None. If any exception is caught during\n the invocation of the command handler, the RichTextLines will wrap the\n error type and message.\n\n Raises:\n ValueError: If\n 1) prefix is empty, or\n 2) no command handler is registered for the command prefix, or\n 3) the handler is found for the prefix, but it fails to return a\n RichTextLines or raise any exception.\n CommandLineExit:\n If the command handler raises this type of exception, this method will\n simply pass it along.\n \"\"\"\n if not prefix:\n raise ValueError(\"Prefix is empty\")\n\n resolved_prefix = self._resolve_prefix(prefix)\n if not resolved_prefix:\n raise ValueError(\"No handler is registered for command prefix \\\"%s\\\"\" %\n prefix)\n\n handler = self._handlers[resolved_prefix]\n try:\n output = handler(argv, screen_info=screen_info)\n except CommandLineExit as e:\n raise e\n except SystemExit as e:\n # Special case for syntax errors caught by argparse.\n lines = [\"Syntax error for command: %s\" % prefix,\n \"For help, do \\\"help %s\\\"\" % prefix]\n output = RichTextLines(lines)\n\n except BaseException as e: # pylint: disable=broad-except\n lines = [\"Error occurred during handling of command: %s %s:\" %\n (resolved_prefix, \" \".join(argv)), \"%s: %s\" % (type(e), str(e))]\n\n # Include traceback of the exception.\n lines.append(\"\")\n lines.extend(traceback.format_exc().split(\"\\n\"))\n\n output = RichTextLines(lines)\n\n if not isinstance(output, RichTextLines) and output is not None:\n raise ValueError(\n \"Return value from command handler %s is not None or a RichTextLines \"\n \"instance\" % str(handler))\n\n return output\n\n def is_registered(self, prefix):\n \"\"\"Test if a command prefix or its alias is has a registered handler.\n\n Args:\n prefix: A prefix or its alias, as a str.\n\n Returns:\n True iff a handler is registered for prefix.\n \"\"\"\n return self._resolve_prefix(prefix) is not None\n\n def 
get_help(self, cmd_prefix=None):\n \"\"\"Compile help information into a RichTextLines object.\n\n Args:\n cmd_prefix: Optional command prefix. As the prefix itself or one of its\n aliases.\n\n Returns:\n A RichTextLines object containing the help information. If cmd_prefix\n is None, the return value will be the full command-line help. Otherwise,\n it will be the help information for the specified command.\n \"\"\"\n if not cmd_prefix:\n # Print full help information, in sorted order of the command prefixes.\n help_info = RichTextLines([])\n if self._help_intro:\n # If help intro is available, show it at the beginning.\n help_info.extend(self._help_intro)\n\n sorted_prefixes = sorted(self._handlers)\n for cmd_prefix in sorted_prefixes:\n lines = self._get_help_for_command_prefix(cmd_prefix)\n lines.append(\"\")\n lines.append(\"\")\n help_info.extend(RichTextLines(lines))\n\n return help_info\n else:\n return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))\n\n def set_help_intro(self, help_intro):\n \"\"\"Set an introductory message to help output.\n\n Args:\n help_intro: (RichTextLines) Rich text lines appended to the\n beginning of the output of the command \"help\", as introductory\n information.\n \"\"\"\n self._help_intro = help_intro\n\n def _help_handler(self, args, screen_info=None):\n \"\"\"Command handler for \"help\".\n\n \"help\" is a common command that merits built-in support from this class.\n\n Args:\n args: Command line arguments to \"help\" (not including \"help\" itself).\n screen_info: (dict) Information regarding the screen, e.g., the screen\n width in characters: {\"cols\": 80}\n\n Returns:\n (RichTextLines) Screen text output.\n \"\"\"\n\n _ = screen_info # Unused currently.\n\n if not args:\n return self.get_help()\n elif len(args) == 1:\n return self.get_help(args[0])\n else:\n return RichTextLines([\"ERROR: help takes only 0 or 1 input argument.\"])\n\n def _version_handler(self, args, screen_info=None):\n del args # Unused currently.\n del screen_info # Unused currently.\n return get_tensorflow_version_lines(include_dependency_versions=True)\n\n def _resolve_prefix(self, token):\n \"\"\"Resolve command prefix from the prefix itself or its alias.\n\n Args:\n token: a str to be resolved.\n\n Returns:\n If resolvable, the resolved command prefix.\n If not resolvable, None.\n \"\"\"\n if token in self._handlers:\n return token\n elif token in self._alias_to_prefix:\n return self._alias_to_prefix[token]\n else:\n return None\n\n def _get_help_for_command_prefix(self, cmd_prefix):\n \"\"\"Compile the help information for a given command prefix.\n\n Args:\n cmd_prefix: Command prefix, as the prefix itself or one of its aliases.\n\n Returns:\n A list of str as the help information for cmd_prefix. 
If the cmd_prefix\n does not exist, the returned list of str will indicate that.\n \"\"\"\n lines = []\n\n resolved_prefix = self._resolve_prefix(cmd_prefix)\n if not resolved_prefix:\n lines.append(\"Invalid command prefix: \\\"%s\\\"\" % cmd_prefix)\n return lines\n\n lines.append(resolved_prefix)\n\n if resolved_prefix in self._prefix_to_aliases:\n lines.append(HELP_INDENT + \"Aliases: \" + \", \".join(\n self._prefix_to_aliases[resolved_prefix]))\n\n lines.append(\"\")\n help_lines = self._prefix_to_help[resolved_prefix].split(\"\\n\")\n for line in help_lines:\n lines.append(HELP_INDENT + line)\n\n return lines\n\n\nclass TabCompletionRegistry:\n \"\"\"Registry for tab completion responses.\"\"\"\n\n def __init__(self):\n self._comp_dict = {}\n\n # TODO(cais): Rename method names with \"comp\" to \"*completion*\" to avoid\n # confusion.\n\n def register_tab_comp_context(self, context_words, comp_items):\n \"\"\"Register a tab-completion context.\n\n Register that, for each word in context_words, the potential tab-completions\n are the words in comp_items.\n\n A context word is a pre-existing, completed word in the command line that\n determines how tab-completion works for another, incomplete word in the same\n command line.\n Completion items consist of potential candidates for the incomplete word.\n\n To give a general example, a context word can be \"drink\", and the completion\n items can be [\"coffee\", \"tea\", \"water\"]\n\n Note: A context word can be empty, in which case the context is for the\n top-level commands.\n\n Args:\n context_words: A list of context words belonging to the context being\n registered. It is a list of str, instead of a single string, to support\n synonym words triggering the same tab-completion context, e.g.,\n both \"drink\" and the short-hand \"dr\" can trigger the same context.\n comp_items: A list of completion items, as a list of str.\n\n Raises:\n TypeError: if the input arguments are not all of the correct types.\n \"\"\"\n\n if not isinstance(context_words, list):\n raise TypeError(\"Incorrect type in context_list: Expected list, got %s\" %\n type(context_words))\n\n if not isinstance(comp_items, list):\n raise TypeError(\"Incorrect type in comp_items: Expected list, got %s\" %\n type(comp_items))\n\n # Sort the completion items on registration, so that later during\n # get_completions calls, no sorting will be necessary.\n sorted_comp_items = sorted(comp_items)\n\n for context_word in context_words:\n self._comp_dict[context_word] = sorted_comp_items\n\n def deregister_context(self, context_words):\n \"\"\"Deregister a list of context words.\n\n Args:\n context_words: A list of context words to deregister, as a list of str.\n\n Raises:\n KeyError: if there are word(s) in context_words that do not correspond\n to any registered contexts.\n \"\"\"\n\n for context_word in context_words:\n if context_word not in self._comp_dict:\n raise KeyError(\"Cannot deregister unregistered context word \\\"%s\\\"\" %\n context_word)\n\n for context_word in context_words:\n del self._comp_dict[context_word]\n\n def extend_comp_items(self, context_word, new_comp_items):\n \"\"\"Add a list of completion items to a completion context.\n\n Args:\n context_word: A single completion word as a string. 
The extension will\n also apply to all other context words of the same context.\n new_comp_items: (list of str) New completion items to add.\n\n Raises:\n KeyError: if the context word has not been registered.\n \"\"\"\n\n if context_word not in self._comp_dict:\n raise KeyError(\"Context word \\\"%s\\\" has not been registered\" %\n context_word)\n\n self._comp_dict[context_word].extend(new_comp_items)\n self._comp_dict[context_word] = sorted(self._comp_dict[context_word])\n\n def remove_comp_items(self, context_word, comp_items):\n \"\"\"Remove a list of completion items from a completion context.\n\n Args:\n context_word: A single completion word as a string. The removal will\n also apply to all other context words of the same context.\n comp_items: Completion items to remove.\n\n Raises:\n KeyError: if the context word has not been registered.\n \"\"\"\n\n if context_word not in self._comp_dict:\n raise KeyError(\"Context word \\\"%s\\\" has not been registered\" %\n context_word)\n\n for item in comp_items:\n self._comp_dict[context_word].remove(item)\n\n def get_completions(self, context_word, prefix):\n \"\"\"Get the tab completions given a context word and a prefix.\n\n Args:\n context_word: The context word.\n prefix: The prefix of the incomplete word.\n\n Returns:\n (1) None if no registered context matches the context_word.\n A list of str for the matching completion items. Can be an empty list\n of a matching context exists, but no completion item matches the\n prefix.\n (2) Common prefix of all the words in the first return value. If the\n first return value is None, this return value will be None, too. If\n the first return value is not None, i.e., a list, this return value\n will be a str, which can be an empty str if there is no common\n prefix among the items of the list.\n \"\"\"\n\n if context_word not in self._comp_dict:\n return None, None\n\n comp_items = self._comp_dict[context_word]\n comp_items = sorted(\n [item for item in comp_items if item.startswith(prefix)])\n\n return comp_items, self._common_prefix(comp_items)\n\n def _common_prefix(self, m):\n \"\"\"Given a list of str, returns the longest common prefix.\n\n Args:\n m: (list of str) A list of strings.\n\n Returns:\n (str) The longest common prefix.\n \"\"\"\n if not m:\n return \"\"\n\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n\n return s1\n\n\nclass CommandHistory:\n \"\"\"Keeps command history and supports lookup.\"\"\"\n\n _HISTORY_FILE_NAME = \".tfdbg_history\"\n\n def __init__(self, limit=100, history_file_path=None):\n \"\"\"CommandHistory constructor.\n\n Args:\n limit: Maximum number of the most recent commands that this instance\n keeps track of, as an int.\n history_file_path: (str) Manually specified path to history file. 
Used in\n testing.\n \"\"\"\n\n self._commands = []\n self._limit = limit\n self._history_file_path = (\n history_file_path or self._get_default_history_file_path())\n self._load_history_from_file()\n\n def _load_history_from_file(self):\n if os.path.isfile(self._history_file_path):\n try:\n with open(self._history_file_path, \"rt\") as history_file:\n commands = history_file.readlines()\n self._commands = [command.strip() for command in commands\n if command.strip()]\n\n # Limit the size of the history file.\n if len(self._commands) > self._limit:\n self._commands = self._commands[-self._limit:]\n with open(self._history_file_path, \"wt\") as history_file:\n for command in self._commands:\n history_file.write(command + \"\\n\")\n except IOError:\n print(\"WARNING: writing history file failed.\")\n\n def _add_command_to_history_file(self, command):\n try:\n with open(self._history_file_path, \"at\") as history_file:\n history_file.write(command + \"\\n\")\n except IOError:\n pass\n\n @classmethod\n def _get_default_history_file_path(cls):\n return os.path.join(os.path.expanduser(\"~\"), cls._HISTORY_FILE_NAME)\n\n def add_command(self, command):\n \"\"\"Add a command to the command history.\n\n Args:\n command: The history command, as a str.\n\n Raises:\n TypeError: if command is not a str.\n \"\"\"\n\n if self._commands and command == self._commands[-1]:\n # Ignore repeating commands in a row.\n return\n\n if not isinstance(command, str):\n raise TypeError(\"Attempt to enter non-str entry to command history\")\n\n self._commands.append(command)\n\n if len(self._commands) > self._limit:\n self._commands = self._commands[-self._limit:]\n\n self._add_command_to_history_file(command)\n\n def most_recent_n(self, n):\n \"\"\"Look up the n most recent commands.\n\n Args:\n n: Number of most recent commands to look up.\n\n Returns:\n A list of n most recent commands, or all available most recent commands,\n if n exceeds size of the command history, in chronological order.\n \"\"\"\n\n return self._commands[-n:]\n\n def lookup_prefix(self, prefix, n):\n \"\"\"Look up the n most recent commands that starts with prefix.\n\n Args:\n prefix: The prefix to lookup.\n n: Number of most recent commands to look up.\n\n Returns:\n A list of n most recent commands that have the specified prefix, or all\n available most recent commands that have the prefix, if n exceeds the\n number of history commands with the prefix.\n \"\"\"\n\n commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]\n\n return commands[-n:]\n\n # TODO(cais): Lookup by regex.\n\n\nclass MenuItem:\n \"\"\"A class for an item in a text-based menu.\"\"\"\n\n def __init__(self, caption, content, enabled=True):\n \"\"\"Menu constructor.\n\n TODO(cais): Nested menu is currently not supported. Support it.\n\n Args:\n caption: (str) caption of the menu item.\n content: Content of the menu item. 
For a menu item that triggers\n a command, for example, content is the command string.\n enabled: (bool) whether this menu item is enabled.\n \"\"\"\n\n self._caption = caption\n self._content = content\n self._enabled = enabled\n\n @property\n def caption(self):\n return self._caption\n\n @property\n def type(self):\n return self._node_type\n\n @property\n def content(self):\n return self._content\n\n def is_enabled(self):\n return self._enabled\n\n def disable(self):\n self._enabled = False\n\n def enable(self):\n self._enabled = True\n\n\nclass Menu:\n \"\"\"A class for text-based menu.\"\"\"\n\n def __init__(self, name=None):\n \"\"\"Menu constructor.\n\n Args:\n name: (str or None) name of this menu.\n \"\"\"\n\n self._name = name\n self._items = []\n\n def append(self, item):\n \"\"\"Append an item to the Menu.\n\n Args:\n item: (MenuItem) the item to be appended.\n \"\"\"\n self._items.append(item)\n\n def insert(self, index, item):\n self._items.insert(index, item)\n\n def num_items(self):\n return len(self._items)\n\n def captions(self):\n return [item.caption for item in self._items]\n\n def caption_to_item(self, caption):\n \"\"\"Get a MenuItem from the caption.\n\n Args:\n caption: (str) The caption to look up.\n\n Returns:\n (MenuItem) The first-match menu item with the caption, if any.\n\n Raises:\n LookupError: If a menu item with the caption does not exist.\n \"\"\"\n\n captions = self.captions()\n if caption not in captions:\n raise LookupError(\"There is no menu item with the caption \\\"%s\\\"\" %\n caption)\n\n return self._items[captions.index(caption)]\n\n def format_as_single_line(self,\n prefix=None,\n divider=\" | \",\n enabled_item_attrs=None,\n disabled_item_attrs=None):\n \"\"\"Format the menu as a single-line RichTextLines object.\n\n Args:\n prefix: (str) String added to the beginning of the line.\n divider: (str) The dividing string between the menu items.\n enabled_item_attrs: (list or str) Attributes applied to each enabled\n menu item, e.g., [\"bold\", \"underline\"].\n disabled_item_attrs: (list or str) Attributes applied to each\n disabled menu item, e.g., [\"red\"].\n\n Returns:\n (RichTextLines) A single-line output representing the menu, with\n font_attr_segs marking the individual menu items.\n \"\"\"\n\n if (enabled_item_attrs is not None and\n not isinstance(enabled_item_attrs, list)):\n enabled_item_attrs = [enabled_item_attrs]\n\n if (disabled_item_attrs is not None and\n not isinstance(disabled_item_attrs, list)):\n disabled_item_attrs = [disabled_item_attrs]\n\n menu_line = prefix if prefix is not None else \"\"\n attr_segs = []\n\n for item in self._items:\n menu_line += item.caption\n item_name_begin = len(menu_line) - len(item.caption)\n\n if item.is_enabled():\n final_attrs = [item]\n if enabled_item_attrs:\n final_attrs.extend(enabled_item_attrs)\n attr_segs.append((item_name_begin, len(menu_line), final_attrs))\n else:\n if disabled_item_attrs:\n attr_segs.append(\n (item_name_begin, len(menu_line), disabled_item_attrs))\n\n menu_line += divider\n\n return RichTextLines(menu_line, font_attr_segs={0: attr_segs})\n", "output": ["rich_text_lines_from_rich_line_list", "regex_find", "get_tensorflow_version_lines", "wrap_rich_text_lines", "TabCompletionRegistry", "RichLine", "CommandHistory", "CommandHandlerRegistry", "Menu", "MenuItem", "CommandLineExit", "RichTextLines"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/debug/cli/debugger_cli_common.py", "file_length": 11816, "symbol_dict": [{"symbol": "regex_find", "type": 
"mannual_defined_function", "byte_location": 12124, "location": 3739}, {"symbol": "rich_text_lines_from_rich_line_list", "type": "mannual_defined_function", "byte_location": 3370, "location": 1004}, {"symbol": "get_tensorflow_version_lines", "type": "mannual_defined_function", "byte_location": 4037, "location": 1223}, {"symbol": "wrap_rich_text_lines", "type": "mannual_defined_function", "byte_location": 13847, "location": 4295}, {"symbol": "TabCompletionRegistry", "type": "mannual_defined_class", "byte_location": 27447, "location": 8241}, {"symbol": "Menu", "type": "mannual_defined_class", "byte_location": 36883, "location": 10935}, {"symbol": "RichLine", "type": "mannual_defined_class", "byte_location": 1304, "location": 401}, {"symbol": "CommandHandlerRegistry", "type": "mannual_defined_class", "byte_location": 16724, "location": 5211}, {"symbol": "RichTextLines", "type": "mannual_defined_class", "byte_location": 4618, "location": 1402}, {"symbol": "CommandLineExit", "type": "mannual_defined_class", "byte_location": 1100, "location": 335}, {"symbol": "CommandHistory", "type": "mannual_defined_class", "byte_location": 32831, "location": 9750}, {"symbol": "MenuItem", "type": "mannual_defined_class", "byte_location": 36004, "location": 10675}]}} {"input": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Reader class for tfdbg v2 debug events.\"\"\"\n\nimport collections\nimport os\nimport threading\n\nfrom tensorflow.core.protobuf import debug_event_pb2\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.lib.io import tf_record\nfrom tensorflow.python.util import compat\n\n\nDebugEventWithOffset = collections.namedtuple(\n \"DebugEventWithOffset\", \"debug_event offset\")\n\n\nclass DebugEventsReader:\n \"\"\"Reader class for a tfdbg v2 DebugEvents directory.\"\"\"\n\n # Number of digests after which a read lock is released and re-acquired during\n # serial reading of digests for SourceFiles, Execution, and\n # GraphExecutionTrace. 
This allows us to avoid releasing and re-acquiring the\n # lock too often (i.e., after each digest) and to minimize performance\n # penalty.\n _READER_RELEASE_PER = 100\n\n _METADATA_SUFFIX = \".metadata\"\n _SOURCE_FILE_SUFFIX = \".source_files\"\n _STACK_FRAMES_SUFFIX = \".stack_frames\"\n _GRAPHS_SUFFIX = \".graphs\"\n _EXECUTION_SUFFIX = \".execution\"\n _GRAPH_EXECUTION_TRACES_SUFFIX = \".graph_execution_traces\"\n\n def __init__(self, dump_root):\n if not file_io.is_directory(dump_root):\n raise ValueError(\"Specified dump_root is not a directory: %s\" % dump_root)\n self._dump_root = dump_root\n self._metadata_paths = self._load_metadata_files()\n\n prefixes = [\n metadata_path[:-len(self._METADATA_SUFFIX)]\n for metadata_path in self._metadata_paths\n ]\n prefix = prefixes[0] # This is the prefix of the main file set.\n self._source_files_path = compat.as_bytes(prefix + self._SOURCE_FILE_SUFFIX)\n self._stack_frames_path = compat.as_bytes(prefix +\n self._STACK_FRAMES_SUFFIX)\n self._graphs_path = compat.as_bytes(prefix + self._GRAPHS_SUFFIX)\n self._execution_path = compat.as_bytes(prefix + self._EXECUTION_SUFFIX)\n # There can be multiple .graph_execution_trace files each belonging\n # to a file set generated on an individual host, in the case of\n # a distributed TensorFlow job.\n # This is different from the other debug event files in the file set.\n self._graph_execution_traces_paths = [\n compat.as_bytes(prefix + self._GRAPH_EXECUTION_TRACES_SUFFIX)\n for prefix in prefixes\n ]\n self._readers = dict() # A map from file path to reader.\n # A map from file path to current reading offset.\n self._reader_offsets = dict()\n # Lock for reader creation.\n self._readers_lock = threading.Lock()\n # Locks for read operation on individual readers.\n self._reader_read_locks = dict()\n\n self._offsets = dict()\n\n def _load_metadata_files(self):\n \"\"\"Load and parse metadata files in the dump root.\n\n Check that all metadata files have a common tfdbg_run_id, and raise\n a ValueError if their tfdbg_run_ids differ.\n\n Returns:\n A list of metadata file paths in ascending order of their starting\n wall_time timestamp.\n \"\"\"\n\n metadata_paths = file_io.get_matching_files(\n os.path.join(self._dump_root, \"*%s\" % self._METADATA_SUFFIX))\n if not metadata_paths:\n raise ValueError(\"Cannot find any tfdbg metadata file in directory: %s\" %\n self._dump_root)\n wall_times = []\n run_ids = []\n tensorflow_versions = []\n file_versions = []\n for metadata_path in metadata_paths:\n reader = tf_record.tf_record_random_reader(metadata_path)\n try:\n record = reader.read(0)[0]\n debug_event = debug_event_pb2.DebugEvent.FromString(record)\n wall_times.append(debug_event.wall_time)\n run_ids.append(debug_event.debug_metadata.tfdbg_run_id)\n tensorflow_versions.append(\n debug_event.debug_metadata.tensorflow_version\n )\n file_versions.append(debug_event.debug_metadata.file_version)\n except Exception as e:\n raise errors.DataLossError(\n None,\n None,\n \"Error reading tfdbg metadata from paths %s\" % metadata_paths,\n ) from e\n finally:\n reader.close()\n self._starting_wall_time = wall_times[0]\n self._tfdbg_run_id = run_ids[0]\n self._tensorflow_version = tensorflow_versions[0]\n self._file_version = file_versions[0]\n if len(metadata_paths) == 1:\n # Fast path for a common case (only one DebugEvent file set.)\n return metadata_paths\n\n num_no_id = len([run_id for run_id in run_ids if not run_id])\n if num_no_id:\n paths_without_run_id = [\n metadata_path\n for metadata_path, run_id in 
zip(metadata_paths, run_ids)\n if not run_id\n ]\n raise ValueError(\n \"Found %d tfdbg metadata files and %d of them do not \"\n \"have tfdbg run ids. The metadata files without run ids are: %s\" %\n (len(run_ids), num_no_id, paths_without_run_id))\n elif len(set(run_ids)) != 1:\n raise ValueError(\n \"Unexpected: Found multiple (%d) tfdbg2 runs in directory %s\" %\n (len(set(run_ids)), self._dump_root))\n # Return the metadata files in ascending order of their timestamps.\n paths_and_timestamps = sorted(\n zip(metadata_paths, wall_times), key=lambda t: t[1])\n self._starting_wall_time = paths_and_timestamps[0][1]\n return [path[0] for path in paths_and_timestamps]\n\n def starting_wall_time(self):\n \"\"\"Get the starting timestamp of the instrumented TensorFlow program.\n\n When there are multiple hosts (i.e., multiple tfdbg file sets), the earliest\n timestamp among the file sets is returned. It is assumed to be the job that\n starts first (e.g., the coordinator).\n\n Returns:\n Starting timestamp in seconds since the epoch, as a float.\n \"\"\"\n return self._starting_wall_time\n\n def tfdbg_run_id(self):\n \"\"\"Get the run ID of the instrumented TensorFlow program.\"\"\"\n return self._tfdbg_run_id\n\n def tensorflow_version(self):\n \"\"\"Get the version string of TensorFlow that the debugged program ran on.\"\"\"\n return self._tensorflow_version\n\n def tfdbg_file_version(self):\n \"\"\"Get the tfdbg file format version.\"\"\"\n return self._file_version\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n del exception_type, exception_value, traceback # Unused\n self.close()\n\n def _generic_iterator(self, file_path):\n \"\"\"A helper method that makes an iterator given a debug-events file path.\n\n Repeated calls to this method create iterators that remember the last\n successful reading position (offset) for each given `file_path`. So the\n iterators are meant for incremental reading of the file.\n\n Args:\n file_path: Path to the file to create the iterator for.\n\n Yields:\n A tuple of (offset, debug_event_proto) on each `next()` call.\n \"\"\"\n yield_count = 0\n reader = self._get_reader(file_path)\n read_lock = self._reader_read_locks[file_path]\n read_lock.acquire()\n try:\n while True:\n current_offset = self._reader_offsets[file_path]\n try:\n record, self._reader_offsets[file_path] = reader.read(current_offset)\n except (errors.DataLossError, IndexError):\n # We ignore partial read exceptions, because a record may be\n # truncated. The PyRandomRecordReader throws an `IndexError` when\n # offset goes out of bound.\n break\n yield DebugEventWithOffset(\n debug_event=debug_event_pb2.DebugEvent.FromString(record),\n offset=current_offset)\n yield_count += 1\n # The read lock must be periodically released to allow for concurrent\n # random reads. 
But we do so at a number of reads, instead of after\n # every single read, in order to minimize the performance penalty.\n if yield_count % self._READER_RELEASE_PER == 0:\n read_lock.release()\n read_lock.acquire()\n finally:\n read_lock.release()\n\n def _get_reader(self, file_path):\n \"\"\"Get a random-access reader for TFRecords file at file_path.\"\"\"\n file_path = compat.as_bytes(file_path)\n # The following code uses the double-checked locking pattern to optimize\n # the common case (where the reader is already initialized).\n if file_path not in self._readers: # 1st check, without lock.\n with self._readers_lock:\n if file_path not in self._readers: # 2nd check, with lock.\n self._readers[file_path] = tf_record.tf_record_random_reader(\n file_path)\n self._reader_read_locks[file_path] = threading.Lock()\n self._reader_offsets[file_path] = 0\n return self._readers[file_path]\n\n def source_files_iterator(self):\n return self._generic_iterator(self._source_files_path)\n\n def stack_frames_iterator(self):\n return self._generic_iterator(self._stack_frames_path)\n\n def graphs_iterator(self):\n return self._generic_iterator(self._graphs_path)\n\n def read_source_files_event(self, offset):\n \"\"\"Read a DebugEvent proto at given offset from the .source_files file.\"\"\"\n with self._reader_read_locks[self._source_files_path]:\n proto_string = self._get_reader(self._source_files_path).read(offset)[0]\n return debug_event_pb2.DebugEvent.FromString(proto_string)\n\n def read_graphs_event(self, offset):\n \"\"\"Read a DebugEvent proto at a given offset from the .graphs file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n \"\"\"\n return debug_event_pb2.DebugEvent.FromString(\n self._get_reader(self._graphs_path).read(offset)[0])\n\n def execution_iterator(self):\n return self._generic_iterator(self._execution_path)\n\n def read_execution_event(self, offset):\n \"\"\"Read a DebugEvent proto at a given offset from the .execution file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n \"\"\"\n with self._reader_read_locks[self._execution_path]:\n proto_string = self._get_reader(self._execution_path).read(offset)[0]\n return debug_event_pb2.DebugEvent.FromString(proto_string)\n\n def graph_execution_traces_iterators(self):\n return [\n self._generic_iterator(path)\n for path in self._graph_execution_traces_paths\n ]\n\n def read_graph_execution_traces_event(self, locator):\n \"\"\"Read DebugEvent at given offset from given .graph_execution_traces file.\n\n Args:\n locator: A (file_index, offset) tuple that locates the DebugEvent\n containing the graph execution trace.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n \"\"\"\n file_index, offset = locator\n graph_execution_traces_path = self._graph_execution_traces_paths[file_index]\n with self._reader_read_locks[graph_execution_traces_path]:\n proto_string = self._get_reader(graph_execution_traces_path).read(\n offset)[0]\n return debug_event_pb2.DebugEvent.FromString(proto_string)\n\n def close(self):\n with self._readers_lock:\n file_paths = list(self._readers.keys())\n for file_path in 
file_paths:\n self._readers[file_path].close()\n del self._readers[file_path]\n\n\nclass BaseDigest:\n \"\"\"Base class for digest.\n\n Properties:\n wall_time: A timestamp for the digest as a `float` (unit: s).\n locator: A datum that allows tracng the digest to its original\n location. It can be either of the two:\n 1. Bytes offset from the beginning of the file as a single integer,\n for the case of all digests of the same kind coming from the same\n file.\n 2. A tuple of a file index and a byte offset. This applies to case\n in which the same type of debugger data may come from multiple files,\n e.g., graph execution traces.\n \"\"\"\n\n def __init__(self, wall_time, locator):\n self._wall_time = wall_time\n self._locator = locator\n\n @property\n def wall_time(self):\n return self._wall_time\n\n @property\n def locator(self):\n return self._locator\n\n def to_json(self):\n return {\"wall_time\": self.wall_time}\n\n\nclass ExecutionDigest(BaseDigest):\n \"\"\"Light-weight digest summarizing top-level execution event.\n\n Use `DebugDataReader.read_execution(execution_digest)` to load the more\n detailed data object concerning the execution event (`Execution`).\n\n Properties:\n op_type: Type name of the executed op. In the case of the eager execution of\n an individual op, it is the name of the op (e.g., \"MatMul\").\n In the case of the execution of a tf.function (FuncGraph), this is the\n internally-generated name of the function (e.g.,\n \"__inference_my_func_123\").\n output_tensor_device_ids: IDs of the devices on which the output tensors of\n the execution reside. For no-output execution, this is `None`.\n \"\"\"\n\n def __init__(self,\n wall_time,\n locator,\n op_type,\n output_tensor_device_ids=None):\n super().__init__(wall_time, locator)\n self._op_type = op_type\n self._output_tensor_device_ids = _tuple_or_none(output_tensor_device_ids)\n\n @property\n def op_type(self):\n return self._op_type\n\n @property\n def output_tensor_device_ids(self):\n return self._output_tensor_device_ids\n\n def to_json(self):\n output = super().to_json()\n output.update({\n \"op_type\": self.op_type,\n \"output_tensor_device_ids\": self.output_tensor_device_ids,\n })\n return output\n\n\ndef _tuple_or_none(data):\n return tuple(data) if data else None\n\n\nclass Execution(ExecutionDigest):\n \"\"\"Detailed data relating to a top-level execution event.\n\n The execution is of an individual op or a tf.function, which may have any\n number of output tensors.\n\n Properties (beyond the base class `ExecutionDigest`):\n host_name: Name of the host on which the execution happened.\n stack_frame_ids: Reference IDs for stack frames, ordered from bottommost to\n topmost. Use `DebugDataReader.read_execution_stack_trace()` to load the\n detailed stack frames (filepath, lineno and function name).\n tensor_debug_mode: TensorDebugMode enum value, as an `int`.\n graph_id: ID of the executed FuncGraph (applicable only the execution of a\n tf.function). `None` for the eager execution of an individual op.\n input_tensor_ids: IDs of the input (eager) tensor(s) for this execution, if\n any. If the eager execution has no input tensor, this is `None`. Else,\n this is a `tuple` of `int`s.\n output_tensor_ids: IDs of the output (eager) tensor(s) from this execution,\n if any. If the eager execution produces no output tensor, this is `None`.\n Else, this is a `tuple` of `int`s.\n debug_tensor_values: Values of the debug tensor(s), applicable only to\n non-FULL_TENSOR tensor debug mode. A tuple of list of numbers. 
Each\n element of the tuple corresponds to an output tensor of the execution.\n See documentation of the various TensorDebugModes for the semantics of the\n numbers. If the eager execution produces no output tensor, this is\n `None`. Else, this is a `tuple` of `list`s.\n \"\"\"\n\n def __init__(self,\n execution_digest,\n host_name,\n stack_frame_ids,\n tensor_debug_mode,\n graph_id=None,\n input_tensor_ids=None,\n output_tensor_ids=None,\n debug_tensor_values=None):\n super().__init__(\n execution_digest.wall_time,\n execution_digest.locator,\n execution_digest.op_type,\n output_tensor_device_ids=execution_digest.output_tensor_device_ids)\n self._host_name = host_name\n self._stack_frame_ids = tuple(stack_frame_ids)\n self._tensor_debug_mode = tensor_debug_mode\n self._graph_id = graph_id\n self._input_tensor_ids = _tuple_or_none(input_tensor_ids)\n self._output_tensor_ids = _tuple_or_none(output_tensor_ids)\n self._debug_tensor_values = _tuple_or_none(debug_tensor_values)\n\n @property\n def host_name(self):\n return self._host_name\n\n @property\n def stack_frame_ids(self):\n return self._stack_frame_ids\n\n @property\n def tensor_debug_mode(self):\n return self._tensor_debug_mode\n\n @property\n def graph_id(self):\n return self._graph_id\n\n @property\n def input_tensor_ids(self):\n return self._input_tensor_ids\n\n @property\n def num_outputs(self):\n return len(self._output_tensor_ids) if self._output_tensor_ids else 0\n\n @property\n def output_tensor_ids(self):\n return self._output_tensor_ids\n\n @property\n def debug_tensor_values(self):\n return self._debug_tensor_values\n\n def to_json(self):\n output = super().to_json()\n output.update({\n \"host_name\": self.host_name,\n \"stack_frame_ids\": self.stack_frame_ids,\n \"tensor_debug_mode\": self.tensor_debug_mode,\n \"graph_id\": self.graph_id,\n \"input_tensor_ids\": self.input_tensor_ids,\n \"output_tensor_ids\": self.output_tensor_ids,\n \"debug_tensor_values\": self.debug_tensor_values,\n })\n return output\n\n\nclass DebuggedGraph:\n \"\"\"Data object representing debugging information about a tf.Graph.\n\n Includes `FuncGraph`s.\n\n Properties:\n name: Name of the graph (if any). May be `None` for non-function graphs.\n graph_id: Debugger-generated ID for the graph.\n inner_graph_ids: A list of the debugger-generated IDs for the graphs\n enclosed by this graph.\n outer_graph_id: If this graph is nested within an outer graph, ID of the\n outer graph. 
If this is an outermost graph, `None`.\n \"\"\"\n\n def __init__(self,\n name,\n graph_id,\n outer_graph_id=None):\n self._name = name\n self._graph_id = graph_id\n self._outer_graph_id = outer_graph_id\n self._inner_graph_ids = []\n # A dictionary from op name to GraphOpCreationDigest.\n self._op_by_name = dict()\n # A dictionary mapping op to immediate downstream consumers.\n self._op_consumers = collections.defaultdict(list)\n\n def add_inner_graph_id(self, inner_graph_id):\n \"\"\"Add the debugger-generated ID of a graph nested within this graph.\n\n Args:\n inner_graph_id: The debugger-generated ID of the nested inner graph.\n \"\"\"\n assert isinstance(inner_graph_id, str)\n self._inner_graph_ids.append(inner_graph_id)\n\n def add_op(self, graph_op_creation_digest):\n \"\"\"Add an op creation data object.\n\n Args:\n graph_op_creation_digest: A GraphOpCreationDigest data object describing\n the creation of an op inside this graph.\n \"\"\"\n if graph_op_creation_digest.op_name in self._op_by_name:\n raise ValueError(\n \"Duplicate op name: %s (op type: %s)\" %\n (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type))\n self._op_by_name[\n graph_op_creation_digest.op_name] = graph_op_creation_digest\n\n def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):\n \"\"\"Add a consuming op for this op.\n\n Args:\n src_op_name: Name of the op of which the output tensor is being consumed.\n src_slot: 0-based output slot of the op being consumed.\n dst_op_name: Name of the consuming op (e.g., \"Conv2D_3/BiasAdd\")\n dst_slot: 0-based input slot of the consuming op that receives the tensor\n from this op.\n \"\"\"\n self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot))\n\n @property\n def name(self):\n return self._name\n\n @property\n def graph_id(self):\n return self._graph_id\n\n @property\n def outer_graph_id(self):\n return self._outer_graph_id\n\n @property\n def inner_graph_ids(self):\n return self._inner_graph_ids\n\n def get_tensor_id(self, op_name, output_slot):\n \"\"\"Get the ID of a symbolic tensor in this graph.\"\"\"\n return self._op_by_name[op_name].output_tensor_ids[output_slot]\n\n def get_op_creation_digest(self, op_name):\n \"\"\"Get the GraphOpCreationDigest for a op in the graph.\"\"\"\n return self._op_by_name[op_name]\n\n def get_op_consumers(self, src_op_name):\n \"\"\"Get all the downstream consumers of this op.\n\n Only data (non-control) edges are tracked.\n\n Args:\n src_op_name: Name of the op providing the tensor being consumed.\n\n Returns:\n A list of (src_slot, dst_op_name, dst_slot) tuples. 
In each item of\n the list:\n src_slot: 0-based output slot of the op of which the output tensor\n is being consumed.\n dst_op_name: Name of the consuming op (e.g., \"Conv2D_3/BiasAdd\")\n dst_slot: 0-based input slot of the consuming op that receives\n the tensor from this op.\n \"\"\"\n return self._op_consumers[src_op_name]\n\n def to_json(self):\n return {\n \"name\": self.name,\n \"graph_id\": self.graph_id,\n \"outer_graph_id\": self._outer_graph_id,\n \"inner_graph_ids\": self._inner_graph_ids,\n }\n\n\nclass DebuggedDevice:\n \"\"\"Debugger data regarding a device involved in the debugged program.\n\n Properties:\n device_name: Name of the device, as a str.\n device_id: An integer ID for the device, unique for each device within\n the scope of the debugged TensorFlow program.\n \"\"\"\n\n def __init__(self,\n device_name,\n device_id):\n self._device_name = device_name\n self._device_id = device_id\n\n @property\n def device_name(self):\n return self._device_name\n\n @property\n def device_id(self):\n return self._device_id\n\n def to_json(self):\n return {\n \"device_name\": self._device_name,\n \"device_id\": self._device_id,\n }\n\n\nclass GraphOpCreationDigest(BaseDigest):\n \"\"\"Data object describing the creation of an op inside a graph.\n\n For size efficiency, this digest object does not contain any stack frames or\n any references to them. To obtain the stack frames, use\n `DataReader.read_graph_op_creation_stack_trace()`.\n\n Properties (beyond the base class):\n graph_id: Debugger-generated ID of the immediately-enclosing graph.\n op_type: Type name of the op (e.g., \"MatMul\").\n op_name: Name of the op (e.g., \"dense_1/MatMul\").\n output_tensor_ids: Debugger-generated IDs for the output(s) of the op.\n If the op produces no output tensor, this is `None`. 
Else, this is a\n `tuple` of `int`s.\n input_names: Names of the input tensors to the op.\n device_name: The name of the device that the op is placed on (if available).\n host_name: Name of the host on which the op is created.\n stack_frame_ids: IDs of the frames of the stack trace at which the op\n is created.\n \"\"\"\n\n def __init__(self,\n wall_time,\n locator,\n graph_id,\n op_type,\n op_name,\n output_tensor_ids,\n host_name,\n stack_frame_ids,\n input_names=None,\n device_name=None):\n super().__init__(wall_time, locator)\n self._graph_id = graph_id\n self._op_type = op_type\n self._op_name = op_name\n self._output_tensor_ids = _tuple_or_none(output_tensor_ids)\n self._host_name = host_name\n self._stack_frame_ids = stack_frame_ids\n self._input_names = _tuple_or_none(input_names)\n self._device_name = device_name\n\n @property\n def graph_id(self):\n return self._graph_id\n\n @property\n def op_type(self):\n return self._op_type\n\n @property\n def op_name(self):\n return self._op_name\n\n @property\n def output_tensor_ids(self):\n return self._output_tensor_ids\n\n @property\n def num_outputs(self):\n return len(self._output_tensor_ids) if self.output_tensor_ids else 0\n\n @property\n def input_names(self):\n return self._input_names\n\n @property\n def device_name(self):\n return self._device_name\n\n @property\n def host_name(self):\n return self._host_name\n\n @property\n def stack_frame_ids(self):\n return self._stack_frame_ids\n\n def to_json(self):\n output = super().to_json()\n output.update({\n \"graph_id\": self.graph_id,\n \"op_type\": self.op_type,\n \"op_name\": self.op_name,\n \"output_tensor_ids\": self.output_tensor_ids,\n \"host_name\": self.host_name,\n \"stack_frame_ids\": self.stack_frame_ids,\n \"input_names\": self.input_names,\n \"device_name\": self.device_name,\n })\n return output\n\n\nclass GraphExecutionTraceDigest(BaseDigest):\n \"\"\"Light-weight summary of a intra-graph tensor execution event.\n\n Use `DebugDataReader.read_graph_execution_trace()` on this object to read more\n detailed data (`GraphExecutionTrace`).\n\n Properties (beyond the base class):\n op_type: Type name of the executed op (e.g., \"Conv2D\").\n op_name: Name of the op (e.g., \"conv_2d_3/Conv2D\").\n output_slot: Output slot index of the tensor.\n graph_id: The debugger-generated ID of the innermost (immediately-enclosing)\n graph.\n \"\"\"\n\n def __init__(self, wall_time, locator, op_type, op_name, output_slot,\n graph_id):\n super().__init__(wall_time, locator)\n self._op_type = op_type\n self._op_name = op_name\n self._output_slot = output_slot\n self._graph_id = graph_id\n\n @property\n def op_type(self):\n return self._op_type\n\n @property\n def op_name(self):\n return self._op_name\n\n @property\n def output_slot(self):\n return self._output_slot\n\n @property\n def graph_id(self):\n return self._graph_id\n\n def to_json(self):\n output = super().to_json()\n output.update({\n \"op_type\": self.op_type,\n \"op_name\": self.op_name,\n \"output_slot\": self.output_slot,\n \"graph_id\": self.graph_id,\n })\n return output\n\n\nclass GraphExecutionTrace(GraphExecutionTraceDigest):\n \"\"\"Detailed data object describing an intra-graph tensor execution.\n\n Attributes (in addition to GraphExecutionTraceDigest):\n graph_ids: The debugger-generated IDs of the graphs that enclose the\n executed op (tensor), ordered from the outermost to the innermost.\n graph_id: The debugger-generated ID of the innermost (immediately-enclosing)\n graph.\n tensor_debug_mode: TensorDebugMode enum 
value.\n debug_tensor_value: Debug tensor values (only for non-FULL_TENSOR\n tensor_debug_mode). A list of numbers. See the documentation of the\n TensorDebugModes for the semantics of the numbers.\n device_name: Device on which the tensor resides (if available)\n \"\"\"\n\n def __init__(self,\n graph_execution_trace_digest,\n graph_ids,\n tensor_debug_mode,\n debug_tensor_value=None,\n device_name=None):\n super().__init__(graph_execution_trace_digest.wall_time,\n graph_execution_trace_digest.locator,\n graph_execution_trace_digest.op_type,\n graph_execution_trace_digest.op_name,\n graph_execution_trace_digest.output_slot,\n graph_execution_trace_digest.graph_id)\n self._graph_ids = tuple(graph_ids)\n self._tensor_debug_mode = tensor_debug_mode\n self._debug_tensor_value = debug_tensor_value\n self._device_name = device_name\n\n @property\n def graph_ids(self):\n return self._graph_ids\n\n @property\n def graph_id(self):\n return self._graph_ids[-1]\n\n @property\n def tensor_debug_mode(self):\n return self._tensor_debug_mode\n\n @property\n def debug_tensor_value(self):\n return _tuple_or_none(self._debug_tensor_value)\n\n @property\n def device_name(self):\n return self._device_name\n\n def to_json(self):\n output = super().to_json()\n output.update({\n \"graph_ids\": self.graph_ids,\n \"tensor_debug_mode\": self.tensor_debug_mode,\n \"debug_tensor_value\": self.debug_tensor_value,\n \"device_name\": self.device_name,\n })\n return output\n\n\ndef _parse_tensor_value(tensor_proto, return_list=False):\n \"\"\"Helper method for reading a tensor value from a tensor proto.\n\n The rationale for the distinction between `True` and `False value of\n `return_list` is as follows:\n - `return_list=True` is used for TensorDebugMode values other than\n FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under\n those modes, the value is guaranteed (by contract) to be a 1D float64\n tensor.\n - `return_list=False` is used for the FULL_HEALTH TensorDebugMode\n specifically. Instead, we use `numpy.ndarray` to maximally preserve\n the shape, dtype and value information regarding the underlying tensor\n value. 
Under that mode, we don't use a python list to represent the\n tensor value because that can lead to loss of information (e.g., both\n float16 and float32 dtypes get mapped to Python floats).\n\n Args:\n tensor_proto: The TensorProto instance from which the tensor value will be\n loaded.\n return_list: Whether the return value will be a nested Python list that\n comes out from `numpy.ndarray.tolist()`.\n\n Returns:\n If parsing is successful, the tensor value as a `numpy.ndarray` or the\n nested Python list converted from it.\n If parsing fails, `None`.\n \"\"\"\n try:\n ndarray = tensor_util.MakeNdarray(tensor_proto)\n return ndarray.tolist() if return_list else ndarray\n except TypeError:\n # Depending on tensor_debug_mode, certain dtype of tensors don't\n # have logged debug tensor values.\n return None\n\n\ndef _execution_digest_from_debug_event_proto(debug_event, locator):\n \"\"\"Convert a DebugEvent proto into an ExecutionDigest data object.\"\"\"\n return ExecutionDigest(\n debug_event.wall_time,\n locator,\n debug_event.execution.op_type,\n output_tensor_device_ids=(debug_event.execution.output_tensor_device_ids\n or None))\n\n\ndef _execution_from_debug_event_proto(debug_event, locator):\n \"\"\"Convert a DebugEvent proto into an Execution data object.\"\"\"\n execution_proto = debug_event.execution\n\n debug_tensor_values = None\n if (execution_proto.tensor_debug_mode ==\n debug_event_pb2.TensorDebugMode.FULL_TENSOR):\n pass # TODO(cais): Build tensor store.\n elif (execution_proto.tensor_debug_mode !=\n debug_event_pb2.TensorDebugMode.NO_TENSOR):\n debug_tensor_values = []\n for tensor_proto in execution_proto.tensor_protos:\n # TODO(cais): Refactor into a helper method.\n debug_tensor_values.append(\n _parse_tensor_value(tensor_proto, return_list=True))\n return Execution(\n _execution_digest_from_debug_event_proto(debug_event, locator),\n execution_proto.code_location.host_name,\n tuple(execution_proto.code_location.stack_frame_ids),\n execution_proto.tensor_debug_mode,\n graph_id=execution_proto.graph_id,\n input_tensor_ids=tuple(execution_proto.input_tensor_ids),\n output_tensor_ids=tuple(execution_proto.output_tensor_ids),\n debug_tensor_values=_tuple_or_none(debug_tensor_values))\n\n\nclass DebugDataReader:\n \"\"\"A reader that reads structured debugging data in the tfdbg v2 format.\n\n The set of data read by an object of this class concerns the execution history\n of a tfdbg2-instrumented TensorFlow program.\n\n Note:\n - An object of this class incrementally reads data from files that belong to\n the tfdbg v2 DebugEvent file set. Calling `update()` triggers the reading\n from the last-successful reading positions in the files.\n - This object can be used as a context manager. Its `__exit__()` call\n closes the file readers cleanly.\n \"\"\"\n\n def __init__(self, dump_root):\n self._reader = DebugEventsReader(dump_root)\n\n # TODO(cais): Implement pagination for memory constraints.\n self._execution_digests = []\n\n # Mapping (host_name, file_path) tuple to offset in the .source_files file.\n self._host_name_file_path_to_offset = collections.OrderedDict()\n # A dict mapping id to (host_name, file_path, lineno, func) tuple.\n self._stack_frame_by_id = dict()\n # Stores unprocessed stack frame IDs. 
This is necessary to handle the\n # case in which reading of the .stack_frames file gets ahead of the reading\n # of the .source_files file.\n self._unprocessed_stack_frames = dict()\n # A dict mapping id to DebuggedDevice objects.\n self._device_by_id = dict()\n # A dict mapping id to DebuggedGraph objects.\n self._graph_by_id = dict()\n self._graph_op_digests = []\n # TODO(cais): Implement pagination for memory constraints.\n self._graph_execution_trace_digests = []\n\n self._monitors = []\n\n def _add_monitor(self, monitor):\n self._monitors.append(monitor)\n\n def _load_source_files(self):\n \"\"\"Incrementally read the .source_files DebugEvent file.\"\"\"\n source_files_iter = self._reader.source_files_iterator()\n for debug_event, offset in source_files_iter:\n source_file = debug_event.source_file\n self._host_name_file_path_to_offset[\n (source_file.host_name, source_file.file_path)] = offset\n\n def _load_stack_frames(self):\n \"\"\"Incrementally read the .stack_frames file.\n\n This must be called after _load_source_files().\n It assumes that the following contract is honored by the writer of the tfdbg\n v2 data file set:\n - Before a stack frame is written to the .stack_frames file, the\n corresponding source file information must have been written to the\n .source_files file first.\n \"\"\"\n stack_frames_iter = self._reader.stack_frames_iterator()\n for debug_event, _ in stack_frames_iter:\n stack_frame_with_id = debug_event.stack_frame_with_id\n file_line_col = stack_frame_with_id.file_line_col\n self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col\n # We do the processing in a separate stage, because the reading in the\n # .source_files file may sometimes get ahead of the .source_files file.\n unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())\n for stack_frame_id in unprocessed_stack_frame_ids:\n file_line_col = self._unprocessed_stack_frames[stack_frame_id]\n if len(self._host_name_file_path_to_offset) > file_line_col.file_index:\n host_name, file_path = list(self._host_name_file_path_to_offset.keys())[\n file_line_col.file_index]\n self._stack_frame_by_id[stack_frame_id] = (\n host_name, file_path, file_line_col.line, file_line_col.func)\n del self._unprocessed_stack_frames[stack_frame_id]\n\n def _load_graphs(self):\n \"\"\"Incrementally read the .graphs file.\n\n Compiles the DebuggedGraph and GraphOpCreation data.\n \"\"\"\n graphs_iter = self._reader.graphs_iterator()\n for debug_event, offset in graphs_iter:\n if debug_event.graph_op_creation.ByteSize():\n op_creation_proto = debug_event.graph_op_creation\n op_digest = GraphOpCreationDigest(\n debug_event.wall_time,\n offset,\n op_creation_proto.graph_id,\n op_creation_proto.op_type,\n op_creation_proto.op_name,\n tuple(op_creation_proto.output_tensor_ids),\n op_creation_proto.code_location.host_name,\n tuple(op_creation_proto.code_location.stack_frame_ids),\n input_names=tuple(op_creation_proto.input_names))\n self._graph_op_digests.append(op_digest)\n debugged_graph = self._graph_by_id[op_creation_proto.graph_id]\n debugged_graph.add_op(op_digest)\n for dst_slot, input_name in enumerate(op_creation_proto.input_names):\n src_op_name, src_slot = input_name.split(\":\")\n debugged_graph.add_op_consumer(src_op_name, int(src_slot),\n op_creation_proto.op_name, dst_slot)\n\n elif debug_event.debugged_graph.ByteSize():\n graph_proto = debug_event.debugged_graph\n graph = DebuggedGraph(\n graph_proto.graph_name or None,\n graph_proto.graph_id,\n 
outer_graph_id=graph_proto.outer_context_id or None)\n self._graph_by_id[graph_proto.graph_id] = graph\n if graph_proto.outer_context_id:\n self._graph_by_id[\n graph_proto.outer_context_id].add_inner_graph_id(graph.graph_id)\n elif debug_event.debugged_device.ByteSize():\n device_proto = debug_event.debugged_device\n self._device_by_id[device_proto.device_id] = DebuggedDevice(\n device_proto.device_name, device_proto.device_id)\n\n def _load_graph_execution_traces(self):\n \"\"\"Incrementally load the .graph_execution_traces file.\"\"\"\n for i, traces_iter in enumerate(\n self._reader.graph_execution_traces_iterators()):\n for debug_event, offset in traces_iter:\n self._graph_execution_trace_digests.append(\n self._graph_execution_trace_digest_from_debug_event_proto(\n debug_event, (i, offset)))\n if self._monitors:\n graph_execution_trace = (\n self._graph_execution_trace_from_debug_event_proto(\n debug_event, (i, offset)))\n for monitor in self._monitors:\n monitor.on_graph_execution_trace(\n len(self._graph_execution_trace_digests) - 1,\n graph_execution_trace)\n\n def _graph_execution_trace_digest_from_debug_event_proto(\n self, debug_event, locator):\n trace_proto = debug_event.graph_execution_trace\n op_name = trace_proto.op_name\n op_type = self._lookup_op_type(trace_proto.tfdbg_context_id, op_name)\n return GraphExecutionTraceDigest(\n debug_event.wall_time, locator, op_type, op_name,\n trace_proto.output_slot,\n debug_event.graph_execution_trace.tfdbg_context_id)\n\n def _graph_execution_trace_from_debug_event_proto(self, debug_event, locator):\n \"\"\"Convert a DebugEvent proto into a GraphExecutionTrace data object.\"\"\"\n trace_proto = debug_event.graph_execution_trace\n graph_ids = [trace_proto.tfdbg_context_id]\n # Walk up the chain of outer contexts (graphs), so as to include all of\n # their IDs\n while True:\n graph = self.graph_by_id(graph_ids[0])\n if graph.outer_graph_id:\n graph_ids.insert(0, graph.outer_graph_id)\n else:\n break\n\n if (trace_proto.tensor_debug_mode ==\n debug_event_pb2.TensorDebugMode.FULL_TENSOR):\n debug_tensor_value = None\n else:\n debug_tensor_value = _parse_tensor_value(\n trace_proto.tensor_proto, return_list=True)\n return GraphExecutionTrace(\n self._graph_execution_trace_digest_from_debug_event_proto(\n debug_event, locator),\n graph_ids=graph_ids,\n tensor_debug_mode=trace_proto.tensor_debug_mode,\n debug_tensor_value=debug_tensor_value,\n device_name=trace_proto.device_name or None)\n\n def _lookup_op_type(self, graph_id, op_name):\n \"\"\"Lookup the type of an op by name and the immediately enclosing graph.\n\n Args:\n graph_id: Debugger-generated ID of the immediately-enclosing graph.\n op_name: Name of the op.\n\n Returns:\n Op type as a str.\n \"\"\"\n return self._graph_by_id[graph_id].get_op_creation_digest(op_name).op_type\n\n def _load_execution(self):\n \"\"\"Incrementally read the .execution file.\"\"\"\n execution_iter = self._reader.execution_iterator()\n for debug_event, offset in execution_iter:\n self._execution_digests.append(\n _execution_digest_from_debug_event_proto(debug_event, offset))\n if self._monitors:\n execution = _execution_from_debug_event_proto(debug_event, offset)\n for monitor in self._monitors:\n monitor.on_execution(len(self._execution_digests) - 1, execution)\n\n def update(self):\n \"\"\"Perform incremental read of the file set.\"\"\"\n self._load_source_files()\n self._load_stack_frames()\n self._load_graphs()\n self._load_graph_execution_traces()\n self._load_execution()\n\n def 
source_file_list(self):\n \"\"\"Get a list of source files known to the debugger data reader.\n\n Returns:\n A tuple of `(host_name, file_path)` tuples.\n \"\"\"\n return tuple(self._host_name_file_path_to_offset.keys())\n\n def source_lines(self, host_name, file_path):\n \"\"\"Read the line-by-line content of a source file.\n\n Args:\n host_name: Host name on which the source file is located.\n file_path: File path at which the source file is located.\n\n Returns:\n Lines of the source file as a `list` of `str`s.\n \"\"\"\n offset = self._host_name_file_path_to_offset[(host_name, file_path)]\n return list(self._reader.read_source_files_event(offset).source_file.lines)\n\n def starting_wall_time(self):\n \"\"\"Wall timestamp for when the debugged TensorFlow program started.\n\n Returns:\n Starting wall time as seconds since the epoch, as a `float`.\n \"\"\"\n return self._reader.starting_wall_time()\n\n def tensorflow_version(self):\n \"\"\"TensorFlow version used in the debugged TensorFlow program.\n\n Note: this is not necessarily the same as the version of TensorFlow used to\n load the DebugEvent file set.\n\n Returns:\n TensorFlow version used by the debugged program, as a `str`.\n \"\"\"\n return self._reader.tensorflow_version()\n\n def tfdbg_run_id(self):\n \"\"\"Get the debugger run ID of the debugged TensorFlow program.\"\"\"\n return self._reader.tfdbg_run_id()\n\n def outermost_graphs(self):\n \"\"\"Get the list of outermost graphs read so far.\"\"\"\n return [graph for graph in self._graph_by_id.values()\n if not graph.outer_graph_id]\n\n def graph_by_id(self, graph_id):\n \"\"\"Get a DebuggedGraph object by its ID.\"\"\"\n return self._graph_by_id[graph_id]\n\n def device_name_by_id(self, device_id):\n \"\"\"Get the name of a device by the debugger-generated ID of the device.\"\"\"\n return self._device_by_id[device_id].device_name\n\n def device_name_map(self):\n \"\"\"Get a map mapping device IDs to device names.\"\"\"\n return {device_id: self._device_by_id[device_id].device_name\n for device_id in self._device_by_id}\n\n def graph_op_digests(self, op_type=None):\n \"\"\"Get the list of the digests for graph-op creation so far.\n\n Args:\n op_type: Optional op type to filter the creation events with.\n\n Returns:\n A list of `GraphOpCreationDigest` objects.\n \"\"\"\n if op_type is not None:\n return [digest for digest in self._graph_op_digests\n if digest.op_type == op_type]\n else:\n return self._graph_op_digests\n\n def graph_execution_traces(self, digest=False, begin=None, end=None):\n \"\"\"Get all the intra-graph execution tensor traces read so far.\n\n Args:\n digest: Whether the results will be returned in the more light-weight\n digest form.\n begin: Optional beginning index for the requested traces or their digests.\n Python-style negative indices are supported.\n end: Optional ending index for the requested traces or their digests.\n Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `GraphExecutionTraceDigest` objects.\n Else: a `list` of `GraphExecutionTrace` objects.\n \"\"\"\n digests = self._graph_execution_trace_digests\n if begin is not None or end is not None:\n begin = begin or 0\n end = end or len(digests)\n digests = digests[begin:end]\n if digest:\n return digests\n else:\n return [self.read_graph_execution_trace(digest) for digest in digests]\n\n def num_graph_execution_traces(self):\n \"\"\"Get the number of graph execution traces read so far.\"\"\"\n return len(self._graph_execution_trace_digests)\n\n def 
executions(self, digest=False, begin=None, end=None):\n \"\"\"Get `Execution`s or `ExecutionDigest`s this reader has read so far.\n\n Args:\n digest: Whether the results are returned in a digest form, i.e.,\n `ExecutionDigest` format, instead of the more detailed `Execution`\n format.\n begin: Optional beginning index for the requested execution data objects\n or their digests. Python-style negative indices are supported.\n end: Optional ending index for the requested execution data objects or\n their digests. Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `ExecutionDigest` objects.\n Else: a `list` of `Execution` objects.\n \"\"\"\n digests = self._execution_digests\n if begin is not None or end is not None:\n begin = begin or 0\n end = end or len(digests)\n digests = digests[begin:end]\n if digest:\n return digests\n else:\n # TODO(cais): Optimizer performance removing repeated file open/close.\n return [self.read_execution(digest) for digest in digests]\n\n def num_executions(self):\n \"\"\"Get the number of execution events read so far.\"\"\"\n return len(self._execution_digests)\n\n def read_execution(self, execution_digest):\n \"\"\"Read a detailed Execution object.\"\"\"\n debug_event = self._reader.read_execution_event(execution_digest.locator)\n return _execution_from_debug_event_proto(debug_event,\n execution_digest.locator)\n\n def read_graph_execution_trace(self, graph_execution_trace_digest):\n \"\"\"Read the detailed graph execution trace.\n\n Args:\n graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.\n\n Returns:\n The corresponding `GraphExecutionTrace` object.\n \"\"\"\n debug_event = self._reader.read_graph_execution_traces_event(\n graph_execution_trace_digest.locator)\n return self._graph_execution_trace_from_debug_event_proto(\n debug_event, graph_execution_trace_digest.locator)\n\n def read_execution_stack_trace(self, execution):\n \"\"\"Read the stack trace of a given Execution object.\n\n Args:\n execution: The Execution object of interest.\n\n Returns:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n \"\"\"\n host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]\n return (host_name, [\n self._stack_frame_by_id[frame_id][1:]\n for frame_id in execution.stack_frame_ids])\n\n def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):\n \"\"\"Read the stack trace of a given graph op creation object.\n\n Args:\n graph_op_creation_digest: The GraphOpCreationDigest object of interest.\n\n Returns:\n A tuple consisting of:\n 1. The host name.\n 2. 
The stack trace, as a list of (file_path, lineno, func) tuples.\n \"\"\"\n return graph_op_creation_digest.host_name, [\n self._stack_frame_by_id[frame_id][1:]\n for frame_id in graph_op_creation_digest.stack_frame_ids\n ]\n\n # TODO(cais): Add graph_execution_digests() with an ExecutionDigest\n # as a kwarg, to establish the association between top-level and intra-graph\n # execution events.\n\n def execution_to_tensor_values(self, execution):\n \"\"\"Read the full tensor values from an Execution or ExecutionDigest.\n\n Args:\n execution: An `Execution` or `ExecutionDigest` object.\n\n Returns:\n A list of numpy arrays representing the output tensor values of the\n execution event.\n \"\"\"\n debug_event = self._reader.read_execution_event(execution.locator)\n return [_parse_tensor_value(tensor_proto)\n for tensor_proto in debug_event.execution.tensor_protos]\n\n def graph_execution_trace_to_tensor_value(self, trace):\n \"\"\"Read the full tensor value from a GraphExecutionTrace or its digest.\n\n Args:\n trace: A `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.\n\n Returns:\n A numpy array representing the output tensor value of the intra-graph\n tensor execution event.\n \"\"\"\n debug_event = self._reader.read_graph_execution_traces_event(trace.locator)\n return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto)\n\n def symbolic_tensor_id(self, graph_id, op_name, output_slot):\n \"\"\"Get the ID of a symbolic tensor.\n\n Args:\n graph_id: The ID of the immediately-enclosing graph.\n op_name: Name of the op.\n output_slot: Output slot as an int.\n\n Returns:\n The ID of the symbolic tensor as an int.\n \"\"\"\n return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot)\n\n def graph_execution_trace_to_tensor_id(self, trace):\n \"\"\"Get symbolic tensor ID from a GraphExecutionTraceDigest object.\"\"\"\n return self.symbolic_tensor_id(\n trace.graph_id, trace.op_name, trace.output_slot)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n del exception_type, exception_value, traceback # Unused\n self._reader.close()\n", "output": ["_execution_digest_from_debug_event_proto", "_parse_tensor_value", "_execution_from_debug_event_proto", "_tuple_or_none", "ExecutionDigest", "GraphExecutionTraceDigest", "GraphOpCreationDigest", "DebuggedGraph", "BaseDigest", "GraphExecutionTrace", "DebugDataReader", "Execution", "DebugEventsReader", "DebuggedDevice"], "metadata": {"file_path": "tensorflow-master/tensorflow/python/debug/lib/debug_events_reader.py", "file_length": 15066, "symbol_dict": [{"symbol": "_execution_from_debug_event_proto", "type": "mannual_defined_function", "byte_location": 30532, "location": 9367}, {"symbol": "_execution_digest_from_debug_event_proto", "type": "mannual_defined_function", "byte_location": 30162, "location": 9260}, {"symbol": "_parse_tensor_value", "type": "mannual_defined_function", "byte_location": 28636, "location": 8821}, {"symbol": "_tuple_or_none", "type": "mannual_defined_function", "byte_location": 14474, "location": 4350}, {"symbol": "DebuggedDevice", "type": "mannual_defined_class", "byte_location": 21814, "location": 6677}, {"symbol": "DebuggedGraph", "type": "mannual_defined_class", "byte_location": 18047, "location": 5427}, {"symbol": "DebugEventsReader", "type": "mannual_defined_class", "byte_location": 1170, "location": 312}, {"symbol": "GraphExecutionTraceDigest", "type": "mannual_defined_class", "byte_location": 25206, "location": 7749}, {"symbol": 
"BaseDigest", "type": "mannual_defined_class", "byte_location": 12209, "location": 3675}, {"symbol": "ExecutionDigest", "type": "mannual_defined_class", "byte_location": 13105, "location": 3938}, {"symbol": "GraphOpCreationDigest", "type": "mannual_defined_class", "byte_location": 22501, "location": 6885}, {"symbol": "GraphExecutionTrace", "type": "mannual_defined_class", "byte_location": 26496, "location": 8192}, {"symbol": "DebugDataReader", "type": "mannual_defined_class", "byte_location": 31673, "location": 9729}, {"symbol": "Execution", "type": "mannual_defined_class", "byte_location": 14541, "location": 4374}]}} {"input": "# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converts a frozen graph into a TFLite FlatBuffer.\"\"\"\n\nimport distutils.spawn\nimport enum\nimport hashlib\nimport os as _os\nimport platform as _platform\nimport subprocess as _subprocess\nimport tempfile as _tempfile\nfrom typing import Optional\nimport warnings\n\nfrom tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2\nfrom tensorflow.compiler.mlir.quantization.stablehlo import quantization_options_pb2 as quant_opts_pb2\nfrom tensorflow.lite.python import lite_constants\nfrom tensorflow.lite.python import util\nfrom tensorflow.lite.python import wrap_toco\nfrom tensorflow.lite.python.convert_phase import Component\nfrom tensorflow.lite.python.convert_phase import convert_phase\nfrom tensorflow.lite.python.convert_phase import ConverterError\nfrom tensorflow.lite.python.convert_phase import SubComponent\nfrom tensorflow.lite.python.metrics import converter_error_data_pb2\nfrom tensorflow.lite.python.metrics.wrapper import metrics_wrapper as _metrics_wrapper\nfrom tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2\nfrom tensorflow.lite.toco import toco_flags_pb2 as _conversion_flags_pb2\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.lite.tools import flatbuffer_utils\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import resource_loader as _resource_loader\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export as _tf_export\n\n\ndef _is_quantized_input_stats_required(\n conversion_flags: _conversion_flags_pb2.TocoFlags,\n) -> bool:\n \"\"\"Checks if the `quantized_input_stats` flag is required for conversion.\n\n Args:\n conversion_flags: A protocol buffer describing the conversion process.\n\n Returns:\n True, if the `inference_type` or the `inference_input_type` is a quantized\n type and it is not post training quantization, else False.\n \"\"\"\n quantized_inference_types = [\n _types_pb2.QUANTIZED_UINT8,\n _types_pb2.QUANTIZED_INT8,\n ]\n return (\n conversion_flags.inference_type in quantized_inference_types\n or conversion_flags.inference_input_type in 
quantized_inference_types\n ) and not conversion_flags.post_training_quantize\n\n\ndef convert_tensor_tf_type_to_tflite_type(\n tf_type: dtypes.DType, usage: str = \"\"\n) -> _types_pb2.IODataType:\n \"\"\"Convert tensor type from tf type to tflite type.\n\n Args:\n tf_type: TensorFlow type.\n usage: Text describing the reason for invoking this function.\n\n Raises:\n ValueError: If `tf_type` is unsupported.\n\n Returns:\n tflite_type: TFLite type. Refer to lite/toco/types.proto.\n \"\"\"\n mapping = {\n dtypes.float16: _types_pb2.FLOAT16,\n dtypes.float32: _types_pb2.FLOAT,\n dtypes.float64: _types_pb2.FLOAT64,\n dtypes.int8: _types_pb2.INT8,\n dtypes.int16: _types_pb2.INT16,\n dtypes.uint16: _types_pb2.UINT16,\n dtypes.int32: _types_pb2.INT32,\n dtypes.int64: _types_pb2.INT64,\n dtypes.uint8: _types_pb2.UINT8,\n dtypes.uint32: _types_pb2.UINT32,\n dtypes.uint64: _types_pb2.UINT64,\n dtypes.string: _types_pb2.STRING,\n dtypes.bool: _types_pb2.BOOL,\n dtypes.complex64: _types_pb2.COMPLEX64,\n dtypes.complex128: _types_pb2.COMPLEX128,\n }\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError(\n \"Unsupported TensorFlow type `{0}` provided for the {1}\".format(\n tf_type, usage\n )\n )\n return tflite_type\n\n\n# Only a few restricted tensor types are allowed for explicitly setting\n# inference/input/output types.\ndef convert_inference_tf_type_to_tflite_type(\n tf_type: dtypes.DType, usage: str = \"\"\n) -> _types_pb2.IODataType:\n \"\"\"Convert inference type from tf type to tflite type.\n\n Args:\n tf_type: TensorFlow type.\n usage: Text describing the reason for invoking this function.\n\n Raises:\n ValueError: If `tf_type` is unsupported.\n\n Returns:\n tflite_type: TFLite type. Refer to lite/toco/types.proto.\n \"\"\"\n mapping = {\n dtypes.float32: _types_pb2.FLOAT,\n dtypes.uint8: _types_pb2.QUANTIZED_UINT8,\n dtypes.int8: _types_pb2.QUANTIZED_INT8,\n dtypes.int16: _types_pb2.QUANTIZED_INT16,\n }\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError(\n \"Unsupported TensorFlow type `{0}` provided for the {1}\".format(\n tf_type, usage\n )\n )\n return tflite_type\n\n\n# Find the deprecated conversion binary using the resource loader if using from\n# bazel, otherwise we are in a pip where console_scripts already has the tool.\nif lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:\n _deprecated_conversion_binary = \"\"\nelse:\n _deprecated_conversion_binary = _resource_loader.get_path_to_datafile(\n \"../toco/python/toco_from_protos\"\n )\n if not _os.path.exists(_deprecated_conversion_binary):\n _deprecated_conversion_binary = \"toco_from_protos\"\n\n\ndef _try_convert_to_unicode(output):\n if output is None:\n return \"\"\n\n if isinstance(output, bytes):\n try:\n return output.decode(\"utf-8\")\n except UnicodeDecodeError:\n pass\n return output\n\n\n@_tf_export(\"lite.OpsSet\")\nclass OpsSet(enum.Enum):\n \"\"\"Enum class defining the sets of ops available to generate TFLite models.\n\n WARNING: Experimental interface, subject to change.\n \"\"\"\n\n # Convert model using TensorFlow Lite builtin ops.\n TFLITE_BUILTINS = \"TFLITE_BUILTINS\"\n\n # Convert model using TensorFlow ops. 
Not all TensorFlow ops are available.\n # WARNING: Experimental interface, subject to change.\n SELECT_TF_OPS = \"SELECT_TF_OPS\"\n\n # Convert model using only TensorFlow Lite quantized int8 operations.\n # Specifying this will throw an error for operations that do not yet have\n # quantized implementations.\n TFLITE_BUILTINS_INT8 = \"TFLITE_BUILTINS_INT8\"\n\n # Convert model using only TensorFlow Lite operations with quantized int8\n # weights, int16 activations and int64 bias.\n # Specifying this will throw an error for operations that do not yet have\n # quantized implementations.\n # This quantization mode may be used in models for super-resolution,\n # audio signal processing or image de-noising. It improves accuracy\n # significantly, but only slightly increases the model size.\n # WARNING: These ops are currently experimental and have not yet been\n # finalized.\n # They are only compatible with CPU execution, and have not been optimized for\n # production.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = (\n \"EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\"\n )\n\n # Convert model using only stablehlo ops.\n # This option can not be combined with other OpsSets.\n # The feature is in early development.\n # The code to execute StableHLO ops in the runtime is to be implemented\n # and the serialization format is not stabilized yet.\n EXPERIMENTAL_STABLEHLO_OPS = \"EXPERIMENTAL_STABLEHLO_OPS\"\n\n def __str__(self):\n return str(self.value)\n\n @staticmethod\n def get_options():\n \"\"\"Returns a list of OpsSet options as a list of strings.\"\"\"\n return [str(option) for option in list(OpsSet)]\n\n\n@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE)\ndef mlir_quantize(\n input_data_str,\n disable_per_channel=False,\n fully_quantize=False,\n inference_type=_types_pb2.QUANTIZED_INT8,\n input_data_type=dtypes.float32,\n output_data_type=dtypes.float32,\n enable_numeric_verify=False,\n enable_whole_model_verify=False,\n denylisted_ops=None,\n denylisted_nodes=None,\n enable_variable_quantization=False,\n):\n \"\"\"Quantize `input_data_str` with calibration results.\n\n Args:\n input_data_str: Input data in serialized form (e.g. a TFLITE model with\n calibration results).\n disable_per_channel: Bool indicating whether to do per-channel or per-tensor\n quantization\n fully_quantize: Bool indicating whether to fully quantize the model. Besides\n model body, the input/output will be quantized as well.\n inference_type: Data type for the activations. The default value is int8.\n input_data_type: Data type for the inputs. The default value is float32.\n output_data_type: Data type for the outputs. The default value is float32.\n enable_numeric_verify: Experimental. Subject to change. Bool indicating\n whether to add NumericVerify ops into the debug mode quantized model.\n enable_whole_model_verify: Experimental. Subject to change. Bool indicating\n whether to add verification for layer by layer, or on whole model. When\n disabled (per-layer) float and quantized ops will be run from same input\n (output of previous quantized layer). When enabled, float and quantized\n ops will run with respective float and quantized output of previous ops.\n denylisted_ops: Experimental. Subject to change. Set of ops to denylist.\n denylisted_nodes: Experimental. Subject to change. Set of notes to denylist.\n enable_variable_quantization: Experimental. Subject to change. 
Bool\n indicating whether to enable quantization of the residual variables\n remaining after the variable freezing pass.\n\n Returns:\n Quantized model in serialized form (e.g. a TFLITE model) with floating-point\n inputs and outputs.\n \"\"\"\n return wrap_toco.wrapped_experimental_mlir_quantize(\n input_data_str,\n disable_per_channel,\n fully_quantize,\n inference_type,\n convert_tensor_tf_type_to_tflite_type(input_data_type),\n convert_tensor_tf_type_to_tflite_type(output_data_type),\n enable_numeric_verify,\n enable_whole_model_verify,\n denylisted_ops,\n denylisted_nodes,\n enable_variable_quantization,\n )\n\n\n@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.SPARSIFY)\ndef mlir_sparsify(input_data_str):\n \"\"\"Sparsify `input_data_str` to encode sparse tensor with proper format.\n\n Args:\n input_data_str: Input data in serialized form (e.g. a TFLITE model).\n\n Returns:\n Sparsified model in serialized form (e.g. a TFLITE model).\n \"\"\"\n return wrap_toco.wrapped_experimental_mlir_sparsify(input_data_str)\n\n\ndef register_custom_opdefs(custom_opdefs_list):\n \"\"\"Register the given custom opdefs to the TensorFlow global op registry.\n\n Args:\n custom_opdefs_list: String representing the custom ops OpDefs that are\n included in the GraphDef.\n\n Returns:\n True if the registration is successfully completed.\n \"\"\"\n return wrap_toco.wrapped_register_custom_opdefs(custom_opdefs_list)\n\n\ndef convert(\n model_flags: _model_flags_pb2.ModelFlags,\n conversion_flags: _conversion_flags_pb2.TocoFlags,\n input_data_str: Optional[str] = None,\n debug_info_str: Optional[str] = None,\n enable_mlir_converter: bool = True,\n):\n \"\"\"Converts `input_data_str` to a TFLite model.\n\n Args:\n model_flags: Proto describing model properties, see `model_flags.proto`.\n conversion_flags: Proto describing conversion properties, see\n `toco/toco_flags.proto`.\n input_data_str: Input data in serialized form (e.g. a graphdef is common, or\n it can be hlo text or proto)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information.\n enable_mlir_converter: Enables MLIR-based conversion.\n\n Returns:\n Converted model in serialized form (e.g. a TFLITE model is common).\n Raises:\n ConverterError: When conversion fails in TFLiteConverter, usually due to\n ops not being supported.\n RuntimeError: When conversion fails, an exception is raised with the error\n message embedded.\n \"\"\"\n # Historically, deprecated conversion failures would trigger a crash, so we\n # attempt to run the converter out-of-process. The current MLIR conversion\n # pipeline surfaces errors instead, and can be safely run in-process.\n if enable_mlir_converter or not _deprecated_conversion_binary:\n try:\n return wrap_toco.wrapped_toco_convert(\n model_flags.SerializeToString(),\n conversion_flags.SerializeToString(),\n input_data_str,\n debug_info_str,\n enable_mlir_converter,\n )\n except Exception as e:\n converter_error = ConverterError(str(e))\n\n for error_data in _metrics_wrapper.retrieve_collected_errors():\n converter_error.append_error(error_data)\n # Seldom we encounter the case where an unsupported\n # `StatefulPartitionedCallOp` is not inlined and remains in the final\n # IR. 
If this occurs we can set `guarantee_all_funcs_one_use` and retry.\n # This makes the converter copy functions definitions called by\n # multiple StatefulPartitionedCall, thus allowing them to be properly\n # inlined.\n if (\n error_data.error_code\n == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR\n and not conversion_flags.guarantee_all_funcs_one_use\n ):\n conversion_flags.guarantee_all_funcs_one_use = True\n return convert(\n model_flags,\n conversion_flags,\n input_data_str,\n debug_info_str,\n enable_mlir_converter,\n )\n raise converter_error\n\n return _run_deprecated_conversion_binary(\n model_flags.SerializeToString(),\n conversion_flags.SerializeToString(),\n input_data_str,\n debug_info_str,\n )\n\n\n@convert_phase(\n Component.CONVERT_TF_TO_TFLITE_MODEL,\n SubComponent.CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER,\n)\ndef _run_deprecated_conversion_binary(\n model_flags_str, conversion_flags_str, input_data_str, debug_info_str=None\n):\n \"\"\"Convert `input_data_str` using deprecated conversion binary.\n\n Args:\n model_flags_str: Serialized proto describing model properties, see\n `model_flags.proto`.\n conversion_flags_str: Serialized proto describing TFLite converter\n properties, see `toco/toco_flags.proto`.\n input_data_str: Input data in serialized form (e.g. a graphdef is common)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information. (default None)\n\n Returns:\n Converted model in serialized form (e.g. a TFLITE model is common).\n Raises:\n ConverterError: When cannot find the deprecated conversion binary.\n RuntimeError: When conversion fails, an exception is raised with the error\n message embedded.\n \"\"\"\n if distutils.spawn.find_executable(_deprecated_conversion_binary) is None:\n raise ConverterError(\"\"\"Could not find `toco_from_protos` binary, make sure\nyour virtualenv bin directory or pip local bin directory is in your path.\nIn particular, if you have installed TensorFlow with --user, make sure you\nadd the install directory to your path.\n\nFor example:\nLinux: export PATH=$PATH:~/.local/bin/\nMac: export PATH=$PATH:~/Library/Python//bin\n\nAlternative, use virtualenv.\"\"\")\n # Windows and TemporaryFile are not that useful together,\n # since you cannot have two readers/writers. So we have to\n # make the temporaries and close and delete them explicitly.\n conversion_filename: str = None\n model_filename: str = None\n input_filename: str = None\n output_filename: str = None\n try:\n # Build all input files\n with _tempfile.NamedTemporaryFile(\n delete=False\n ) as fp_conversion, _tempfile.NamedTemporaryFile(\n delete=False\n ) as fp_model, _tempfile.NamedTemporaryFile(\n delete=False\n ) as fp_input, _tempfile.NamedTemporaryFile(\n delete=False\n ) as fp_debug:\n conversion_filename = fp_conversion.name\n input_filename = fp_input.name\n model_filename = fp_model.name\n debug_filename = fp_debug.name\n\n fp_model.write(model_flags_str)\n fp_conversion.write(conversion_flags_str)\n fp_input.write(input_data_str)\n debug_info_str = debug_info_str if debug_info_str else \"\"\n # if debug_info_str contains a \"string value\", then the call to\n # fp_debug.write(debug_info_str) will fail with the following error\n #\n # TypeError: a bytes-like object is required, not 'str'\n #\n # Some of the subtests within the \"convert_test\" unit-test fail\n # with the error shown above. 
So watch out for that scenario and\n # convert debug_info_str to bytes where needed\n if not isinstance(debug_info_str, bytes):\n fp_debug.write(debug_info_str.encode(\"utf-8\"))\n else:\n fp_debug.write(debug_info_str)\n\n # Reserve an output file\n with _tempfile.NamedTemporaryFile(delete=False) as fp:\n output_filename = fp.name\n\n # Run\n cmd = [\n _deprecated_conversion_binary,\n model_filename,\n conversion_filename,\n input_filename,\n output_filename,\n \"--debug_proto_file={}\".format(debug_filename),\n ]\n cmdline = \" \".join(cmd)\n is_windows = _platform.system() == \"Windows\"\n proc = _subprocess.Popen(\n cmdline,\n shell=True,\n stdout=_subprocess.PIPE,\n stderr=_subprocess.STDOUT,\n close_fds=not is_windows,\n )\n stdout, stderr = proc.communicate()\n exitcode = proc.returncode\n if exitcode == 0:\n with open(output_filename, \"rb\") as fp:\n return fp.read()\n else:\n stdout = _try_convert_to_unicode(stdout)\n stderr = _try_convert_to_unicode(stderr)\n raise ConverterError(\"See console for info.\\n%s\\n%s\\n\" % (stdout, stderr))\n finally:\n # Must manually cleanup files.\n for filename in [\n conversion_filename,\n input_filename,\n model_filename,\n output_filename,\n ]:\n try:\n _os.unlink(filename)\n except (OSError, TypeError):\n pass\n\n\ndef build_model_flags(\n change_concat_input_ranges=False,\n allow_nonexistent_arrays=False,\n saved_model_dir=None,\n saved_model_version=0,\n saved_model_tags=None,\n saved_model_exported_names=None,\n **_\n):\n \"\"\"Builds the model flags object from params.\n\n Args:\n change_concat_input_ranges: Boolean to change behavior of min/max ranges for\n inputs and outputs of the concat operator for quantized models. Changes\n the ranges of concat operator overlap when true. (default False)\n allow_nonexistent_arrays: Allow specifying array names that don't exist or\n are unused in the final graph. (default False)\n saved_model_dir: Filepath of the saved model to be converted. This value\n will be non-empty only when the saved model import path will be used.\n Otherwises, the graph def-based conversion will be processed.\n saved_model_version: SavedModel file format version of The saved model file\n to be converted. This value will be set only when the SavedModel import\n path will be used.\n saved_model_tags: Set of string saved model tags, formatted in the\n comma-separated value. This value will be set only when the SavedModel\n import path will be used.\n saved_model_exported_names: Names to be exported (default: export all) when\n the saved model import path is on. 
This value will be set only when the\n SavedModel import path will be used.\n\n Returns:\n model_flags: protocol buffer describing the model.\n \"\"\"\n model_flags = _model_flags_pb2.ModelFlags()\n model_flags.change_concat_input_ranges = change_concat_input_ranges\n model_flags.allow_nonexistent_arrays = allow_nonexistent_arrays\n if saved_model_dir:\n model_flags.saved_model_dir = saved_model_dir\n model_flags.saved_model_version = saved_model_version\n if saved_model_tags:\n model_flags.saved_model_tags.extend(saved_model_tags)\n if saved_model_exported_names:\n model_flags.saved_model_exported_names.extend(saved_model_exported_names)\n return model_flags\n\n\ndef build_conversion_flags(\n inference_type=dtypes.float32,\n inference_input_type=None,\n input_format=lite_constants.TENSORFLOW_GRAPHDEF,\n output_format=lite_constants.TFLITE,\n default_ranges_stats=None,\n drop_control_dependency=True,\n reorder_across_fake_quant=False,\n allow_custom_ops=False,\n post_training_quantize=False,\n quantize_to_float16=False,\n dump_graphviz_dir=None,\n dump_graphviz_video=False,\n target_ops=None,\n conversion_summary_dir=None,\n select_user_tf_ops=None,\n allow_all_select_tf_ops=False,\n enable_tflite_resource_variables=True,\n unfold_batchmatmul=False,\n legalize_custom_tensor_list_ops=False,\n lower_tensor_list_ops=True,\n default_to_single_batch_in_tensor_list_ops=False,\n accumulation_type=None,\n allow_bfloat16=False,\n unfold_large_splat_constant=False,\n supported_backends=None,\n disable_per_channel_quantization=False,\n enable_mlir_dynamic_range_quantizer=False,\n tf_quantization_mode=None,\n disable_infer_tensor_range=False,\n use_fake_quant_num_bits=False,\n enable_dynamic_update_slice=False,\n preserve_assert_op=False,\n guarantee_all_funcs_one_use=False,\n enable_mlir_variable_quantization=False,\n disable_fuse_mul_and_fc=False,\n quantization_options: Optional[quant_opts_pb2.QuantizationOptions] = None,\n ir_dump_dir=None,\n ir_dump_pass_regex=None,\n ir_dump_func_regex=None,\n enable_timing=None,\n print_ir_before=None,\n print_ir_after=None,\n print_ir_module_scope=None,\n elide_elementsattrs_if_larger=None,\n quantization_config: Optional[\n quantization_config_pb2.QuantizationConfig\n ] = None,\n use_buffer_offset=False,\n reduce_type_precision=False,\n qdq_conversion_mode=None,\n **_\n):\n \"\"\"Builds protocol buffer describing a conversion of a model.\n\n Typically this is to convert from TensorFlow GraphDef to TFLite, in which\n case the default `input_format` and `output_format` are sufficient.\n\n Args:\n inference_type: Data type of numeric arrays, excluding the input layer.\n (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})\n inference_input_type: Data type of the numeric arrays in the input layer. If\n `inference_input_type` is in {tf.int8, tf.uint8}, then\n `quantized_input_stats` must be provided. (default is the value assigned\n to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})\n input_format: Type of data to read. (default TENSORFLOW_GRAPHDEF, must be in\n {TENSORFLOW_GRAPHDEF})\n output_format: Output file format. (default TFLITE, must be in {TFLITE,\n GRAPHVIZ_DOT})\n default_ranges_stats: Tuple of integers representing (min, max) range values\n for all arrays without a specified range. Intended for experimenting with\n quantization via \"dummy quantization\". (default None)\n drop_control_dependency: Boolean indicating whether to drop control\n dependencies silently. This is due to TFLite not supporting control\n dependencies. 
(default True)\n reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant\n nodes in unexpected locations. Used when the location of the FakeQuant\n nodes is preventing graph transformations necessary to convert the graph.\n Results in a graph that differs from the quantized training graph,\n potentially causing differing arithmetic behavior. (default False)\n allow_custom_ops: Boolean indicating whether to allow custom operations.\n When false any unknown operation is an error. When true, custom ops are\n created for any op that is unknown. The developer will need to provide\n these to the TensorFlow Lite runtime with a custom resolver. (default\n False)\n post_training_quantize: Boolean indicating whether to quantize the weights\n of the converted float model. Model size will be reduced and there will be\n latency improvements (at the cost of accuracy). (default False) If\n quantization_options is set, all quantization arg will be ignored.\n quantize_to_float16: Boolean indicating whether to convert float buffers to\n float16. (default False)\n dump_graphviz_dir: Full filepath of folder to dump the graphs at various\n stages of processing GraphViz .dot files. Preferred over\n --output_format=GRAPHVIZ_DOT in order to keep the requirements of the\n output file. (default None)\n dump_graphviz_video: Boolean indicating whether to dump the graph after\n every graph transformation. (default False)\n target_ops: Experimental flag, subject to change. Set of OpsSet options\n indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))\n conversion_summary_dir: A string, the path to the generated conversion logs.\n select_user_tf_ops: List of user's defined TensorFlow ops need to be\n supported in the TensorFlow Lite runtime. These ops will be supported as\n select TensorFlow ops.\n allow_all_select_tf_ops: If True, automatically add all TF ops (including\n custom TF ops) to the converted model as flex ops.\n enable_tflite_resource_variables: Experimental flag, subject to change.\n Enables conversion of resource variables. (default False)\n unfold_batchmatmul: Whether to unfold tf.BatchMatMul to a set of\n tfl.fully_connected ops. If not, translate to tfl.batch_matmul.\n legalize_custom_tensor_list_ops: Whether to legalize `tf.TensorList*` ops to\n tfl custom if they can all be supported.\n lower_tensor_list_ops: Whether to lower tensor list ops to builtin ops. If\n not, use Flex tensor list ops.\n default_to_single_batch_in_tensor_list_ops: Whether to force to use batch\n size one when the tensor list ops has the unspecified batch size.\n accumulation_type: Data type of the accumulators in quantized inference.\n Typically used for float16 quantization and is either fp16 or fp32.\n allow_bfloat16: Whether the converted model supports reduced precision\n inference with the bfloat16 type.\n unfold_large_splat_constant: Whether to unfold large splat constant tensors\n in the flatbuffer model to reduce size.\n supported_backends: List of TFLite backends which needs to check\n compatibility.\n disable_per_channel_quantization: Disable per-channel quantized weights for\n dynamic range quantization. 
Only per-tensor quantization will be used.\n enable_mlir_dynamic_range_quantizer: Enable MLIR dynamic range quantization.\n If False, the old converter dynamic range quantizer is used.\n tf_quantization_mode: Indicates the mode of TF Quantization when the output\n model is used for TF Quantization.\n disable_infer_tensor_range: Disable infering tensor ranges.\n use_fake_quant_num_bits: Allow quantization parameters to be calculated from\n num_bits attribute.\n enable_dynamic_update_slice: Enable to convert to DynamicUpdateSlice op.\n (default: False).\n preserve_assert_op: Whether to preserve `TF::AssertOp` (default: False).\n guarantee_all_funcs_one_use: Whether to clone functions so that each\n function only has a single use. This option will be helpful if the\n conversion fails when the `PartitionedCall` or `StatefulPartitionedCall`\n can't be properly inlined (default: False).\n enable_mlir_variable_quantization: Enable MLIR variable quantization. There\n is a variable freezing pass, but some variables may not be fully frozen by\n it. This flag enables quantization of those residual variables in the MLIR\n graph.\n disable_fuse_mul_and_fc: Disable fusing input multiplication with\n fullyconnected operations. Useful when quantizing weights.\n quantization_options: [Deprecated] Config to indicate quantization options\n of each components (ex: weight, bias, activation). This can be a preset\n method or a custom method, and allows finer, modular control. This option\n will override any other existing quantization flags. We plan on gradually\n migrating all quantization-related specs into this option.\n ir_dump_dir: A string specifying the target directory to output MLIR dumps\n produced during conversion. If populated, enables MLIR dumps.\n ir_dump_pass_regex: A string containing a regular expression for filtering\n the pass names to be dumped. Effective only if `ir_dump_dir` is populated.\n ir_dump_func_regex: A string containing a regular expression for filtering\n the function names to be dumped. Effective only if `ir_dump_dir` is\n populated.\n enable_timing: A boolean, if set to true reports the execution time of each\n MLIR pass.\n print_ir_before: A string containing a regular expression. If specified,\n prints MLIR before passes which match.\n print_ir_after: A string containing a regular expression. If specified,\n prints MLIR after passes which match.\n print_ir_module_scope: A boolean, if set to true always print the top-level\n operation when printing IR for print_ir_[before|after].\n elide_elementsattrs_if_larger: An int, if specified elides ElementsAttrs\n with '...' that have more elements than the given upper limit.\n quantization_config: Configures the StableHLO Quantizer. See the comments in\n `QuantizationConfig` protobuf definition for details.\n use_buffer_offset: Force the model use buffer_offset & buffer_size fields\n instead of data. i.e. store the constant tensor and custom op binaries\n outside of Flatbuffers\n reduce_type_precision: Convert some tensor types to a lower precision if all\n values within that tensor are within the range of the lower precision.\n This could have side effects e.g. 
reduced flatbuffer size.\n qdq_conversion_mode: If set, assume input model is a quantized model\n represented with QDQ ops and convert to quantized kernels.\n\n Returns:\n conversion_flags: protocol buffer describing the conversion process.\n Raises:\n ValueError, if the input tensor type is unknown.\n \"\"\"\n conversion_flags = _conversion_flags_pb2.TocoFlags()\n conversion_flags.inference_type = convert_inference_tf_type_to_tflite_type(\n inference_type, usage=\"inference_type flag\"\n )\n if inference_input_type:\n conversion_flags.inference_input_type = (\n convert_inference_tf_type_to_tflite_type(\n inference_input_type, usage=\"inference_input_type flag\"\n )\n )\n else:\n conversion_flags.inference_input_type = conversion_flags.inference_type\n conversion_flags.input_format = input_format\n conversion_flags.output_format = output_format\n if default_ranges_stats:\n conversion_flags.default_ranges_min = default_ranges_stats[0]\n conversion_flags.default_ranges_max = default_ranges_stats[1]\n conversion_flags.drop_control_dependency = drop_control_dependency\n conversion_flags.reorder_across_fake_quant = reorder_across_fake_quant\n conversion_flags.allow_custom_ops = allow_custom_ops\n conversion_flags.post_training_quantize = post_training_quantize\n conversion_flags.quantize_to_float16 = quantize_to_float16\n if dump_graphviz_dir:\n conversion_flags.dump_graphviz_dir = dump_graphviz_dir\n conversion_flags.dump_graphviz_include_video = dump_graphviz_video\n if target_ops:\n if OpsSet.SELECT_TF_OPS in target_ops:\n conversion_flags.enable_select_tf_ops = True\n if set(target_ops) == {OpsSet.SELECT_TF_OPS}:\n conversion_flags.force_select_tf_ops = True\n if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops:\n conversion_flags.convert_to_stablehlo = True\n if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops and len(target_ops) > 1:\n raise ValueError(\n \"StableHLO Ops set can not be specified with other Ops set together\"\n )\n if conversion_summary_dir:\n conversion_flags.conversion_summary_dir = conversion_summary_dir\n if select_user_tf_ops:\n conversion_flags.select_user_tf_ops.extend(select_user_tf_ops)\n conversion_flags.allow_all_select_tf_ops = allow_all_select_tf_ops\n conversion_flags.enable_tflite_resource_variables = (\n enable_tflite_resource_variables\n )\n conversion_flags.unfold_batchmatmul = unfold_batchmatmul\n conversion_flags.legalize_custom_tensor_list_ops = (\n legalize_custom_tensor_list_ops\n )\n conversion_flags.lower_tensor_list_ops = lower_tensor_list_ops\n conversion_flags.default_to_single_batch_in_tensor_list_ops = (\n default_to_single_batch_in_tensor_list_ops\n )\n if accumulation_type:\n conversion_flags.accumulation_type = convert_tensor_tf_type_to_tflite_type(\n accumulation_type, usage=\"accumulation_type flag\"\n )\n conversion_flags.allow_bfloat16 = allow_bfloat16\n conversion_flags.unfold_large_splat_constant = unfold_large_splat_constant\n if supported_backends:\n conversion_flags.supported_backends.extend(supported_backends)\n conversion_flags.disable_per_channel_quantization = (\n disable_per_channel_quantization\n )\n conversion_flags.enable_mlir_dynamic_range_quantizer = (\n enable_mlir_dynamic_range_quantizer\n )\n conversion_flags.enable_dynamic_update_slice = enable_dynamic_update_slice\n conversion_flags.preserve_assert_op = preserve_assert_op\n conversion_flags.guarantee_all_funcs_one_use = guarantee_all_funcs_one_use\n if tf_quantization_mode:\n conversion_flags.tf_quantization_mode = tf_quantization_mode\n 
conversion_flags.disable_infer_tensor_range = disable_infer_tensor_range\n conversion_flags.use_fake_quant_num_bits = use_fake_quant_num_bits\n conversion_flags.enable_mlir_variable_quantization = (\n enable_mlir_variable_quantization\n )\n conversion_flags.disable_fuse_mul_and_fc = disable_fuse_mul_and_fc\n if quantization_options: # Deprecated\n conversion_flags.quantization_options.CopyFrom(quantization_options)\n if quantization_config:\n conversion_flags.quantization_config.CopyFrom(quantization_config)\n\n # Transfer debug options. Check for existence before populating in order to\n # leverage defaults specified in proto definition.\n # TODO: b/319329480 - Match the debug_options fields with the user-facing\n # flags.\n if ir_dump_dir is not None:\n conversion_flags.debug_options.ir_dump_dir = ir_dump_dir\n if ir_dump_pass_regex is not None:\n conversion_flags.debug_options.ir_dump_pass_regex = ir_dump_pass_regex\n if ir_dump_func_regex is not None:\n conversion_flags.debug_options.ir_dump_func_regex = ir_dump_func_regex\n if enable_timing is not None:\n conversion_flags.debug_options.enable_timing = enable_timing\n if print_ir_before is not None:\n conversion_flags.debug_options.print_ir_before = print_ir_before\n if print_ir_after is not None:\n conversion_flags.debug_options.print_ir_after = print_ir_after\n if print_ir_module_scope is not None:\n conversion_flags.debug_options.print_ir_module_scope = print_ir_module_scope\n if elide_elementsattrs_if_larger is not None:\n conversion_flags.debug_options.elide_elementsattrs_if_larger = (\n elide_elementsattrs_if_larger\n )\n\n if use_buffer_offset is not None:\n conversion_flags.use_buffer_offset = use_buffer_offset\n if reduce_type_precision is not None:\n conversion_flags.reduce_type_precision = reduce_type_precision\n if qdq_conversion_mode is not None:\n conversion_flags.qdq_conversion_mode = qdq_conversion_mode\n return conversion_flags\n\n\n@convert_phase(\n Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF\n)\ndef convert_graphdef_with_arrays(\n input_data,\n input_arrays_with_shape,\n output_arrays,\n control_output_arrays,\n **kwargs\n):\n \"\"\"Convert a frozen GraphDef that can't be loaded in TF.\n\n Conversion can be customized by providing arguments that are forwarded to\n `build_model_flags` and `build_conversion_flags` (see documentation).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_arrays_with_shape: Tuple of strings representing input tensor names\n and list of integers representing input shapes (e.g., [(\"foo\" : [1, 16,\n 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when\n `input_tensors` is None.\n output_arrays: List of output tensors to freeze graph with. Use only when\n graph cannot be loaded into TensorFlow and when `output_tensors` is None.\n control_output_arrays: Control output node names. This is used when\n converting a Graph with no output tensors. For example, if the graph's\n last operation is a Print op, just specify that op's name in this field.\n This can be used together with the `output_arrays` parameter.\n **kwargs: See `build_model_flags` and `build_conversion_flags`.\n\n Returns:\n The converted data. 
For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_conversion_flags`.\n \"\"\"\n model_flags = build_model_flags(**kwargs)\n conversion_flags = build_conversion_flags(**kwargs)\n enable_mlir_converter = kwargs.get(\"enable_mlir_converter\", True)\n quantized_input_stats = kwargs.get(\"quantized_input_stats\", None)\n\n for idx, (name, shape) in enumerate(input_arrays_with_shape):\n input_array = model_flags.input_arrays.add()\n if _is_quantized_input_stats_required(conversion_flags):\n if quantized_input_stats:\n input_array.mean_value, input_array.std_value = quantized_input_stats[\n idx\n ]\n else:\n raise ValueError(\n \"The `quantized_input_stats` flag must be defined when either \"\n \"`inference_type` flag or `inference_input_type` flag is set to \"\n \"tf.int8 or tf.uint8.\"\n )\n input_array.name = name\n input_array.shape.dims.extend(list(map(int, shape)))\n\n if output_arrays:\n for name in output_arrays:\n model_flags.output_arrays.append(name)\n if control_output_arrays:\n for name in control_output_arrays:\n model_flags.control_output_arrays.append(name)\n\n data = convert(\n model_flags,\n conversion_flags,\n input_data.SerializeToString(),\n debug_info_str=None,\n enable_mlir_converter=enable_mlir_converter,\n )\n return data\n\n\n@convert_phase(\n Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF\n)\ndef convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):\n \"\"\"Convert a frozen GraphDef model using the TF Lite converter.\n\n Conversion can be customized by providing arguments that are forwarded to\n `build_model_flags` and `build_conversion_flags` (see documentation).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n **kwargs: See `build_model_flags` and `build_conversion_flags`.\n\n Returns:\n The converted data. 
For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_conversion_flags`.\n \"\"\"\n model_flags = build_model_flags(**kwargs)\n conversion_flags = build_conversion_flags(**kwargs)\n saved_model_dir = kwargs.get(\"saved_model_dir\", None)\n input_shapes = kwargs.get(\"input_shapes\", None)\n enable_mlir_converter = kwargs.get(\"enable_mlir_converter\", True)\n quantized_input_stats = kwargs.get(\"quantized_input_stats\", None)\n debug_info = kwargs.get(\"debug_info\", None)\n\n for idx, input_tensor in enumerate(input_tensors):\n input_array = model_flags.input_arrays.add()\n if saved_model_dir:\n input_array.name = input_tensor.name\n else:\n input_array.name = util.get_tensor_name(input_tensor)\n input_array.data_type = convert_tensor_tf_type_to_tflite_type(\n input_tensor.dtype, usage=\"input type of the TensorFlow model\"\n )\n\n if _is_quantized_input_stats_required(conversion_flags):\n if quantized_input_stats:\n input_array.mean_value, input_array.std_value = quantized_input_stats[\n idx\n ]\n else:\n # We should ideally raise an error here, but we don't as it would break\n # several models/projects that depend on this workflow.\n warnings.warn(\n \"Statistics for quantized inputs were expected, but not \"\n \"specified; continuing anyway.\"\n )\n\n if input_shapes is None:\n shape = input_tensor.shape\n else:\n shape = input_shapes[idx]\n\n if shape.rank is not None:\n # Create shapes with -1 for unknown dimensions.\n dims = []\n for dim in shape:\n if dim is None or (\n isinstance(dim, tensor_shape.Dimension) and dim.value is None\n ):\n dims.append(-1)\n else:\n dims.append(int(dim))\n input_array.shape.dims.extend(dims)\n input_array.shape.unknown_rank = False\n else:\n input_array.shape.unknown_rank = True\n\n for output_tensor in output_tensors:\n if saved_model_dir:\n model_flags.output_arrays.append(output_tensor.name)\n else:\n model_flags.output_arrays.append(util.get_tensor_name(output_tensor))\n\n data = convert(\n model_flags,\n conversion_flags,\n input_data.SerializeToString(),\n debug_info_str=debug_info.SerializeToString() if debug_info else None,\n enable_mlir_converter=enable_mlir_converter,\n )\n return data\n\n\n@convert_phase(\n Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_SAVED_MODEL\n)\ndef convert_saved_model(**kwargs):\n \"\"\"Converts a SavedModel using TF Lite converter.\"\"\"\n model_flags = build_model_flags(**kwargs)\n conversion_flags = build_conversion_flags(**kwargs)\n data = convert(\n model_flags,\n conversion_flags,\n input_data_str=None,\n debug_info_str=None,\n enable_mlir_converter=True,\n )\n return data\n\n\n@convert_phase(\n Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_JAX_HLO\n)\ndef convert_jax_hlo(input_content, input_names, is_proto_format, **kwargs):\n \"\"\"Converts a Jax hlo-based model using TFLite converter.\"\"\"\n model_flags = _model_flags_pb2.ModelFlags()\n model_flags.use_hlo_import = True\n if is_proto_format:\n model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_PROTO\n else:\n model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_TEXT\n\n # Build input names.\n for input_name in input_names:\n input_array = model_flags.input_arrays.add()\n input_array.name = input_name\n\n conversion_flags = build_conversion_flags(**kwargs)\n data = convert(\n model_flags,\n conversion_flags,\n input_data_str=input_content,\n debug_info_str=None,\n enable_mlir_converter=True,\n )\n return 
data\n\n\n@_tf_export(v1=[\"lite.toco_convert\"])\n@deprecation.deprecated(None, \"Use `lite.TFLiteConverter` instead.\")\ndef toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):\n \"\"\"Convert a TensorFlow GraphDef to TFLite.\n\n This function is deprecated. Please use `tf.lite.TFLiteConverter` API instead.\n Conversion can be customized by providing arguments that are forwarded to\n `build_model_flags` and `build_conversion_flags` (see documentation for\n details).\n Args:\n input_data: Input data (i.e. often `sess.graph_def`).\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n *args: See `build_model_flags` and `build_conversion_flags`.\n **kwargs: See `build_model_flags` and `build_conversion_flags`.\n\n Returns:\n The converted TensorFlow Lite model in a bytes array.\n\n Raises:\n Defined in `convert`.\n \"\"\"\n kwargs[\"enable_mlir_converter\"] = kwargs.get(\"enable_mlir_converter\", False)\n return convert_graphdef(\n input_data, input_tensors, output_tensors, *args, **kwargs\n )\n\n\ndef deduplicate_readonly_buffers(tflite_model):\n \"\"\"Generates a new model byte array after deduplicating readonly buffers.\n\n This function should be invoked after the model optimization toolkit. The\n model optimization toolkit assumes that each tensor object owns its each\n buffer separately.\n\n Args:\n tflite_model: TFLite flatbuffer in a byte array to be deduplicated.\n\n Returns:\n TFLite flatbuffer in a bytes array, processed with the deduplication method.\n \"\"\"\n # Load TFLite Flatbuffer byte array into an object.\n model = flatbuffer_utils.convert_bytearray_to_object(tflite_model)\n\n # Get all the read-only buffers, which can be modified without causing any\n # issue in the graph invocation stage.\n read_only_buffer_indices = set()\n for subgraph in model.subgraphs:\n # To get all the read-only buffers:\n # (1) Get all read-only input tensors.\n # (2) Discard intermediate or output tensors.\n # (3) Discard the subgraph's input/output tensors.\n # (4) Gather the buffers of the read-only input tensors.\n\n # (1) Get read-only input tensors.\n read_only_input_tensor_indices = set()\n for op in subgraph.operators:\n if op.inputs is None:\n continue\n for i, input_tensor_idx in enumerate(op.inputs):\n # Ignore mutable tensors.\n if op.mutatingVariableInputs is not None:\n # Ignore invalid tensors.\n if (\n i < len(op.mutatingVariableInputs)\n and op.mutatingVariableInputs[i]\n ):\n continue\n # Ignore variable tensors.\n if subgraph.tensors[input_tensor_idx].isVariable:\n continue\n read_only_input_tensor_indices.add(input_tensor_idx)\n\n # (2) Discard intermediate or output tensors.\n for op in subgraph.operators:\n if op.outputs is not None:\n for output_tensor_idx in op.outputs:\n read_only_input_tensor_indices.discard(output_tensor_idx)\n if op.intermediates is not None:\n for intermediate_tensor_idx in op.intermediates:\n read_only_input_tensor_indices.discard(intermediate_tensor_idx)\n\n # (3) Discard the subgraph's input and output tensors.\n if subgraph.inputs is not None:\n for input_tensor_idx in subgraph.inputs:\n read_only_input_tensor_indices.discard(input_tensor_idx)\n if subgraph.outputs is not None:\n for output_tensor_idx in subgraph.outputs:\n read_only_input_tensor_indices.discard(output_tensor_idx)\n\n # (4) Gather the buffers of the read-only input tensors.\n for tensor_idx in read_only_input_tensor_indices:\n 
read_only_buffer_indices.add(subgraph.tensors[tensor_idx].buffer)\n\n # Ignore invalid negative index or zero-sized buffers.\n for buffer_idx in read_only_buffer_indices.copy():\n if buffer_idx < 0 or (\n model.buffers[buffer_idx].data is None\n or isinstance(model.buffers[buffer_idx].data, list)\n or model.buffers[buffer_idx].data.size == 0\n ):\n read_only_buffer_indices.discard(buffer_idx)\n\n class BufferIndex:\n \"\"\"A class to store index, size, hash of the buffers in TFLite model.\"\"\"\n\n def __init__(self, idx, size, hash_value):\n self.idx = idx\n self.size = size\n self.hash_value = hash_value\n\n read_only_buffers = list(\n map(\n lambda index: BufferIndex( # pylint: disable=g-long-lambda\n index,\n model.buffers[index].data.size,\n hashlib.md5(model.buffers[index].data.data.tobytes()).hexdigest(),\n ),\n read_only_buffer_indices,\n )\n )\n\n # Sort read_only_buffers by buffer size & hash in descending order.\n read_only_buffers = sorted(\n read_only_buffers,\n key=lambda buffer: (buffer.size, buffer.hash_value),\n reverse=True,\n )\n\n # Create a map of duplicate buffers (same size and same type).\n # eg: In [1, 2, 3, 4, 5, 6] if (1, 4, 6) and (2, 5) are each, groups of buffer\n # indices of the same size and type, then the map would be {4:1, 6:1, 5:2}\n duplicate_buffer_map = {}\n for i, buffer_i in enumerate(read_only_buffers):\n # This buffer is a duplicate.\n if buffer_i.idx in duplicate_buffer_map:\n continue\n # This buffer is unique. Scan rest of the list to find duplicates\n # of this buffer and mark them accordingly.\n for buffer_j in read_only_buffers[i + 1 :]:\n if buffer_j.idx in duplicate_buffer_map:\n continue\n if buffer_i.size != buffer_j.size:\n break\n if buffer_i.hash_value != buffer_j.hash_value:\n continue\n # Found duplicate. 
Nullify j-th buffer and use i-th buffer instead.\n duplicate_buffer_map[buffer_j.idx] = buffer_i.idx\n\n # Make the duplicated tensors use the single shared buffer index.\n for subgraph in model.subgraphs:\n for op in subgraph.operators:\n if op.inputs is None:\n continue\n for input_tensor in op.inputs:\n buffer_idx = subgraph.tensors[input_tensor].buffer\n if buffer_idx in duplicate_buffer_map:\n subgraph.tensors[input_tensor].buffer = duplicate_buffer_map[\n buffer_idx\n ]\n\n # Nullify the unused buffers.\n for idx in duplicate_buffer_map:\n model.buffers[idx].data = None\n\n # Return a TFLite flatbuffer as a byte array.\n return flatbuffer_utils.convert_object_to_bytearray(model)\n", "output": ["convert_inference_tf_type_to_tflite_type", "deduplicate_readonly_buffers", "register_custom_opdefs", "build_conversion_flags", "build_model_flags", "convert", "_is_quantized_input_stats_required", "convert_tensor_tf_type_to_tflite_type", "_try_convert_to_unicode", "OpsSet", "BufferIndex"], "metadata": {"file_path": "tensorflow-master/tensorflow/lite/python/convert.py", "file_length": 15240, "symbol_dict": [{"symbol": "convert_inference_tf_type_to_tflite_type", "type": "mannual_defined_function", "byte_location": 4311, "location": 1384}, {"symbol": "_try_convert_to_unicode", "type": "mannual_defined_function", "byte_location": 5637, "location": 1875}, {"symbol": "build_model_flags", "type": "mannual_defined_function", "byte_location": 18266, "location": 5737}, {"symbol": "convert_tensor_tf_type_to_tflite_type", "type": "mannual_defined_function", "byte_location": 2954, "location": 839}, {"symbol": "register_custom_opdefs", "type": "mannual_defined_function", "byte_location": 10782, "location": 3486}, {"symbol": "build_conversion_flags", "type": "mannual_defined_function", "byte_location": 20293, "location": 6323}, {"symbol": "deduplicate_readonly_buffers", "type": "mannual_defined_function", "byte_location": 44003, "location": 13585}, {"symbol": "convert", "type": "mannual_defined_function", "byte_location": 11168, "location": 3606}, {"symbol": "_is_quantized_input_stats_required", "type": "mannual_defined_function", "byte_location": 2217, "location": 604}, {"symbol": "OpsSet", "type": "mannual_defined_class", "byte_location": 5874, "location": 1957}, {"symbol": "BufferIndex", "type": "mannual_defined_class", "byte_location": 47003, "location": 14511}]}} {"input": "\"\"\"Common preprocessing utilities for working with text data\"\"\"\nimport re\nimport heapq\nimport os.path as op\nfrom collections import Counter, OrderedDict, defaultdict\n\nimport numpy as np\n\n\n# This list of English stop words is taken from the \"Glasgow Information\n# Retrieval Group\". 
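The deduplication helper defined above works purely on a serialized flatbuffer, so it can also be applied as a standalone post-processing step to an existing .tflite artifact. A minimal usage sketch, with placeholder file paths (the read/write steps are assumptions for illustration, not part of the original module):

# Illustrative only: "model.tflite" is a placeholder path, and
# deduplicate_readonly_buffers is the helper defined above.
with open("model.tflite", "rb") as f:
    tflite_model = f.read()

deduped = deduplicate_readonly_buffers(tflite_model)  # returns a byte array

with open("model_deduped.tflite", "wb") as f:
    f.write(bytes(deduped))

Identical read-only weight buffers then share a single buffer entry, which can shrink models whose optimization pipeline produced duplicated constants.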
The original list can be found at\n# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words\n_STOP_WORDS = set(\n (\n \"a about above across after afterwards again against all almost alone \"\n \"along already also although always am among amongst amoungst amount an \"\n \"and another any anyhow anyone anything anyway anywhere are around as at \"\n \"back be became because become becomes becoming been before beforehand \"\n \"behind being below beside besides between beyond bill both bottom but by \"\n \"call can cannot cant co con could couldnt cry de describe detail do done \"\n \"down due during each eg eight either eleven else elsewhere empty enough \"\n \"etc even ever every everyone everything everywhere except few fifteen \"\n \"fifty fill find fire first five for former formerly forty found four from \"\n \"front full further get give go had has hasnt have he hence her here \"\n \"hereafter hereby herein hereupon hers herself him himself his how however \"\n \"hundred i ie if in inc indeed interest into is it its itself keep last \"\n \"latter latterly least less ltd made many may me meanwhile might mill mine \"\n \"more moreover most mostly move much must my myself name namely neither \"\n \"never nevertheless next nine no nobody none noone nor not nothing now \"\n \"nowhere of off often on once one only onto or other others otherwise our \"\n \"ours ourselves out over own part per perhaps please put rather re same see \"\n \"seem seemed seeming seems serious several she should show side since \"\n \"sincere six sixty so some somehow someone something sometime sometimes \"\n \"somewhere still such system take ten than that the their them themselves \"\n \"then thence there thereafter thereby therefore therein thereupon these \"\n \"they thick thin third this those though three through throughout thru thus \"\n \"to together too top toward towards twelve twenty two un under until up \"\n \"upon us very via was we well were what whatever when whence whenever where \"\n \"whereafter whereas whereby wherein whereupon wherever whether which while \"\n \"whither who whoever whole whom whose why will with within without would \"\n \"yet you your yours yourself yourselves\"\n ).split(\" \"),\n)\n\n_WORD_REGEX = re.compile(r\"(?u)\\b\\w\\w+\\b\") # sklearn default\n_WORD_REGEX_W_PUNC = re.compile(r\"(?u)\\w+|[^a-zA-Z0-9\\s]\")\n_WORD_REGEX_W_PUNC_AND_WHITESPACE = re.compile(r\"(?u)s?\\w+\\s?|\\s?[^a-zA-Z0-9\\s]\\s?\")\n\n_PUNC_BYTE_REGEX = re.compile(\n r\"(33|34|35|36|37|38|39|40|41|42|43|44|45|\"\n r\"46|47|58|59|60|61|62|63|64|91|92|93|94|\"\n r\"95|96|123|124|125|126)\",\n)\n_PUNCTUATION = \"!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\"\n_PUNC_TABLE = str.maketrans(\"\", \"\", _PUNCTUATION)\n\n\ndef ngrams(sequence, N):\n \"\"\"Return all `N`-grams of the elements in `sequence`\"\"\"\n assert N >= 1\n return list(zip(*[sequence[i:] for i in range(N)]))\n\n\ndef tokenize_whitespace(\n line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs,\n):\n \"\"\"\n Split a string at any whitespace characters, optionally removing\n punctuation and stop-words in the process.\n \"\"\"\n line = line.lower() if lowercase else line\n words = line.split()\n line = [strip_punctuation(w) for w in words] if filter_punctuation else line\n return remove_stop_words(words) if filter_stopwords else words\n\n\ndef tokenize_words(\n line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs,\n):\n \"\"\"\n Split a string into individual words, optionally removing 
punctuation and\n stop-words in the process.\n \"\"\"\n REGEX = _WORD_REGEX if filter_punctuation else _WORD_REGEX_W_PUNC\n words = REGEX.findall(line.lower() if lowercase else line)\n return remove_stop_words(words) if filter_stopwords else words\n\n\ndef tokenize_words_bytes(\n line,\n lowercase=True,\n filter_stopwords=True,\n filter_punctuation=True,\n encoding=\"utf-8\",\n **kwargs,\n):\n \"\"\"\n Split a string into individual words, optionally removing punctuation and\n stop-words in the process. Translate each word into a list of bytes.\n \"\"\"\n words = tokenize_words(\n line,\n lowercase=lowercase,\n filter_stopwords=filter_stopwords,\n filter_punctuation=filter_punctuation,\n **kwargs,\n )\n words = [\" \".join([str(i) for i in w.encode(encoding)]) for w in words]\n return words\n\n\ndef tokenize_bytes_raw(line, encoding=\"utf-8\", splitter=None, **kwargs):\n \"\"\"\n Convert the characters in `line` to a collection of bytes. Each byte is\n represented in decimal as an integer between 0 and 255.\n\n Parameters\n ----------\n line : str\n The string to tokenize.\n encoding : str\n The encoding scheme for the characters in `line`. Default is `'utf-8'`.\n splitter : {'punctuation', None}\n If `'punctuation'`, split the string at any punctuation character\n before encoding into bytes. If None, do not split `line` at all.\n Default is None.\n\n Returns\n -------\n bytes : list\n A list of the byte-encoded characters in `line`. Each item in the list\n is a string of space-separated integers between 0 and 255 representing\n the bytes encoding the characters in `line`.\n \"\"\"\n byte_str = [\" \".join([str(i) for i in line.encode(encoding)])]\n if splitter == \"punctuation\":\n byte_str = _PUNC_BYTE_REGEX.sub(r\"-\\1-\", byte_str[0]).split(\"-\")\n return byte_str\n\n\ndef bytes_to_chars(byte_list, encoding=\"utf-8\"):\n \"\"\"\n Decode bytes (represented as an integer between 0 and 255) to characters in\n the specified encoding.\n \"\"\"\n hex_array = [hex(a).replace(\"0x\", \"\") for a in byte_list]\n hex_array = \" \".join([h if len(h) > 1 else f\"0{h}\" for h in hex_array])\n return bytearray.fromhex(hex_array).decode(encoding)\n\n\ndef tokenize_chars(line, lowercase=True, filter_punctuation=True, **kwargs):\n \"\"\"\n Split a string into individual characters, optionally removing punctuation\n and stop-words in the process.\n \"\"\"\n line = line.lower() if lowercase else line\n line = strip_punctuation(line) if filter_punctuation else line\n chars = list(re.sub(\" {2,}\", \" \", line).strip())\n return chars\n\n\ndef remove_stop_words(words):\n \"\"\"Remove stop words from a list of word strings\"\"\"\n return [w for w in words if w.lower() not in _STOP_WORDS]\n\n\ndef strip_punctuation(line):\n \"\"\"Remove punctuation from a string\"\"\"\n return line.translate(_PUNC_TABLE).strip()\n\n\n#######################################################################\n# Byte-Pair Encoder #\n#######################################################################\n\n\nclass BytePairEncoder(object):\n def __init__(self, max_merges=3000, encoding=\"utf-8\"):\n \"\"\"\n A byte-pair encoder for sub-word embeddings.\n\n Notes\n -----\n Byte-pair encoding [1][2] is a compression algorithm that iteratively\n replaces the most frequently ocurring byte pairs in a set of documents\n with a new, single token. 
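To make one merge step concrete, here is a minimal sketch on an invented two-word, byte-level vocabulary. The space-separated decimal bytes and the '-'-joined merged token mirror the format used by this class; the plain str.replace is a simplified stand-in for the boundary-aware regex merge:

from collections import defaultdict

# Invented toy vocabulary: "low" (bytes 108 111 119) seen 3 times,
# "loop" (bytes 108 111 111 112) seen twice.
vocab = {"108 111 119": 3, "108 111 111 112": 2}

# Count adjacent byte pairs, weighted by word frequency.
pair_counts = defaultdict(int)
for word, count in vocab.items():
    toks = word.split(" ")
    for pair in zip(toks, toks[1:]):
        pair_counts[pair] += count

best = max(pair_counts, key=pair_counts.get)  # ('108', '111'), total count 5
merged = "-".join(best)                       # '108-111' is the new token

vocab = {w.replace(" ".join(best), merged): c for w, c in vocab.items()}
# vocab == {'108-111 119': 3, '108-111 111 112': 2}

Repeating this max_merges times yields the codebook of multi-byte tokens.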
It has gained popularity as a preprocessing\n step for many NLP tasks due to its simplicity and expressiveness: using\n a base coebook of just 256 unique tokens (bytes), any string can be\n encoded.\n\n References\n ----------\n .. [1] Gage, P. (1994). A new algorithm for data compression. *C\n Users Journal, 12(2)*, 23\u201338.\n .. [2] Sennrich, R., Haddow, B., & Birch, A. (2015). Neural machine\n translation of rare words with subword units, *Proceedings of the\n 54th Annual Meeting of the Association for Computational\n Linguistics,* 1715-1725.\n\n Parameters\n ----------\n max_merges : int\n The maximum number of byte pair merges to perform during the\n :meth:`fit` operation. Default is 3000.\n encoding : str\n The encoding scheme for the documents used to train the encoder.\n Default is `'utf-8'`.\n \"\"\"\n self.parameters = {\n \"max_merges\": max_merges,\n \"encoding\": encoding,\n }\n\n # initialize the byte <-> token and token <-> byte dictionaries. bytes\n # are represented in decimal as integers between 0 and 255. there is a\n # 1:1 correspondence between token and byte representations up to 255.\n self.byte2token = OrderedDict({i: i for i in range(256)})\n self.token2byte = OrderedDict({v: k for k, v in self.byte2token.items()})\n\n def fit(self, corpus_fps, encoding=\"utf-8\"):\n \"\"\"\n Train a byte pair codebook on a set of documents.\n\n Parameters\n ----------\n corpus_fps : str or list of strs\n The filepath / list of filepaths for the document(s) to be used to\n learn the byte pair codebook.\n encoding : str\n The text encoding for documents. Common entries are either 'utf-8'\n (no header byte), or 'utf-8-sig' (header byte). Default is\n 'utf-8'.\n \"\"\"\n vocab = (\n Vocabulary(\n lowercase=False,\n min_count=None,\n max_tokens=None,\n filter_stopwords=False,\n filter_punctuation=False,\n tokenizer=\"bytes\",\n )\n .fit(corpus_fps, encoding=encoding)\n .counts\n )\n\n # iteratively merge the most common byte bigram across the documents\n for _ in range(self.parameters[\"max_merges\"]):\n pair_counts = self._get_counts(vocab)\n most_common_bigram = max(pair_counts, key=pair_counts.get)\n vocab = self._merge(most_common_bigram, vocab)\n\n token_bytes = set()\n for k in vocab.keys():\n token_bytes = token_bytes.union([w for w in k.split(\" \") if \"-\" in w])\n\n for i, t in enumerate(token_bytes):\n byte_tuple = tuple(int(j) for j in t.split(\"-\"))\n self.token2byte[256 + i] = byte_tuple\n self.byte2token[byte_tuple] = 256 + i\n\n return self\n\n def _get_counts(self, vocab):\n \"\"\"Collect bigram counts for the tokens in vocab\"\"\"\n pair_counts = defaultdict(int)\n for word, count in vocab.items():\n pairs = ngrams(word.split(\" \"), 2)\n for p in pairs:\n pair_counts[p] += count\n return pair_counts\n\n def _merge(self, bigram, vocab):\n \"\"\"Replace `bigram` with a single token and update vocab accordingly\"\"\"\n v_out = {}\n bg = re.escape(\" \".join(bigram))\n bigram_regex = re.compile(r\"(?>> B = BytePairEncoder(max_merges=100).fit(\"./example.txt\")\n >>> encoded_tokens = B.transform(\"Hello! 
How are you \ud83d\ude01 ?\")\n >>> encoded_tokens\n [[72, 879, 474, ...]]\n \"\"\"\n if isinstance(text, str):\n text = [text]\n return [self._transform(string) for string in text]\n\n def _transform(self, text):\n \"\"\"Transform a single text string to a list of byte-pair IDs\"\"\"\n P = self.parameters\n _bytes = tokenize_bytes_raw(text, encoding=P[\"encoding\"])\n\n encoded = []\n for w in _bytes:\n l, r = 0, len(w)\n w = [int(i) for i in w.split(\" \")]\n\n while l < len(w):\n candidate = tuple(w[l:r])\n\n if len(candidate) > 1 and candidate in self.byte2token:\n # candidate is a collection of several bytes and is in our\n # vocab\n encoded.append(self.byte2token[candidate])\n l, r = r, len(w)\n elif len(candidate) == 1:\n # candidate is a single byte and should always be in our\n # vocab\n encoded.append(candidate[0])\n l, r = r, len(w)\n else:\n # candidate is not in vocab, so we decrease our context\n # window by 1 and try again\n r -= 1\n return encoded\n\n def inverse_transform(self, codes):\n \"\"\"\n Transform an encoded sequence of byte pair codeword IDs back into\n human-readable text.\n\n Parameters\n ----------\n codes : list of `N` lists\n A list of `N` lists. Each sublist is a collection of integer\n byte-pair token IDs representing a particular text string.\n\n Returns\n -------\n text: list of `N` strings\n The decoded strings corresponding to the `N` sublists in `codes`.\n\n Examples\n --------\n >>> B = BytePairEncoder(max_merges=100).fit(\"./example.txt\")\n >>> encoded_tokens = B.transform(\"Hello! How are you \ud83d\ude01 ?\")\n >>> encoded_tokens\n [[72, 879, 474, ...]]\n >>> B.inverse_transform(encoded_tokens)\n [\"Hello! How are you \ud83d\ude01 ?\"]\n \"\"\"\n if isinstance(codes[0], int):\n codes = [codes]\n\n decoded = []\n P = self.parameters\n\n for code in codes:\n _bytes = [self.token2byte[t] if t > 255 else [t] for t in code]\n _bytes = [b for blist in _bytes for b in blist]\n decoded.append(bytes_to_chars(_bytes, encoding=P[\"encoding\"]))\n return decoded\n\n @property\n def codebook(self):\n \"\"\"\n A list of the learned byte pair codewords, decoded into human-readable\n format\n \"\"\"\n return [\n self.inverse_transform(t)[0]\n for t in self.byte2token.keys()\n if isinstance(t, tuple)\n ]\n\n @property\n def tokens(self):\n \"\"\"A list of the byte pair codeword IDs\"\"\"\n return list(self.token2byte.keys())\n\n\n#######################################################################\n# Huffman Tree #\n#######################################################################\n\n\nclass Node(object):\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.left = None\n self.right = None\n\n def __gt__(self, other):\n \"\"\"Greater than\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val > other.val\n\n def __ge__(self, other):\n \"\"\"Greater than or equal to\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val >= other.val\n\n def __lt__(self, other):\n \"\"\"Less than\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val < other.val\n\n def __le__(self, other):\n \"\"\"Less than or equal to\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val <= other.val\n\n\nclass HuffmanEncoder(object):\n def fit(self, text):\n \"\"\"\n Build a Huffman tree for the tokens in `text` and compute each token's\n binary encoding.\n\n Notes\n -----\n In a Huffman code, tokens that occur more frequently are (generally)\n represented using fewer bits. 
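As a quick illustration of that property, the sketch below derives Huffman code lengths for an invented frequency table using the same min-heap merging strategy described just below. Exact bit patterns depend on tie-breaking, but the most frequent token always receives the shortest code:

import heapq
from itertools import count

def huffman_code_lengths(freqs):
    """Return token -> codeword length for a dict of token frequencies."""
    tie = count()  # tie-breaker so heapq never compares the token lists
    heap = [(f, next(tie), [tok]) for tok, f in freqs.items()]
    heapq.heapify(heap)
    lengths = {tok: 0 for tok in freqs}
    while len(heap) > 1:
        f1, _, toks1 = heapq.heappop(heap)  # two least frequent subtrees...
        f2, _, toks2 = heapq.heappop(heap)
        for tok in toks1 + toks2:           # ...move one level deeper
            lengths[tok] += 1
        heapq.heappush(heap, (f1 + f2, next(tie), toks1 + toks2))
    return lengths

# Invented counts: 'the' is common, 'zebra' is rare.
print(huffman_code_lengths({"the": 50, "of": 20, "cat": 10, "zebra": 5}))
# {'the': 1, 'of': 2, 'cat': 3, 'zebra': 3}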
Huffman codes produce the minimum expected\n codeword length among all methods for encoding tokens individually.\n\n Huffman codes correspond to paths through a binary tree, with 1\n corresponding to \"move right\" and 0 corresponding to \"move left\". In\n contrast to standard binary trees, the Huffman tree is constructed from the\n bottom up. Construction begins by initializing a min-heap priority queue\n consisting of each token in the corpus, with priority corresponding to the\n token frequency. At each step, the two most infrequent tokens in the corpus\n are removed and become the children of a parent pseudotoken whose\n \"frequency\" is the sum of the frequencies of its children. This new parent\n pseudotoken is added to the priority queue and the process is repeated\n recursively until no tokens remain.\n\n Parameters\n ----------\n text: list of strs or :class:`Vocabulary` instance\n The tokenized text or a pretrained :class:`Vocabulary` object to use for\n building the Huffman code.\n \"\"\"\n self._build_tree(text)\n self._generate_codes()\n\n def transform(self, text):\n \"\"\"\n Transform the words in `text` into their Huffman-code representations.\n\n Parameters\n ----------\n text: list of `N` strings\n The list of words to encode\n\n Returns\n -------\n codes : list of `N` binary strings\n The encoded words in `text`\n \"\"\"\n if isinstance(text, str):\n text = [text]\n for token in set(text):\n if token not in self._item2code:\n raise Warning(\"Token '{}' not in Huffman tree. Skipping\".format(token))\n return [self._item2code.get(t, None) for t in text]\n\n def inverse_transform(self, codes):\n \"\"\"\n Transform an encoded sequence of bit-strings back into words.\n\n Parameters\n ----------\n codes : list of `N` binary strings\n A list of encoded bit-strings, represented as strings.\n\n Returns\n -------\n text: list of `N` strings\n The decoded text.\n \"\"\"\n if isinstance(codes, str):\n codes = [codes]\n for code in set(codes):\n if code not in self._code2item:\n raise Warning(\"Code '{}' not in Huffman tree. 
Skipping\".format(code))\n return [self._code2item.get(c, None) for c in codes]\n\n @property\n def tokens(self):\n \"\"\"A list the unique tokens in `text`\"\"\"\n return list(self._item2code.keys())\n\n @property\n def codes(self):\n \"\"\"A list with the Huffman code for each unique token in `text`\"\"\"\n return list(self._code2item.keys())\n\n def _counter(self, text):\n counts = {}\n for item in text:\n counts[item] = counts.get(item, 0) + 1\n return counts\n\n def _build_tree(self, text):\n \"\"\"Construct Huffman Tree\"\"\"\n PQ = []\n\n if isinstance(text, Vocabulary):\n counts = text.counts\n else:\n counts = self._counter(text)\n\n for (k, c) in counts.items():\n PQ.append(Node(k, c))\n\n # create a priority queue with priority = item frequency\n heapq.heapify(PQ)\n\n while len(PQ) > 1:\n node1 = heapq.heappop(PQ) # item with smallest frequency\n node2 = heapq.heappop(PQ) # item with second smallest frequency\n\n parent = Node(None, node1.val + node2.val)\n parent.left = node1\n parent.right = node2\n\n heapq.heappush(PQ, parent)\n\n self._root = heapq.heappop(PQ)\n\n def _generate_codes(self):\n current_code = \"\"\n self._item2code = {}\n self._code2item = {}\n self._build_code(self._root, current_code)\n\n def _build_code(self, root, current_code):\n if root is None:\n return\n\n if root.key is not None:\n self._item2code[root.key] = current_code\n self._code2item[current_code] = root.key\n return\n\n # 0 = move left, 1 = move right\n self._build_code(root.left, current_code + \"0\")\n self._build_code(root.right, current_code + \"1\")\n\n\n#######################################################################\n# Containers #\n#######################################################################\n\n\nclass Token:\n def __init__(self, word):\n self.count = 0\n self.word = word\n\n def __repr__(self):\n \"\"\"A string representation of the token\"\"\"\n return \"Token(word='{}', count={})\".format(self.word, self.count)\n\n\nclass TFIDFEncoder:\n def __init__(\n self,\n vocab=None,\n lowercase=True,\n min_count=0,\n smooth_idf=True,\n max_tokens=None,\n input_type=\"files\",\n filter_stopwords=True,\n filter_punctuation=True,\n tokenizer=\"words\",\n ):\n r\"\"\"\n An object for compiling and encoding the term-frequency\n inverse-document-frequency (TF-IDF) representation of the tokens in a\n text corpus.\n\n Notes\n -----\n TF-IDF is intended to reflect how important a word is to a document in\n a collection or corpus. For a word token `w` in a document `d`, and a\n corpus, :math:`D = \\{d_1, \\ldots, d_N\\}`, we have:\n\n .. math::\n \\text{TF}(w, d) &= \\text{num. occurences of }w \\text{ in document }d \\\\\n \\text{IDF}(w, D) &= \\log \\frac{|D|}{|\\{ d \\in D: t \\in d \\}|}\n\n Parameters\n ----------\n vocab : :class:`Vocabulary` object or list-like\n An existing vocabulary to filter the tokens in the corpus against.\n Default is None.\n lowercase : bool\n Whether to convert each string to lowercase before tokenization.\n Default is True.\n min_count : int\n Minimum number of times a token must occur in order to be included\n in vocab. Default is 0.\n smooth_idf : bool\n Whether to add 1 to the denominator of the IDF calculation to avoid\n divide-by-zero errors. Default is True.\n max_tokens : int\n Only add the `max_tokens` most frequent tokens that occur more\n than `min_count` to the vocabulary. If None, add all tokens\n greater that occur more than than `min_count`. 
Default is None.\n input_type : {'files', 'strings'}\n If 'files', the sequence input to `fit` is expected to be a list\n of filepaths. If 'strings', the input is expected to be a list of\n lists, each sublist containing the raw strings for a single\n document in the corpus. Default is 'filename'.\n filter_stopwords : bool\n Whether to remove stopwords before encoding the words in the\n corpus. Default is True.\n filter_punctuation : bool\n Whether to remove punctuation before encoding the words in the\n corpus. Default is True.\n tokenizer : {'whitespace', 'words', 'characters', 'bytes'}\n Strategy to follow when mapping strings to tokens. The\n `'whitespace'` tokenizer splits strings at whitespace characters.\n The `'words'` tokenizer splits strings using a \"word\" regex. The\n `'characters'` tokenizer splits strings into individual characters.\n The `'bytes'` tokenizer splits strings into a collection of\n individual bytes.\n \"\"\"\n # create a function to filter against words in the vocab\n self._filter_vocab = lambda words: words\n if isinstance(vocab, Vocabulary):\n self._filter_vocab = vocab.filter\n elif isinstance(vocab, (list, np.ndarray, set)):\n vocab = set(vocab)\n self._filter_vocab = lambda words: [\n w if w in vocab else \"\" for w in words\n ]\n\n if input_type not in [\"files\", \"strings\"]:\n fstr = \"`input_type` must be either 'files' or 'strings', but got {}\"\n raise ValueError(fstr.format(input_type))\n\n self._tokens = None\n self._idx2doc = None\n self.term_freq = None\n self.idx2token = None\n self.token2idx = None\n self.inv_doc_freq = None\n\n self.hyperparameters = {\n \"id\": \"TFIDFEncoder\",\n \"encoding\": None,\n \"vocab\": vocab\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters,\n \"lowercase\": lowercase,\n \"min_count\": min_count,\n \"input_type\": input_type,\n \"max_tokens\": max_tokens,\n \"smooth_idf\": smooth_idf,\n \"tokenizer\": tokenizer\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters[\"tokenizer\"],\n \"filter_stopwords\": filter_stopwords\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters[\"filter_stopwords\"],\n \"filter_punctuation\": filter_punctuation\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters[\"filter_punctuation\"],\n }\n\n def fit(self, corpus_seq, encoding=\"utf-8-sig\"):\n \"\"\"\n Compute term-frequencies and inverse document frequencies on a\n collection of documents.\n\n Parameters\n ----------\n corpus_seq : str or list of strs\n The filepath / list of filepaths / raw string contents of the\n document(s) to be encoded, in accordance with the `input_type`\n parameter passed to the :meth:`__init__` method. Each document is\n expected to be a string of tokens separated by whitespace.\n encoding : str\n Specifies the text encoding for corpus if `input_type` is `files`.\n Common entries are either 'utf-8' (no header byte), or 'utf-8-sig'\n (header byte). 
Default is 'utf-8-sig'.\n\n Returns\n -------\n self\n \"\"\"\n H = self.hyperparameters\n\n if isinstance(corpus_seq, str):\n corpus_seq = [corpus_seq]\n\n if H[\"input_type\"] == \"files\":\n for corpus_fp in corpus_seq:\n assert op.isfile(corpus_fp), \"{} does not exist\".format(corpus_fp)\n\n tokens = []\n idx2token, token2idx = {}, {}\n\n # encode special tokens\n for tt in [\"\", \"\", \"\"]:\n token2idx[tt] = len(tokens)\n idx2token[len(tokens)] = tt\n tokens.append(Token(tt))\n\n min_count = H[\"min_count\"]\n max_tokens = H[\"max_tokens\"]\n H[\"encoding\"] = encoding\n\n bol_ix = token2idx[\"\"]\n eol_ix = token2idx[\"\"]\n idx2doc, term_freq = {}, {}\n\n # encode the text in `corpus_fps` without any filtering ...\n for d_ix, doc in enumerate(corpus_seq):\n doc_count = {}\n idx2doc[d_ix] = doc if H[\"input_type\"] == \"files\" else None\n token2idx, idx2token, tokens, doc_count = self._encode_document(\n doc, token2idx, idx2token, tokens, doc_count, bol_ix, eol_ix,\n )\n term_freq[d_ix] = doc_count\n\n self._tokens = tokens\n self._idx2doc = idx2doc\n self.token2idx = token2idx\n self.idx2token = idx2token\n self.term_freq = term_freq\n\n # ... retain only the top `max_tokens` most frequent tokens, coding\n # everything else as ...\n if max_tokens is not None and len(tokens) > max_tokens:\n self._keep_top_n_tokens()\n\n # ... replace all words occurring less than `min_count` by ...\n if min(self._tokens, key=lambda t: t.count).count < min_count:\n self._drop_low_freq_tokens()\n\n # ... sort tokens alphabetically and reindex ...\n self._sort_tokens()\n\n # ... finally, calculate inverse document frequency\n self._calc_idf()\n return self\n\n def _encode_document(\n self, doc, word2idx, idx2word, tokens, doc_count, bol_ix, eol_ix,\n ):\n \"\"\"Perform tokenization and compute token counts for a single document\"\"\"\n H = self.hyperparameters\n lowercase = H[\"lowercase\"]\n filter_stop = H[\"filter_stopwords\"]\n filter_punc = H[\"filter_punctuation\"]\n\n if H[\"input_type\"] == \"files\":\n with open(doc, \"r\", encoding=H[\"encoding\"]) as handle:\n doc = handle.read()\n\n tokenizer_dict = {\n \"words\": tokenize_words,\n \"characters\": tokenize_chars,\n \"whitespace\": tokenize_whitespace,\n \"bytes\": tokenize_bytes_raw,\n }\n tokenizer = tokenizer_dict[H[\"tokenizer\"]]\n\n n_words = 0\n lines = doc.split(\"\\n\")\n for line in lines:\n words = tokenizer(\n line,\n lowercase=lowercase,\n filter_stopwords=filter_stop,\n filter_punctuation=filter_punc,\n encoding=H[\"encoding\"],\n )\n words = self._filter_vocab(words)\n n_words += len(words)\n\n for ww in words:\n if ww not in word2idx:\n word2idx[ww] = len(tokens)\n idx2word[len(tokens)] = ww\n tokens.append(Token(ww))\n\n t_idx = word2idx[ww]\n tokens[t_idx].count += 1\n doc_count[t_idx] = doc_count.get(t_idx, 0) + 1\n\n # wrap line in and tags\n tokens[bol_ix].count += 1\n tokens[eol_ix].count += 1\n\n doc_count[bol_ix] = doc_count.get(bol_ix, 0) + 1\n doc_count[eol_ix] = doc_count.get(eol_ix, 0) + 1\n return word2idx, idx2word, tokens, doc_count\n\n def _keep_top_n_tokens(self):\n N = self.hyperparameters[\"max_tokens\"]\n doc_counts, word2idx, idx2word = {}, {}, {}\n tokens = sorted(self._tokens, key=lambda x: x.count, reverse=True)\n\n # reindex the top-N tokens...\n unk_ix = None\n for idx, tt in enumerate(tokens[:N]):\n word2idx[tt.word] = idx\n idx2word[idx] = tt.word\n\n if tt.word == \"\":\n unk_ix = idx\n\n # ... 
if isn't in the top-N, add it, replacing the Nth\n # most-frequent word and adjust the count accordingly ...\n if unk_ix is None:\n unk_ix = self.token2idx[\"\"]\n old_count = tokens[N - 1].count\n tokens[N - 1] = self._tokens[unk_ix]\n tokens[N - 1].count += old_count\n word2idx[\"\"] = N - 1\n idx2word[N - 1] = \"\"\n\n # ... and recode all dropped tokens as \"\"\n for tt in tokens[N:]:\n tokens[unk_ix].count += tt.count\n\n # ... finally, reindex the word counts for each document\n doc_counts = {}\n for d_ix in self.term_freq.keys():\n doc_counts[d_ix] = {}\n for old_ix, d_count in self.term_freq[d_ix].items():\n word = self.idx2token[old_ix]\n new_ix = word2idx.get(word, unk_ix)\n doc_counts[d_ix][new_ix] = doc_counts[d_ix].get(new_ix, 0) + d_count\n\n self._tokens = tokens[:N]\n self.token2idx = word2idx\n self.idx2token = idx2word\n self.term_freq = doc_counts\n\n assert len(self._tokens) <= N\n\n def _drop_low_freq_tokens(self):\n \"\"\"\n Replace all tokens that occur less than `min_count` with the ``\n token.\n \"\"\"\n H = self.hyperparameters\n unk_token = self._tokens[self.token2idx[\"\"]]\n eol_token = self._tokens[self.token2idx[\"\"]]\n bol_token = self._tokens[self.token2idx[\"\"]]\n tokens = [unk_token, eol_token, bol_token]\n\n unk_idx = 0\n word2idx = {\"\": 0, \"\": 1, \"\": 2}\n idx2word = {0: \"\", 1: \"\", 2: \"\"}\n special = {\"\", \"\", \"\"}\n\n for tt in self._tokens:\n if tt.word not in special:\n if tt.count < H[\"min_count\"]:\n tokens[unk_idx].count += tt.count\n else:\n word2idx[tt.word] = len(tokens)\n idx2word[len(tokens)] = tt.word\n tokens.append(tt)\n\n # reindex document counts\n doc_counts = {}\n for d_idx in self.term_freq.keys():\n doc_counts[d_idx] = {}\n for old_idx, d_count in self.term_freq[d_idx].items():\n word = self.idx2token[old_idx]\n new_idx = word2idx.get(word, unk_idx)\n doc_counts[d_idx][new_idx] = doc_counts[d_idx].get(new_idx, 0) + d_count\n\n self._tokens = tokens\n self.token2idx = word2idx\n self.idx2token = idx2word\n self.term_freq = doc_counts\n\n def _sort_tokens(self):\n # sort tokens alphabetically and recode\n ix = 0\n token2idx, idx2token, = (\n {},\n {},\n )\n special = [\"\", \"\", \"\"]\n words = sorted(self.token2idx.keys())\n term_freq = {d: {} for d in self.term_freq.keys()}\n\n for w in words:\n if w not in special:\n old_ix = self.token2idx[w]\n token2idx[w], idx2token[ix] = ix, w\n for d in self.term_freq.keys():\n if old_ix in self.term_freq[d]:\n count = self.term_freq[d][old_ix]\n term_freq[d][ix] = count\n ix += 1\n\n for w in special:\n token2idx[w] = len(token2idx)\n idx2token[len(idx2token)] = w\n\n self.token2idx = token2idx\n self.idx2token = idx2token\n self.term_freq = term_freq\n self.vocab_counts = Counter({t.word: t.count for t in self._tokens})\n\n def _calc_idf(self):\n \"\"\"\n Compute the (smoothed-) inverse-document frequency for each token in\n the corpus.\n\n For a word token `w`, the IDF is simply\n\n IDF(w) = log ( |D| / |{ d in D: w in d }| ) + 1\n\n where D is the set of all documents in the corpus,\n\n D = {d1, d2, ..., dD}\n\n If `smooth_idf` is True, we perform additive smoothing on the number of\n documents containing a given word, equivalent to pretending that there\n exists a final D+1st document that contains every word in the corpus:\n\n SmoothedIDF(w) = log ( |D| + 1 / [1 + |{ d in D: w in d }|] ) + 1\n \"\"\"\n inv_doc_freq = {}\n smooth_idf = self.hyperparameters[\"smooth_idf\"]\n tf, doc_idxs = self.term_freq, self._idx2doc.keys()\n\n D = len(self._idx2doc) + int(smooth_idf)\n 
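        # Worked example (invented numbers, not from the original source):
        # with `smooth_idf=True` and a corpus of 4 documents, D = 4 + 1 = 5.
        # A token appearing in 2 of the 4 documents gets
        #     d_count = 1 + 2 = 3  ->  IDF = log(5 / 3) + 1 ~= 1.51,
        # while a token appearing in every document gets
        #     d_count = 1 + 4 = 5  ->  IDF = log(5 / 5) + 1 = 1.0,
        # matching the smoothed formula in the docstring above.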
for word, w_ix in self.token2idx.items():\n d_count = int(smooth_idf)\n d_count += np.sum([1 if w_ix in tf[d_ix] else 0 for d_ix in doc_idxs])\n inv_doc_freq[w_ix] = 1 if d_count == 0 else np.log(D / d_count) + 1\n self.inv_doc_freq = inv_doc_freq\n\n def transform(self, ignore_special_chars=True):\n \"\"\"\n Generate the term-frequency inverse-document-frequency encoding of a\n text corpus.\n\n Parameters\n ----------\n ignore_special_chars : bool\n Whether to drop columns corresponding to \"\", \"\", and\n \"\" tokens from the final tfidf encoding. Default is True.\n\n Returns\n -------\n tfidf : numpy array of shape `(D, M [- 3])`\n The encoded corpus, with each row corresponding to a single\n document, and each column corresponding to a token id. The mapping\n between column numbers and tokens is stored in the `idx2token`\n attribute IFF `ignore_special_chars` is False. Otherwise, the\n mappings are not accurate.\n \"\"\"\n D, N = len(self._idx2doc), len(self._tokens)\n tf = np.zeros((D, N))\n idf = np.zeros((D, N))\n\n for d_ix in self._idx2doc.keys():\n words, counts = zip(*self.term_freq[d_ix].items())\n docs = np.ones(len(words), dtype=int) * d_ix\n tf[docs, words] = counts\n\n words = sorted(self.idx2token.keys())\n idf = np.tile(np.array([self.inv_doc_freq[w] for w in words]), (D, 1))\n tfidf = tf * idf\n\n if ignore_special_chars:\n idxs = [\n self.token2idx[\"\"],\n self.token2idx[\"\"],\n self.token2idx[\"\"],\n ]\n tfidf = np.delete(tfidf, idxs, 1)\n\n return tfidf\n\n\nclass Vocabulary:\n def __init__(\n self,\n lowercase=True,\n min_count=None,\n max_tokens=None,\n filter_stopwords=True,\n filter_punctuation=True,\n tokenizer=\"words\",\n ):\n \"\"\"\n An object for compiling and encoding the unique tokens in a text corpus.\n\n Parameters\n ----------\n lowercase : bool\n Whether to convert each string to lowercase before tokenization.\n Default is True.\n min_count : int\n Minimum number of times a token must occur in order to be included\n in vocab. If `None`, include all tokens from `corpus_fp` in vocab.\n Default is None.\n max_tokens : int\n Only add the `max_tokens` most frequent tokens that occur more\n than `min_count` to the vocabulary. If None, add all tokens\n that occur more than than `min_count`. Default is None.\n filter_stopwords : bool\n Whether to remove stopwords before encoding the words in the\n corpus. Default is True.\n filter_punctuation : bool\n Whether to remove punctuation before encoding the words in the\n corpus. Default is True.\n tokenizer : {'whitespace', 'words', 'characters', 'bytes'}\n Strategy to follow when mapping strings to tokens. The\n `'whitespace'` tokenizer splits strings at whitespace characters.\n The `'words'` tokenizer splits strings using a \"word\" regex. 
The\n `'characters'` tokenizer splits strings into individual characters.\n The `'bytes'` tokenizer splits strings into a collection of\n individual bytes.\n \"\"\"\n self.hyperparameters = {\n \"id\": \"Vocabulary\",\n \"encoding\": None,\n \"corpus_fps\": None,\n \"lowercase\": lowercase,\n \"min_count\": min_count,\n \"max_tokens\": max_tokens,\n \"filter_stopwords\": filter_stopwords,\n \"filter_punctuation\": filter_punctuation,\n \"tokenizer\": tokenizer,\n }\n\n def __len__(self):\n \"\"\"Return the number of tokens in the vocabulary\"\"\"\n return len(self._tokens)\n\n def __iter__(self):\n \"\"\"Return an iterator over the tokens in the vocabulary\"\"\"\n return iter(self._tokens)\n\n def __contains__(self, word):\n \"\"\"Assert whether `word` is a token in the vocabulary\"\"\"\n return word in self.token2idx\n\n def __getitem__(self, key):\n \"\"\"\n Return the token (if key is an integer) or the index (if key is a string)\n for the key in the vocabulary, if it exists.\n \"\"\"\n if isinstance(key, str):\n return self._tokens[self.token2idx[key]]\n if isinstance(key, int):\n return self._tokens[key]\n\n @property\n def n_tokens(self):\n \"\"\"The number of unique word tokens in the vocabulary\"\"\"\n return len(self.token2idx)\n\n @property\n def n_words(self):\n \"\"\"The total number of words in the corpus\"\"\"\n return sum(self.counts.values())\n\n @property\n def shape(self):\n \"\"\"The number of unique word tokens in the vocabulary\"\"\"\n return self._tokens.shape\n\n def most_common(self, n=5):\n \"\"\"Return the top `n` most common tokens in the corpus\"\"\"\n return self.counts.most_common()[:n]\n\n def words_with_count(self, k):\n \"\"\"Return all tokens that occur `k` times in the corpus\"\"\"\n return [w for w, c in self.counts.items() if c == k]\n\n def filter(self, words, unk=True): # noqa: A003\n \"\"\"\n Filter (or replace) any word in `words` that is not present in\n `Vocabulary`.\n\n Parameters\n ----------\n words : list of strs\n A list of words to filter\n unk : bool\n Whether to replace any out of vocabulary words in `words` with the\n ```` token (True) or skip them entirely (False). Default is\n True.\n\n Returns\n -------\n filtered : list of strs\n The list of words filtered against the words in Vocabulary.\n \"\"\"\n if unk:\n return [w if w in self else \"\" for w in words]\n return [w for w in words if w in self]\n\n def words_to_indices(self, words):\n \"\"\"\n Convert the words in `words` to their token indices. If a word is not\n in the vocabulary, return the index for the ```` token\n\n Parameters\n ----------\n words : list of strs\n A list of words to filter\n\n Returns\n -------\n indices : list of ints\n The token indices for each word in `words`\n \"\"\"\n unk_ix = self.token2idx[\"\"]\n lowercase = self.hyperparameters[\"lowercase\"]\n words = [w.lower() for w in words] if lowercase else words\n return [self.token2idx[w] if w in self else unk_ix for w in words]\n\n def indices_to_words(self, indices):\n \"\"\"\n Convert the indices in `indices` to their word values. 
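A brief usage sketch for these lookup helpers, assuming a vocabulary has already been fit; the corpus path and example words are placeholders, and any out-of-vocabulary word maps to the unknown-token index:

# Illustrative only: "corpus.txt" is a placeholder path.
vocab = Vocabulary(lowercase=True, min_count=2).fit("corpus.txt")

ids = vocab.words_to_indices(["the", "cat", "zzzxqj"])  # unseen word -> unk index
words = vocab.indices_to_words(ids)                     # round-trip; unk stays unk
print(vocab.n_tokens, vocab.most_common(3))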
If an index is\n not in the vocabulary, return the ```` token.\n\n Parameters\n ----------\n indices : list of ints\n The token indices for each word in `words`\n\n Returns\n -------\n words : list of strs\n The word strings corresponding to each token index in `indices`\n \"\"\"\n unk = \"\"\n return [self.idx2token[i] if i in self.idx2token else unk for i in indices]\n\n def fit(self, corpus_fps, encoding=\"utf-8-sig\"):\n \"\"\"\n Compute the vocabulary across a collection of documents.\n\n Parameters\n ----------\n corpus_fps : str or list of strs\n The filepath / list of filepaths for the document(s) to be encoded.\n Each document is expected to be encoded as newline-separated\n string of text, with adjacent tokens separated by a whitespace\n character.\n encoding : str\n Specifies the text encoding for corpus. Common entries are either\n 'utf-8' (no header byte), or 'utf-8-sig' (header byte). Default is\n 'utf-8-sig'.\n\n Returns\n -------\n self\n \"\"\"\n if isinstance(corpus_fps, str):\n corpus_fps = [corpus_fps]\n\n for corpus_fp in corpus_fps:\n assert op.isfile(corpus_fp), \"{} does not exist\".format(corpus_fp)\n\n tokens = []\n H = self.hyperparameters\n idx2word, word2idx = {}, {}\n\n tokenizer_dict = {\n \"words\": tokenize_words,\n \"characters\": tokenize_chars,\n \"whitespace\": tokenize_whitespace,\n \"bytes\": tokenize_bytes_raw,\n }\n\n min_count = H[\"min_count\"]\n lowercase = H[\"lowercase\"]\n max_tokens = H[\"max_tokens\"]\n filter_stop = H[\"filter_stopwords\"]\n filter_punc = H[\"filter_punctuation\"]\n tokenizer = tokenizer_dict[H[\"tokenizer\"]]\n\n H[\"encoding\"] = encoding\n H[\"corpus_fps\"] = corpus_fps\n\n # encode special tokens\n for tt in [\"\", \"\", \"\"]:\n word2idx[tt] = len(tokens)\n idx2word[len(tokens)] = tt\n tokens.append(Token(tt))\n\n bol_ix = word2idx[\"\"]\n eol_ix = word2idx[\"\"]\n\n for d_ix, doc_fp in enumerate(corpus_fps):\n with open(doc_fp, \"r\", encoding=H[\"encoding\"]) as doc:\n for line in doc:\n words = tokenizer(\n line,\n lowercase=lowercase,\n filter_stopwords=filter_stop,\n filter_punctuation=filter_punc,\n encoding=H[\"encoding\"],\n )\n\n for ww in words:\n if ww not in word2idx:\n word2idx[ww] = len(tokens)\n idx2word[len(tokens)] = ww\n tokens.append(Token(ww))\n\n t_idx = word2idx[ww]\n tokens[t_idx].count += 1\n\n # wrap line in and tags\n tokens[bol_ix].count += 1\n tokens[eol_ix].count += 1\n\n self._tokens = tokens\n self.token2idx = word2idx\n self.idx2token = idx2word\n\n # replace all words occurring less than `min_count` by \n if min_count is not None:\n self._drop_low_freq_tokens()\n\n # retain only the top `max_tokens` most frequent tokens, coding\n # everything else as \n if max_tokens is not None and len(tokens) > max_tokens:\n self._keep_top_n_tokens()\n\n counts = {w: self._tokens[ix].count for w, ix in self.token2idx.items()}\n self.counts = Counter(counts)\n self._tokens = np.array(self._tokens)\n return self\n\n def _keep_top_n_tokens(self):\n word2idx, idx2word = {}, {}\n N = self.hyperparameters[\"max_tokens\"]\n tokens = sorted(self._tokens, key=lambda x: x.count, reverse=True)\n\n # reindex the top-N tokens...\n unk_ix = None\n for idx, tt in enumerate(tokens[:N]):\n word2idx[tt.word] = idx\n idx2word[idx] = tt.word\n\n if tt.word == \"\":\n unk_ix = idx\n\n # ... 
if isn't in the top-N, add it, replacing the Nth\n # most-frequent word and adjusting the count accordingly ...\n if unk_ix is None:\n unk_ix = self.token2idx[\"\"]\n old_count = tokens[N - 1].count\n tokens[N - 1] = self._tokens[unk_ix]\n tokens[N - 1].count += old_count\n word2idx[\"\"] = N - 1\n idx2word[N - 1] = \"\"\n\n # ... and recode all dropped tokens as \"\"\n for tt in tokens[N:]:\n tokens[unk_ix].count += tt.count\n\n self._tokens = tokens[:N]\n self.token2idx = word2idx\n self.idx2token = idx2word\n\n assert len(self._tokens) <= N\n\n def _drop_low_freq_tokens(self):\n \"\"\"\n Replace all tokens that occur less than `min_count` with the ``\n token.\n \"\"\"\n unk_idx = 0\n unk_token = self._tokens[self.token2idx[\"\"]]\n eol_token = self._tokens[self.token2idx[\"\"]]\n bol_token = self._tokens[self.token2idx[\"\"]]\n\n H = self.hyperparameters\n tokens = [unk_token, eol_token, bol_token]\n word2idx = {\"\": 0, \"\": 1, \"\": 2}\n idx2word = {0: \"\", 1: \"\", 2: \"\"}\n special = {\"\", \"\", \"\"}\n\n for tt in self._tokens:\n if tt.word not in special:\n if tt.count < H[\"min_count\"]:\n tokens[unk_idx].count += tt.count\n else:\n word2idx[tt.word] = len(tokens)\n idx2word[len(tokens)] = tt.word\n tokens.append(tt)\n\n self._tokens = tokens\n self.token2idx = word2idx\n self.idx2token = idx2word\n", "output": ["tokenize_chars", "remove_stop_words", "strip_punctuation", "ngrams", "tokenize_whitespace", "bytes_to_chars", "tokenize_words", "tokenize_bytes_raw", "tokenize_words_bytes", "Token", "Node", "Vocabulary", "BytePairEncoder", "TFIDFEncoder", "HuffmanEncoder"], "metadata": {"file_path": "numpyml-master/data/numpy_ml/preprocessing/nlp.py", "file_length": 14055, "symbol_dict": [{"symbol": "bytes_to_chars", "type": "mannual_defined_function", "byte_location": 5821, "location": 1767}, {"symbol": "tokenize_words", "type": "mannual_defined_function", "byte_location": 3729, "location": 1125}, {"symbol": "tokenize_chars", "type": "mannual_defined_function", "byte_location": 6191, "location": 1899}, {"symbol": "remove_stop_words", "type": "mannual_defined_function", "byte_location": 6584, "location": 2018}, {"symbol": "strip_punctuation", "type": "mannual_defined_function", "byte_location": 6734, "location": 2067}, {"symbol": "tokenize_words_bytes", "type": "mannual_defined_function", "byte_location": 4163, "location": 1266}, {"symbol": "ngrams", "type": "mannual_defined_function", "byte_location": 3101, "location": 927}, {"symbol": "tokenize_bytes_raw", "type": "mannual_defined_function", "byte_location": 4761, "location": 1452}, {"symbol": "tokenize_whitespace", "type": "mannual_defined_function", "byte_location": 3263, "location": 985}, {"symbol": "Token", "type": "mannual_defined_class", "byte_location": 20561, "location": 5907}, {"symbol": "HuffmanEncoder", "type": "mannual_defined_class", "byte_location": 15768, "location": 4628}, {"symbol": "Node", "type": "mannual_defined_class", "byte_location": 14975, "location": 4393}, {"symbol": "TFIDFEncoder", "type": "mannual_defined_class", "byte_location": 20804, "location": 5986}, {"symbol": "BytePairEncoder", "type": "mannual_defined_class", "byte_location": 7073, "location": 2137}, {"symbol": "Vocabulary", "type": "mannual_defined_class", "byte_location": 36666, "location": 10845}]}} {"input": "\"\"\"Common preprocessing utilities for working with text data\"\"\"\nimport re\nimport heapq\nimport os.path as op\nfrom collections import Counter, OrderedDict, defaultdict\n\nimport numpy as np\n\n\n# This list of English stop words 
is taken from the \"Glasgow Information\n# Retrieval Group\". The original list can be found at\n# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words\n_STOP_WORDS = set(\n (\n \"a about above across after afterwards again against all almost alone \"\n \"along already also although always am among amongst amoungst amount an \"\n \"and another any anyhow anyone anything anyway anywhere are around as at \"\n \"back be became because become becomes becoming been before beforehand \"\n \"behind being below beside besides between beyond bill both bottom but by \"\n \"call can cannot cant co con could couldnt cry de describe detail do done \"\n \"down due during each eg eight either eleven else elsewhere empty enough \"\n \"etc even ever every everyone everything everywhere except few fifteen \"\n \"fifty fill find fire first five for former formerly forty found four from \"\n \"front full further get give go had has hasnt have he hence her here \"\n \"hereafter hereby herein hereupon hers herself him himself his how however \"\n \"hundred i ie if in inc indeed interest into is it its itself keep last \"\n \"latter latterly least less ltd made many may me meanwhile might mill mine \"\n \"more moreover most mostly move much must my myself name namely neither \"\n \"never nevertheless next nine no nobody none noone nor not nothing now \"\n \"nowhere of off often on once one only onto or other others otherwise our \"\n \"ours ourselves out over own part per perhaps please put rather re same see \"\n \"seem seemed seeming seems serious several she should show side since \"\n \"sincere six sixty so some somehow someone something sometime sometimes \"\n \"somewhere still such system take ten than that the their them themselves \"\n \"then thence there thereafter thereby therefore therein thereupon these \"\n \"they thick thin third this those though three through throughout thru thus \"\n \"to together too top toward towards twelve twenty two un under until up \"\n \"upon us very via was we well were what whatever when whence whenever where \"\n \"whereafter whereas whereby wherein whereupon wherever whether which while \"\n \"whither who whoever whole whom whose why will with within without would \"\n \"yet you your yours yourself yourselves\"\n ).split(\" \"),\n)\n\n_WORD_REGEX = re.compile(r\"(?u)\\b\\w\\w+\\b\") # sklearn default\n_WORD_REGEX_W_PUNC = re.compile(r\"(?u)\\w+|[^a-zA-Z0-9\\s]\")\n_WORD_REGEX_W_PUNC_AND_WHITESPACE = re.compile(r\"(?u)s?\\w+\\s?|\\s?[^a-zA-Z0-9\\s]\\s?\")\n\n_PUNC_BYTE_REGEX = re.compile(\n r\"(33|34|35|36|37|38|39|40|41|42|43|44|45|\"\n r\"46|47|58|59|60|61|62|63|64|91|92|93|94|\"\n r\"95|96|123|124|125|126)\",\n)\n_PUNCTUATION = \"!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\"\n_PUNC_TABLE = str.maketrans(\"\", \"\", _PUNCTUATION)\n\n\ndef ngrams(sequence, N):\n \"\"\"Return all `N`-grams of the elements in `sequence`\"\"\"\n assert N >= 1\n return list(zip(*[sequence[i:] for i in range(N)]))\n\n\ndef tokenize_whitespace(\n line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs,\n):\n \"\"\"\n Split a string at any whitespace characters, optionally removing\n punctuation and stop-words in the process.\n \"\"\"\n line = line.lower() if lowercase else line\n words = line.split()\n line = [strip_punctuation(w) for w in words] if filter_punctuation else line\n return remove_stop_words(words) if filter_stopwords else words\n\n\ndef tokenize_words(\n line, lowercase=True, filter_stopwords=True, filter_punctuation=True, **kwargs,\n):\n \"\"\"\n 
Split a string into individual words, optionally removing punctuation and\n stop-words in the process.\n \"\"\"\n REGEX = _WORD_REGEX if filter_punctuation else _WORD_REGEX_W_PUNC\n words = REGEX.findall(line.lower() if lowercase else line)\n return remove_stop_words(words) if filter_stopwords else words\n\n\ndef tokenize_words_bytes(\n line,\n lowercase=True,\n filter_stopwords=True,\n filter_punctuation=True,\n encoding=\"utf-8\",\n **kwargs,\n):\n \"\"\"\n Split a string into individual words, optionally removing punctuation and\n stop-words in the process. Translate each word into a list of bytes.\n \"\"\"\n words = tokenize_words(\n line,\n lowercase=lowercase,\n filter_stopwords=filter_stopwords,\n filter_punctuation=filter_punctuation,\n **kwargs,\n )\n words = [\" \".join([str(i) for i in w.encode(encoding)]) for w in words]\n return words\n\n\ndef tokenize_bytes_raw(line, encoding=\"utf-8\", splitter=None, **kwargs):\n \"\"\"\n Convert the characters in `line` to a collection of bytes. Each byte is\n represented in decimal as an integer between 0 and 255.\n\n Parameters\n ----------\n line : str\n The string to tokenize.\n encoding : str\n The encoding scheme for the characters in `line`. Default is `'utf-8'`.\n splitter : {'punctuation', None}\n If `'punctuation'`, split the string at any punctuation character\n before encoding into bytes. If None, do not split `line` at all.\n Default is None.\n\n Returns\n -------\n bytes : list\n A list of the byte-encoded characters in `line`. Each item in the list\n is a string of space-separated integers between 0 and 255 representing\n the bytes encoding the characters in `line`.\n \"\"\"\n byte_str = [\" \".join([str(i) for i in line.encode(encoding)])]\n if splitter == \"punctuation\":\n byte_str = _PUNC_BYTE_REGEX.sub(r\"-\\1-\", byte_str[0]).split(\"-\")\n return byte_str\n\n\ndef bytes_to_chars(byte_list, encoding=\"utf-8\"):\n \"\"\"\n Decode bytes (represented as an integer between 0 and 255) to characters in\n the specified encoding.\n \"\"\"\n hex_array = [hex(a).replace(\"0x\", \"\") for a in byte_list]\n hex_array = \" \".join([h if len(h) > 1 else f\"0{h}\" for h in hex_array])\n return bytearray.fromhex(hex_array).decode(encoding)\n\n\ndef tokenize_chars(line, lowercase=True, filter_punctuation=True, **kwargs):\n \"\"\"\n Split a string into individual characters, optionally removing punctuation\n and stop-words in the process.\n \"\"\"\n line = line.lower() if lowercase else line\n line = strip_punctuation(line) if filter_punctuation else line\n chars = list(re.sub(\" {2,}\", \" \", line).strip())\n return chars\n\n\ndef remove_stop_words(words):\n \"\"\"Remove stop words from a list of word strings\"\"\"\n return [w for w in words if w.lower() not in _STOP_WORDS]\n\n\ndef strip_punctuation(line):\n \"\"\"Remove punctuation from a string\"\"\"\n return line.translate(_PUNC_TABLE).strip()\n\n\n#######################################################################\n# Byte-Pair Encoder #\n#######################################################################\n\n\nclass BytePairEncoder(object):\n def __init__(self, max_merges=3000, encoding=\"utf-8\"):\n \"\"\"\n A byte-pair encoder for sub-word embeddings.\n\n Notes\n -----\n Byte-pair encoding [1][2] is a compression algorithm that iteratively\n replaces the most frequently ocurring byte pairs in a set of documents\n with a new, single token. 
It has gained popularity as a preprocessing\n step for many NLP tasks due to its simplicity and expressiveness: using\n a base coebook of just 256 unique tokens (bytes), any string can be\n encoded.\n\n References\n ----------\n .. [1] Gage, P. (1994). A new algorithm for data compression. *C\n Users Journal, 12(2)*, 23\u201338.\n .. [2] Sennrich, R., Haddow, B., & Birch, A. (2015). Neural machine\n translation of rare words with subword units, *Proceedings of the\n 54th Annual Meeting of the Association for Computational\n Linguistics,* 1715-1725.\n\n Parameters\n ----------\n max_merges : int\n The maximum number of byte pair merges to perform during the\n :meth:`fit` operation. Default is 3000.\n encoding : str\n The encoding scheme for the documents used to train the encoder.\n Default is `'utf-8'`.\n \"\"\"\n self.parameters = {\n \"max_merges\": max_merges,\n \"encoding\": encoding,\n }\n\n # initialize the byte <-> token and token <-> byte dictionaries. bytes\n # are represented in decimal as integers between 0 and 255. there is a\n # 1:1 correspondence between token and byte representations up to 255.\n self.byte2token = OrderedDict({i: i for i in range(256)})\n self.token2byte = OrderedDict({v: k for k, v in self.byte2token.items()})\n\n def fit(self, corpus_fps, encoding=\"utf-8\"):\n \"\"\"\n Train a byte pair codebook on a set of documents.\n\n Parameters\n ----------\n corpus_fps : str or list of strs\n The filepath / list of filepaths for the document(s) to be used to\n learn the byte pair codebook.\n encoding : str\n The text encoding for documents. Common entries are either 'utf-8'\n (no header byte), or 'utf-8-sig' (header byte). Default is\n 'utf-8'.\n \"\"\"\n vocab = (\n Vocabulary(\n lowercase=False,\n min_count=None,\n max_tokens=None,\n filter_stopwords=False,\n filter_punctuation=False,\n tokenizer=\"bytes\",\n )\n .fit(corpus_fps, encoding=encoding)\n .counts\n )\n\n # iteratively merge the most common byte bigram across the documents\n for _ in range(self.parameters[\"max_merges\"]):\n pair_counts = self._get_counts(vocab)\n most_common_bigram = max(pair_counts, key=pair_counts.get)\n vocab = self._merge(most_common_bigram, vocab)\n\n token_bytes = set()\n for k in vocab.keys():\n token_bytes = token_bytes.union([w for w in k.split(\" \") if \"-\" in w])\n\n for i, t in enumerate(token_bytes):\n byte_tuple = tuple(int(j) for j in t.split(\"-\"))\n self.token2byte[256 + i] = byte_tuple\n self.byte2token[byte_tuple] = 256 + i\n\n return self\n\n def _get_counts(self, vocab):\n \"\"\"Collect bigram counts for the tokens in vocab\"\"\"\n pair_counts = defaultdict(int)\n for word, count in vocab.items():\n pairs = ngrams(word.split(\" \"), 2)\n for p in pairs:\n pair_counts[p] += count\n return pair_counts\n\n def _merge(self, bigram, vocab):\n \"\"\"Replace `bigram` with a single token and update vocab accordingly\"\"\"\n v_out = {}\n bg = re.escape(\" \".join(bigram))\n bigram_regex = re.compile(r\"(?>> B = BytePairEncoder(max_merges=100).fit(\"./example.txt\")\n >>> encoded_tokens = B.transform(\"Hello! 
How are you \ud83d\ude01 ?\")\n >>> encoded_tokens\n [[72, 879, 474, ...]]\n \"\"\"\n if isinstance(text, str):\n text = [text]\n return [self._transform(string) for string in text]\n\n def _transform(self, text):\n \"\"\"Transform a single text string to a list of byte-pair IDs\"\"\"\n P = self.parameters\n _bytes = tokenize_bytes_raw(text, encoding=P[\"encoding\"])\n\n encoded = []\n for w in _bytes:\n l, r = 0, len(w)\n w = [int(i) for i in w.split(\" \")]\n\n while l < len(w):\n candidate = tuple(w[l:r])\n\n if len(candidate) > 1 and candidate in self.byte2token:\n # candidate is a collection of several bytes and is in our\n # vocab\n encoded.append(self.byte2token[candidate])\n l, r = r, len(w)\n elif len(candidate) == 1:\n # candidate is a single byte and should always be in our\n # vocab\n encoded.append(candidate[0])\n l, r = r, len(w)\n else:\n # candidate is not in vocab, so we decrease our context\n # window by 1 and try again\n r -= 1\n return encoded\n\n def inverse_transform(self, codes):\n \"\"\"\n Transform an encoded sequence of byte pair codeword IDs back into\n human-readable text.\n\n Parameters\n ----------\n codes : list of `N` lists\n A list of `N` lists. Each sublist is a collection of integer\n byte-pair token IDs representing a particular text string.\n\n Returns\n -------\n text: list of `N` strings\n The decoded strings corresponding to the `N` sublists in `codes`.\n\n Examples\n --------\n >>> B = BytePairEncoder(max_merges=100).fit(\"./example.txt\")\n >>> encoded_tokens = B.transform(\"Hello! How are you \ud83d\ude01 ?\")\n >>> encoded_tokens\n [[72, 879, 474, ...]]\n >>> B.inverse_transform(encoded_tokens)\n [\"Hello! How are you \ud83d\ude01 ?\"]\n \"\"\"\n if isinstance(codes[0], int):\n codes = [codes]\n\n decoded = []\n P = self.parameters\n\n for code in codes:\n _bytes = [self.token2byte[t] if t > 255 else [t] for t in code]\n _bytes = [b for blist in _bytes for b in blist]\n decoded.append(bytes_to_chars(_bytes, encoding=P[\"encoding\"]))\n return decoded\n\n @property\n def codebook(self):\n \"\"\"\n A list of the learned byte pair codewords, decoded into human-readable\n format\n \"\"\"\n return [\n self.inverse_transform(t)[0]\n for t in self.byte2token.keys()\n if isinstance(t, tuple)\n ]\n\n @property\n def tokens(self):\n \"\"\"A list of the byte pair codeword IDs\"\"\"\n return list(self.token2byte.keys())\n\n\n#######################################################################\n# Huffman Tree #\n#######################################################################\n\n\nclass Node(object):\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.left = None\n self.right = None\n\n def __gt__(self, other):\n \"\"\"Greater than\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val > other.val\n\n def __ge__(self, other):\n \"\"\"Greater than or equal to\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val >= other.val\n\n def __lt__(self, other):\n \"\"\"Less than\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val < other.val\n\n def __le__(self, other):\n \"\"\"Less than or equal to\"\"\"\n if not isinstance(other, Node):\n return -1\n return self.val <= other.val\n\n\nclass HuffmanEncoder(object):\n def fit(self, text):\n \"\"\"\n Build a Huffman tree for the tokens in `text` and compute each token's\n binary encoding.\n\n Notes\n -----\n In a Huffman code, tokens that occur more frequently are (generally)\n represented using fewer bits. 
Huffman codes produce the minimum expected\n codeword length among all methods for encoding tokens individually.\n\n Huffman codes correspond to paths through a binary tree, with 1\n corresponding to \"move right\" and 0 corresponding to \"move left\". In\n contrast to standard binary trees, the Huffman tree is constructed from the\n bottom up. Construction begins by initializing a min-heap priority queue\n consisting of each token in the corpus, with priority corresponding to the\n token frequency. At each step, the two most infrequent tokens in the corpus\n are removed and become the children of a parent pseudotoken whose\n \"frequency\" is the sum of the frequencies of its children. This new parent\n pseudotoken is added to the priority queue and the process is repeated\n recursively until no tokens remain.\n\n Parameters\n ----------\n text: list of strs or :class:`Vocabulary` instance\n The tokenized text or a pretrained :class:`Vocabulary` object to use for\n building the Huffman code.\n \"\"\"\n self._build_tree(text)\n self._generate_codes()\n\n def transform(self, text):\n \"\"\"\n Transform the words in `text` into their Huffman-code representations.\n\n Parameters\n ----------\n text: list of `N` strings\n The list of words to encode\n\n Returns\n -------\n codes : list of `N` binary strings\n The encoded words in `text`\n \"\"\"\n if isinstance(text, str):\n text = [text]\n for token in set(text):\n if token not in self._item2code:\n raise Warning(\"Token '{}' not in Huffman tree. Skipping\".format(token))\n return [self._item2code.get(t, None) for t in text]\n\n def inverse_transform(self, codes):\n \"\"\"\n Transform an encoded sequence of bit-strings back into words.\n\n Parameters\n ----------\n codes : list of `N` binary strings\n A list of encoded bit-strings, represented as strings.\n\n Returns\n -------\n text: list of `N` strings\n The decoded text.\n \"\"\"\n if isinstance(codes, str):\n codes = [codes]\n for code in set(codes):\n if code not in self._code2item:\n raise Warning(\"Code '{}' not in Huffman tree. 
Skipping\".format(code))\n return [self._code2item.get(c, None) for c in codes]\n\n @property\n def tokens(self):\n \"\"\"A list the unique tokens in `text`\"\"\"\n return list(self._item2code.keys())\n\n @property\n def codes(self):\n \"\"\"A list with the Huffman code for each unique token in `text`\"\"\"\n return list(self._code2item.keys())\n\n def _counter(self, text):\n counts = {}\n for item in text:\n counts[item] = counts.get(item, 0) + 1\n return counts\n\n def _build_tree(self, text):\n \"\"\"Construct Huffman Tree\"\"\"\n PQ = []\n\n if isinstance(text, Vocabulary):\n counts = text.counts\n else:\n counts = self._counter(text)\n\n for (k, c) in counts.items():\n PQ.append(Node(k, c))\n\n # create a priority queue with priority = item frequency\n heapq.heapify(PQ)\n\n while len(PQ) > 1:\n node1 = heapq.heappop(PQ) # item with smallest frequency\n node2 = heapq.heappop(PQ) # item with second smallest frequency\n\n parent = Node(None, node1.val + node2.val)\n parent.left = node1\n parent.right = node2\n\n heapq.heappush(PQ, parent)\n\n self._root = heapq.heappop(PQ)\n\n def _generate_codes(self):\n current_code = \"\"\n self._item2code = {}\n self._code2item = {}\n self._build_code(self._root, current_code)\n\n def _build_code(self, root, current_code):\n if root is None:\n return\n\n if root.key is not None:\n self._item2code[root.key] = current_code\n self._code2item[current_code] = root.key\n return\n\n # 0 = move left, 1 = move right\n self._build_code(root.left, current_code + \"0\")\n self._build_code(root.right, current_code + \"1\")\n\n\n#######################################################################\n# Containers #\n#######################################################################\n\n\nclass Token:\n def __init__(self, word):\n self.count = 0\n self.word = word\n\n def __repr__(self):\n \"\"\"A string representation of the token\"\"\"\n return \"Token(word='{}', count={})\".format(self.word, self.count)\n\n\nclass TFIDFEncoder:\n def __init__(\n self,\n vocab=None,\n lowercase=True,\n min_count=0,\n smooth_idf=True,\n max_tokens=None,\n input_type=\"files\",\n filter_stopwords=True,\n filter_punctuation=True,\n tokenizer=\"words\",\n ):\n r\"\"\"\n An object for compiling and encoding the term-frequency\n inverse-document-frequency (TF-IDF) representation of the tokens in a\n text corpus.\n\n Notes\n -----\n TF-IDF is intended to reflect how important a word is to a document in\n a collection or corpus. For a word token `w` in a document `d`, and a\n corpus, :math:`D = \\{d_1, \\ldots, d_N\\}`, we have:\n\n .. math::\n \\text{TF}(w, d) &= \\text{num. occurences of }w \\text{ in document }d \\\\\n \\text{IDF}(w, D) &= \\log \\frac{|D|}{|\\{ d \\in D: t \\in d \\}|}\n\n Parameters\n ----------\n vocab : :class:`Vocabulary` object or list-like\n An existing vocabulary to filter the tokens in the corpus against.\n Default is None.\n lowercase : bool\n Whether to convert each string to lowercase before tokenization.\n Default is True.\n min_count : int\n Minimum number of times a token must occur in order to be included\n in vocab. Default is 0.\n smooth_idf : bool\n Whether to add 1 to the denominator of the IDF calculation to avoid\n divide-by-zero errors. Default is True.\n max_tokens : int\n Only add the `max_tokens` most frequent tokens that occur more\n than `min_count` to the vocabulary. If None, add all tokens\n greater that occur more than than `min_count`. 
Default is None.\n input_type : {'files', 'strings'}\n If 'files', the sequence input to `fit` is expected to be a list\n of filepaths. If 'strings', the input is expected to be a list of\n lists, each sublist containing the raw strings for a single\n document in the corpus. Default is 'filename'.\n filter_stopwords : bool\n Whether to remove stopwords before encoding the words in the\n corpus. Default is True.\n filter_punctuation : bool\n Whether to remove punctuation before encoding the words in the\n corpus. Default is True.\n tokenizer : {'whitespace', 'words', 'characters', 'bytes'}\n Strategy to follow when mapping strings to tokens. The\n `'whitespace'` tokenizer splits strings at whitespace characters.\n The `'words'` tokenizer splits strings using a \"word\" regex. The\n `'characters'` tokenizer splits strings into individual characters.\n The `'bytes'` tokenizer splits strings into a collection of\n individual bytes.\n \"\"\"\n # create a function to filter against words in the vocab\n self._filter_vocab = lambda words: words\n if isinstance(vocab, Vocabulary):\n self._filter_vocab = vocab.filter\n elif isinstance(vocab, (list, np.ndarray, set)):\n vocab = set(vocab)\n self._filter_vocab = lambda words: [\n w if w in vocab else \"\" for w in words\n ]\n\n if input_type not in [\"files\", \"strings\"]:\n fstr = \"`input_type` must be either 'files' or 'strings', but got {}\"\n raise ValueError(fstr.format(input_type))\n\n self._tokens = None\n self._idx2doc = None\n self.term_freq = None\n self.idx2token = None\n self.token2idx = None\n self.inv_doc_freq = None\n\n self.hyperparameters = {\n \"id\": \"TFIDFEncoder\",\n \"encoding\": None,\n \"vocab\": vocab\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters,\n \"lowercase\": lowercase,\n \"min_count\": min_count,\n \"input_type\": input_type,\n \"max_tokens\": max_tokens,\n \"smooth_idf\": smooth_idf,\n \"tokenizer\": tokenizer\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters[\"tokenizer\"],\n \"filter_stopwords\": filter_stopwords\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters[\"filter_stopwords\"],\n \"filter_punctuation\": filter_punctuation\n if not isinstance(vocab, Vocabulary)\n else vocab.hyperparameters[\"filter_punctuation\"],\n }\n\n def fit(self, corpus_seq, encoding=\"utf-8-sig\"):\n \"\"\"\n Compute term-frequencies and inverse document frequencies on a\n collection of documents.\n\n Parameters\n ----------\n corpus_seq : str or list of strs\n The filepath / list of filepaths / raw string contents of the\n document(s) to be encoded, in accordance with the `input_type`\n parameter passed to the :meth:`__init__` method. Each document is\n expected to be a string of tokens separated by whitespace.\n encoding : str\n Specifies the text encoding for corpus if `input_type` is `files`.\n Common entries are either 'utf-8' (no header byte), or 'utf-8-sig'\n (header byte). 
Default is 'utf-8-sig'.\n\n Returns\n -------\n self\n \"\"\"\n H = self.hyperparameters\n\n if isinstance(corpus_seq, str):\n corpus_seq = [corpus_seq]\n\n if H[\"input_type\"] == \"files\":\n for corpus_fp in corpus_seq:\n assert op.isfile(corpus_fp), \"{} does not exist\".format(corpus_fp)\n\n tokens = []\n idx2token, token2idx = {}, {}\n\n # encode special tokens\n for tt in [\"\", \"\", \"\"]:\n token2idx[tt] = len(tokens)\n idx2token[len(tokens)] = tt\n tokens.append(Token(tt))\n\n min_count = H[\"min_count\"]\n max_tokens = H[\"max_tokens\"]\n H[\"encoding\"] = encoding\n\n bol_ix = token2idx[\"\"]\n eol_ix = token2idx[\"\"]\n idx2doc, term_freq = {}, {}\n\n # encode the text in `corpus_fps` without any filtering ...\n for d_ix, doc in enumerate(corpus_seq):\n doc_count = {}\n idx2doc[d_ix] = doc if H[\"input_type\"] == \"files\" else None\n token2idx, idx2token, tokens, doc_count = self._encode_document(\n doc, token2idx, idx2token, tokens, doc_count, bol_ix, eol_ix,\n )\n term_freq[d_ix] = doc_count\n\n self._tokens = tokens\n self._idx2doc = idx2doc\n self.token2idx = token2idx\n self.idx2token = idx2token\n self.term_freq = term_freq\n\n # ... retain only the top `max_tokens` most frequent tokens, coding\n # everything else as ...\n if max_tokens is not None and len(tokens) > max_tokens:\n self._keep_top_n_tokens()\n\n # ... replace all words occurring less than `min_count` by ...\n if min(self._tokens, key=lambda t: t.count).count < min_count:\n self._drop_low_freq_tokens()\n\n # ... sort tokens alphabetically and reindex ...\n self._sort_tokens()\n\n # ... finally, calculate inverse document frequency\n self._calc_idf()\n return self\n\n def _encode_document(\n self, doc, word2idx, idx2word, tokens, doc_count, bol_ix, eol_ix,\n ):\n \"\"\"Perform tokenization and compute token counts for a single document\"\"\"\n H = self.hyperparameters\n lowercase = H[\"lowercase\"]\n filter_stop = H[\"filter_stopwords\"]\n filter_punc = H[\"filter_punctuation\"]\n\n if H[\"input_type\"] == \"files\":\n with open(doc, \"r\", encoding=H[\"encoding\"]) as handle:\n doc = handle.read()\n\n tokenizer_dict = {\n \"words\": tokenize_words,\n \"characters\": tokenize_chars,\n \"whitespace\": tokenize_whitespace,\n \"bytes\": tokenize_bytes_raw,\n }\n tokenizer = tokenizer_dict[H[\"tokenizer\"]]\n\n n_words = 0\n lines = doc.split(\"\\n\")\n for line in lines:\n words = tokenizer(\n line,\n lowercase=lowercase,\n filter_stopwords=filter_stop,\n filter_punctuation=filter_punc,\n encoding=H[\"encoding\"],\n )\n words = self._filter_vocab(words)\n n_words += len(words)\n\n for ww in words:\n if ww not in word2idx:\n word2idx[ww] = len(tokens)\n idx2word[len(tokens)] = ww\n tokens.append(Token(ww))\n\n t_idx = word2idx[ww]\n tokens[t_idx].count += 1\n doc_count[t_idx] = doc_count.get(t_idx, 0) + 1\n\n # wrap line in and tags\n tokens[bol_ix].count += 1\n tokens[eol_ix].count += 1\n\n doc_count[bol_ix] = doc_count.get(bol_ix, 0) + 1\n doc_count[eol_ix] = doc_count.get(eol_ix, 0) + 1\n return word2idx, idx2word, tokens, doc_count\n\n def _keep_top_n_tokens(self):\n N = self.hyperparameters[\"max_tokens\"]\n doc_counts, word2idx, idx2word = {}, {}, {}\n tokens = sorted(self._tokens, key=lambda x: x.count, reverse=True)\n\n # reindex the top-N tokens...\n unk_ix = None\n for idx, tt in enumerate(tokens[:N]):\n word2idx[tt.word] = idx\n idx2word[idx] = tt.word\n\n if tt.word == \"\":\n unk_ix = idx\n\n # ... 
if isn't in the top-N, add it, replacing the Nth\n # most-frequent word and adjust the count accordingly ...\n if unk_ix is None:\n unk_ix = self.token2idx[\"\"]\n old_count = tokens[N - 1].count\n tokens[N - 1] = self._tokens[unk_ix]\n tokens[N - 1].count += old_count\n word2idx[\"\"] = N - 1\n idx2word[N - 1] = \"\"\n\n # ... and recode all dropped tokens as \"\"\n for tt in tokens[N:]:\n tokens[unk_ix].count += tt.count\n\n # ... finally, reindex the word counts for each document\n doc_counts = {}\n for d_ix in self.term_freq.keys():\n doc_counts[d_ix] = {}\n for old_ix, d_count in self.term_freq[d_ix].items():\n word = self.idx2token[old_ix]\n new_ix = word2idx.get(word, unk_ix)\n doc_counts[d_ix][new_ix] = doc_counts[d_ix].get(new_ix, 0) + d_count\n\n self._tokens = tokens[:N]\n self.token2idx = word2idx\n self.idx2token = idx2word\n self.term_freq = doc_counts\n\n assert len(self._tokens) <= N\n\n def _drop_low_freq_tokens(self):\n \"\"\"\n Replace all tokens that occur less than `min_count` with the ``\n token.\n \"\"\"\n H = self.hyperparameters\n unk_token = self._tokens[self.token2idx[\"\"]]\n eol_token = self._tokens[self.token2idx[\"\"]]\n bol_token = self._tokens[self.token2idx[\"\"]]\n tokens = [unk_token, eol_token, bol_token]\n\n unk_idx = 0\n word2idx = {\"\": 0, \"\": 1, \"\": 2}\n idx2word = {0: \"\", 1: \"\", 2: \"\"}\n special = {\"\", \"\", \"\"}\n\n for tt in self._tokens:\n if tt.word not in special:\n if tt.count < H[\"min_count\"]:\n tokens[unk_idx].count += tt.count\n else:\n word2idx[tt.word] = len(tokens)\n idx2word[len(tokens)] = tt.word\n tokens.append(tt)\n\n # reindex document counts\n doc_counts = {}\n for d_idx in self.term_freq.keys():\n doc_counts[d_idx] = {}\n for old_idx, d_count in self.term_freq[d_idx].items():\n word = self.idx2token[old_idx]\n new_idx = word2idx.get(word, unk_idx)\n doc_counts[d_idx][new_idx] = doc_counts[d_idx].get(new_idx, 0) + d_count\n\n self._tokens = tokens\n self.token2idx = word2idx\n self.idx2token = idx2word\n self.term_freq = doc_counts\n\n def _sort_tokens(self):\n # sort tokens alphabetically and recode\n ix = 0\n token2idx, idx2token, = (\n {},\n {},\n )\n special = [\"\", \"\", \"\"]\n words = sorted(self.token2idx.keys())\n term_freq = {d: {} for d in self.term_freq.keys()}\n\n for w in words:\n if w not in special:\n old_ix = self.token2idx[w]\n token2idx[w], idx2token[ix] = ix, w\n for d in self.term_freq.keys():\n if old_ix in self.term_freq[d]:\n count = self.term_freq[d][old_ix]\n term_freq[d][ix] = count\n ix += 1\n\n for w in special:\n token2idx[w] = len(token2idx)\n idx2token[len(idx2token)] = w\n\n self.token2idx = token2idx\n self.idx2token = idx2token\n self.term_freq = term_freq\n self.vocab_counts = Counter({t.word: t.count for t in self._tokens})\n\n def _calc_idf(self):\n \"\"\"\n Compute the (smoothed-) inverse-document frequency for each token in\n the corpus.\n\n For a word token `w`, the IDF is simply\n\n IDF(w) = log ( |D| / |{ d in D: w in d }| ) + 1\n\n where D is the set of all documents in the corpus,\n\n D = {d1, d2, ..., dD}\n\n If `smooth_idf` is True, we perform additive smoothing on the number of\n documents containing a given word, equivalent to pretending that there\n exists a final D+1st document that contains every word in the corpus:\n\n SmoothedIDF(w) = log ( |D| + 1 / [1 + |{ d in D: w in d }|] ) + 1\n \"\"\"\n inv_doc_freq = {}\n smooth_idf = self.hyperparameters[\"smooth_idf\"]\n tf, doc_idxs = self.term_freq, self._idx2doc.keys()\n\n D = len(self._idx2doc) + int(smooth_idf)\n 
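        # Worked example of the formulas above (made-up numbers): with |D| = 3
        # documents and a token that appears in 2 of them,
        #
        #     IDF(w)         = log(3 / 2) + 1             ~ 1.41
        #     SmoothedIDF(w) = log((3 + 1) / (1 + 2)) + 1 ~ 1.29
        #
        # Smoothing shrinks the weight slightly and keeps the denominator
        # nonzero even for tokens that appear in no document.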
for word, w_ix in self.token2idx.items():\n d_count = int(smooth_idf)\n d_count += np.sum([1 if w_ix in tf[d_ix] else 0 for d_ix in doc_idxs])\n inv_doc_freq[w_ix] = 1 if d_count == 0 else np.log(D / d_count) + 1\n self.inv_doc_freq = inv_doc_freq\n\n def transform(self, ignore_special_chars=True):\n \"\"\"\n Generate the term-frequency inverse-document-frequency encoding of a\n text corpus.\n\n Parameters\n ----------\n ignore_special_chars : bool\n Whether to drop columns corresponding to \"\", \"\", and\n \"\" tokens from the final tfidf encoding. Default is True.\n\n Returns\n -------\n tfidf : numpy array of shape `(D, M [- 3])`\n The encoded corpus, with each row corresponding to a single\n document, and each column corresponding to a token id. The mapping\n between column numbers and tokens is stored in the `idx2token`\n attribute IFF `ignore_special_chars` is False. Otherwise, the\n mappings are not accurate.\n \"\"\"\n D, N = len(self._idx2doc), len(self._tokens)\n tf = np.zeros((D, N))\n idf = np.zeros((D, N))\n\n for d_ix in self._idx2doc.keys():\n words, counts = zip(*self.term_freq[d_ix].items())\n docs = np.ones(len(words), dtype=int) * d_ix\n tf[docs, words] = counts\n\n words = sorted(self.idx2token.keys())\n idf = np.tile(np.array([self.inv_doc_freq[w] for w in words]), (D, 1))\n tfidf = tf * idf\n\n if ignore_special_chars:\n idxs = [\n self.token2idx[\"\"],\n self.token2idx[\"\"],\n self.token2idx[\"\"],\n ]\n tfidf = np.delete(tfidf, idxs, 1)\n\n return tfidf\n\n\nclass Vocabulary:\n def __init__(\n self,\n lowercase=True,\n min_count=None,\n max_tokens=None,\n filter_stopwords=True,\n filter_punctuation=True,\n tokenizer=\"words\",\n ):\n \"\"\"\n An object for compiling and encoding the unique tokens in a text corpus.\n\n Parameters\n ----------\n lowercase : bool\n Whether to convert each string to lowercase before tokenization.\n Default is True.\n min_count : int\n Minimum number of times a token must occur in order to be included\n in vocab. If `None`, include all tokens from `corpus_fp` in vocab.\n Default is None.\n max_tokens : int\n Only add the `max_tokens` most frequent tokens that occur more\n than `min_count` to the vocabulary. If None, add all tokens\n that occur more than than `min_count`. Default is None.\n filter_stopwords : bool\n Whether to remove stopwords before encoding the words in the\n corpus. Default is True.\n filter_punctuation : bool\n Whether to remove punctuation before encoding the words in the\n corpus. Default is True.\n tokenizer : {'whitespace', 'words', 'characters', 'bytes'}\n Strategy to follow when mapping strings to tokens. The\n `'whitespace'` tokenizer splits strings at whitespace characters.\n The `'words'` tokenizer splits strings using a \"word\" regex. 
The\n `'characters'` tokenizer splits strings into individual characters.\n The `'bytes'` tokenizer splits strings into a collection of\n individual bytes.\n \"\"\"\n self.hyperparameters = {\n \"id\": \"Vocabulary\",\n \"encoding\": None,\n \"corpus_fps\": None,\n \"lowercase\": lowercase,\n \"min_count\": min_count,\n \"max_tokens\": max_tokens,\n \"filter_stopwords\": filter_stopwords,\n \"filter_punctuation\": filter_punctuation,\n \"tokenizer\": tokenizer,\n }\n\n def __len__(self):\n \"\"\"Return the number of tokens in the vocabulary\"\"\"\n return len(self._tokens)\n\n def __iter__(self):\n \"\"\"Return an iterator over the tokens in the vocabulary\"\"\"\n return iter(self._tokens)\n\n def __contains__(self, word):\n \"\"\"Assert whether `word` is a token in the vocabulary\"\"\"\n return word in self.token2idx\n\n def __getitem__(self, key):\n \"\"\"\n Return the token (if key is an integer) or the index (if key is a string)\n for the key in the vocabulary, if it exists.\n \"\"\"\n if isinstance(key, str):\n return self._tokens[self.token2idx[key]]\n if isinstance(key, int):\n return self._tokens[key]\n\n @property\n def n_tokens(self):\n \"\"\"The number of unique word tokens in the vocabulary\"\"\"\n return len(self.token2idx)\n\n @property\n def n_words(self):\n \"\"\"The total number of words in the corpus\"\"\"\n return sum(self.counts.values())\n\n @property\n def shape(self):\n \"\"\"The number of unique word tokens in the vocabulary\"\"\"\n return self._tokens.shape\n\n def most_common(self, n=5):\n \"\"\"Return the top `n` most common tokens in the corpus\"\"\"\n return self.counts.most_common()[:n]\n\n def words_with_count(self, k):\n \"\"\"Return all tokens that occur `k` times in the corpus\"\"\"\n return [w for w, c in self.counts.items() if c == k]\n\n def filter(self, words, unk=True): # noqa: A003\n \"\"\"\n Filter (or replace) any word in `words` that is not present in\n `Vocabulary`.\n\n Parameters\n ----------\n words : list of strs\n A list of words to filter\n unk : bool\n Whether to replace any out of vocabulary words in `words` with the\n ```` token (True) or skip them entirely (False). Default is\n True.\n\n Returns\n -------\n filtered : list of strs\n The list of words filtered against the words in Vocabulary.\n \"\"\"\n if unk:\n return [w if w in self else \"\" for w in words]\n return [w for w in words if w in self]\n\n def words_to_indices(self, words):\n \"\"\"\n Convert the words in `words` to their token indices. If a word is not\n in the vocabulary, return the index for the ```` token\n\n Parameters\n ----------\n words : list of strs\n A list of words to filter\n\n Returns\n -------\n indices : list of ints\n The token indices for each word in `words`\n \"\"\"\n unk_ix = self.token2idx[\"\"]\n lowercase = self.hyperparameters[\"lowercase\"]\n words = [w.lower() for w in words] if lowercase else words\n return [self.token2idx[w] if w in self else unk_ix for w in words]\n\n def indices_to_words(self, indices):\n \"\"\"\n Convert the indices in `indices` to their word values. 
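A minimal usage sketch of the word/index round trip, using the `fit` method defined below on a throwaway corpus (the file contents and settings are made up for illustration):

    import tempfile

    # Hypothetical corpus written to a temporary file purely for this sketch.
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fp:
        fp.write("the cat sat on the mat\nthe dog sat\n")
        corpus_fp = fp.name

    V = Vocabulary(lowercase=True, filter_stopwords=False).fit(corpus_fp)

    idxs = V.words_to_indices(["the", "zebra"])  # out-of-vocab words map to the unknown-token index
    print(V.indices_to_words(idxs))              # the unknown word comes back as the placeholder token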
If an index is\n not in the vocabulary, return the ```` token.\n\n Parameters\n ----------\n indices : list of ints\n The token indices for each word in `words`\n\n Returns\n -------\n words : list of strs\n The word strings corresponding to each token index in `indices`\n \"\"\"\n unk = \"\"\n return [self.idx2token[i] if i in self.idx2token else unk for i in indices]\n\n def fit(self, corpus_fps, encoding=\"utf-8-sig\"):\n \"\"\"\n Compute the vocabulary across a collection of documents.\n\n Parameters\n ----------\n corpus_fps : str or list of strs\n The filepath / list of filepaths for the document(s) to be encoded.\n Each document is expected to be encoded as newline-separated\n string of text, with adjacent tokens separated by a whitespace\n character.\n encoding : str\n Specifies the text encoding for corpus. Common entries are either\n 'utf-8' (no header byte), or 'utf-8-sig' (header byte). Default is\n 'utf-8-sig'.\n\n Returns\n -------\n self\n \"\"\"\n if isinstance(corpus_fps, str):\n corpus_fps = [corpus_fps]\n\n for corpus_fp in corpus_fps:\n assert op.isfile(corpus_fp), \"{} does not exist\".format(corpus_fp)\n\n tokens = []\n H = self.hyperparameters\n idx2word, word2idx = {}, {}\n\n tokenizer_dict = {\n \"words\": tokenize_words,\n \"characters\": tokenize_chars,\n \"whitespace\": tokenize_whitespace,\n \"bytes\": tokenize_bytes_raw,\n }\n\n min_count = H[\"min_count\"]\n lowercase = H[\"lowercase\"]\n max_tokens = H[\"max_tokens\"]\n filter_stop = H[\"filter_stopwords\"]\n filter_punc = H[\"filter_punctuation\"]\n tokenizer = tokenizer_dict[H[\"tokenizer\"]]\n\n H[\"encoding\"] = encoding\n H[\"corpus_fps\"] = corpus_fps\n\n # encode special tokens\n for tt in [\"\", \"\", \"\"]:\n word2idx[tt] = len(tokens)\n idx2word[len(tokens)] = tt\n tokens.append(Token(tt))\n\n bol_ix = word2idx[\"\"]\n eol_ix = word2idx[\"\"]\n\n for d_ix, doc_fp in enumerate(corpus_fps):\n with open(doc_fp, \"r\", encoding=H[\"encoding\"]) as doc:\n for line in doc:\n words = tokenizer(\n line,\n lowercase=lowercase,\n filter_stopwords=filter_stop,\n filter_punctuation=filter_punc,\n encoding=H[\"encoding\"],\n )\n\n for ww in words:\n if ww not in word2idx:\n word2idx[ww] = len(tokens)\n idx2word[len(tokens)] = ww\n tokens.append(Token(ww))\n\n t_idx = word2idx[ww]\n tokens[t_idx].count += 1\n\n # wrap line in and tags\n tokens[bol_ix].count += 1\n tokens[eol_ix].count += 1\n\n self._tokens = tokens\n self.token2idx = word2idx\n self.idx2token = idx2word\n\n # replace all words occurring less than `min_count` by \n if min_count is not None:\n self._drop_low_freq_tokens()\n\n # retain only the top `max_tokens` most frequent tokens, coding\n # everything else as \n if max_tokens is not None and len(tokens) > max_tokens:\n self._keep_top_n_tokens()\n\n counts = {w: self._tokens[ix].count for w, ix in self.token2idx.items()}\n self.counts = Counter(counts)\n self._tokens = np.array(self._tokens)\n return self\n\n def _keep_top_n_tokens(self):\n word2idx, idx2word = {}, {}\n N = self.hyperparameters[\"max_tokens\"]\n tokens = sorted(self._tokens, key=lambda x: x.count, reverse=True)\n\n # reindex the top-N tokens...\n unk_ix = None\n for idx, tt in enumerate(tokens[:N]):\n word2idx[tt.word] = idx\n idx2word[idx] = tt.word\n\n if tt.word == \"\":\n unk_ix = idx\n\n # ... 
if isn't in the top-N, add it, replacing the Nth\n # most-frequent word and adjusting the count accordingly ...\n if unk_ix is None:\n unk_ix = self.token2idx[\"\"]\n old_count = tokens[N - 1].count\n tokens[N - 1] = self._tokens[unk_ix]\n tokens[N - 1].count += old_count\n word2idx[\"\"] = N - 1\n idx2word[N - 1] = \"\"\n\n # ... and recode all dropped tokens as \"\"\n for tt in tokens[N:]:\n tokens[unk_ix].count += tt.count\n\n self._tokens = tokens[:N]\n self.token2idx = word2idx\n self.idx2token = idx2word\n\n assert len(self._tokens) <= N\n\n def _drop_low_freq_tokens(self):\n \"\"\"\n Replace all tokens that occur less than `min_count` with the ``\n token.\n \"\"\"\n unk_idx = 0\n unk_token = self._tokens[self.token2idx[\"\"]]\n eol_token = self._tokens[self.token2idx[\"\"]]\n bol_token = self._tokens[self.token2idx[\"\"]]\n\n H = self.hyperparameters\n tokens = [unk_token, eol_token, bol_token]\n word2idx = {\"\": 0, \"\": 1, \"\": 2}\n idx2word = {0: \"\", 1: \"\", 2: \"\"}\n special = {\"\", \"\", \"\"}\n\n for tt in self._tokens:\n if tt.word not in special:\n if tt.count < H[\"min_count\"]:\n tokens[unk_idx].count += tt.count\n else:\n word2idx[tt.word] = len(tokens)\n idx2word[len(tokens)] = tt.word\n tokens.append(tt)\n\n self._tokens = tokens\n self.token2idx = word2idx\n self.idx2token = idx2word\n", "output": ["tokenize_chars", "remove_stop_words", "strip_punctuation", "ngrams", "tokenize_whitespace", "bytes_to_chars", "tokenize_words", "tokenize_bytes_raw", "tokenize_words_bytes", "Token", "Node", "Vocabulary", "BytePairEncoder", "TFIDFEncoder", "HuffmanEncoder"], "metadata": {"file_path": "numpyml-master/raw/numpyml/numpy-ml-master/numpy_ml/preprocessing/nlp.py", "file_length": 14055, "symbol_dict": [{"symbol": "bytes_to_chars", "type": "mannual_defined_function", "byte_location": 5821, "location": 1767}, {"symbol": "tokenize_words", "type": "mannual_defined_function", "byte_location": 3729, "location": 1125}, {"symbol": "tokenize_chars", "type": "mannual_defined_function", "byte_location": 6191, "location": 1899}, {"symbol": "remove_stop_words", "type": "mannual_defined_function", "byte_location": 6584, "location": 2018}, {"symbol": "strip_punctuation", "type": "mannual_defined_function", "byte_location": 6734, "location": 2067}, {"symbol": "tokenize_words_bytes", "type": "mannual_defined_function", "byte_location": 4163, "location": 1266}, {"symbol": "ngrams", "type": "mannual_defined_function", "byte_location": 3101, "location": 927}, {"symbol": "tokenize_bytes_raw", "type": "mannual_defined_function", "byte_location": 4761, "location": 1452}, {"symbol": "tokenize_whitespace", "type": "mannual_defined_function", "byte_location": 3263, "location": 985}, {"symbol": "Token", "type": "mannual_defined_class", "byte_location": 20561, "location": 5907}, {"symbol": "HuffmanEncoder", "type": "mannual_defined_class", "byte_location": 15768, "location": 4628}, {"symbol": "Node", "type": "mannual_defined_class", "byte_location": 14975, "location": 4393}, {"symbol": "TFIDFEncoder", "type": "mannual_defined_class", "byte_location": 20804, "location": 5986}, {"symbol": "BytePairEncoder", "type": "mannual_defined_class", "byte_location": 7073, "location": 2137}, {"symbol": "Vocabulary", "type": "mannual_defined_class", "byte_location": 36666, "location": 10845}]}} {"input": "\"\"\"\nDefines helper methods useful for loading and caching Interface examples.\n\"\"\"\nfrom __future__ import annotations\n\nimport ast\nimport csv\nimport inspect\nimport os\nimport shutil\nimport 
subprocess\nimport tempfile\nimport warnings\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Iterable, Literal, Optional\n\nimport numpy as np\nimport PIL\nimport PIL.Image\nfrom gradio_client import utils as client_utils\nfrom gradio_client.documentation import document, set_documentation_group\n\nfrom gradio import components, oauth, processing_utils, routes, utils, wasm_utils\nfrom gradio.context import Context, LocalContext\nfrom gradio.data_classes import GradioModel, GradioRootModel\nfrom gradio.events import EventData\nfrom gradio.exceptions import Error\nfrom gradio.flagging import CSVLogger\n\nif TYPE_CHECKING: # Only import for type checking (to avoid circular imports).\n from gradio.components import Component\n\nLOG_FILE = \"log.csv\"\n\nset_documentation_group(\"helpers\")\n\n\ndef create_examples(\n examples: list[Any] | list[list[Any]] | str,\n inputs: Component | list[Component],\n outputs: Component | list[Component] | None = None,\n fn: Callable | None = None,\n cache_examples: bool = False,\n examples_per_page: int = 10,\n _api_mode: bool = False,\n label: str | None = None,\n elem_id: str | None = None,\n run_on_click: bool = False,\n preprocess: bool = True,\n postprocess: bool = True,\n api_name: str | Literal[False] = \"load_example\",\n batch: bool = False,\n):\n \"\"\"Top-level synchronous function that creates Examples. Provided for backwards compatibility, i.e. so that gr.Examples(...) can be used to create the Examples component.\"\"\"\n examples_obj = Examples(\n examples=examples,\n inputs=inputs,\n outputs=outputs,\n fn=fn,\n cache_examples=cache_examples,\n examples_per_page=examples_per_page,\n _api_mode=_api_mode,\n label=label,\n elem_id=elem_id,\n run_on_click=run_on_click,\n preprocess=preprocess,\n postprocess=postprocess,\n api_name=api_name,\n batch=batch,\n _initiated_directly=False,\n )\n examples_obj.create()\n return examples_obj\n\n\n@document()\nclass Examples:\n \"\"\"\n This class is a wrapper over the Dataset component and can be used to create Examples\n for Blocks / Interfaces. Populates the Dataset component with examples and\n assigns event listener so that clicking on an example populates the input/output\n components. Optionally handles example caching for fast inference.\n\n Demos: blocks_inputs, fake_gan\n Guides: more-on-examples-and-flagging, using-hugging-face-integrations, image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, create-your-own-friends-with-a-gan\n \"\"\"\n\n def __init__(\n self,\n examples: list[Any] | list[list[Any]] | str,\n inputs: Component | list[Component],\n outputs: Component | list[Component] | None = None,\n fn: Callable | None = None,\n cache_examples: bool = False,\n examples_per_page: int = 10,\n _api_mode: bool = False,\n label: str | None = \"Examples\",\n elem_id: str | None = None,\n run_on_click: bool = False,\n preprocess: bool = True,\n postprocess: bool = True,\n api_name: str | Literal[False] = \"load_example\",\n batch: bool = False,\n _initiated_directly: bool = True,\n ):\n \"\"\"\n Parameters:\n examples: example inputs that can be clicked to populate specific components. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided but it should be within the directory with the python file running the gradio app. 
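For the common nested-list case, a minimal sketch of wiring `gr.Examples` into a Blocks demo (the component labels and example values are made up):

    import gradio as gr

    def greet(name):
        return "Hello " + name + "!"

    with gr.Blocks() as demo:
        name = gr.Textbox(label="Name")
        greeting = gr.Textbox(label="Greeting")
        btn = gr.Button("Greet")
        btn.click(greet, inputs=name, outputs=greeting)
        # One inner list per example row, one entry per input component.
        gr.Examples(examples=[["Alice"], ["Bob"]], inputs=[name])

    demo.launch()

Passing `cache_examples=True` here would additionally require `fn=greet` and `outputs=[greeting]`.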
If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs.\n inputs: the component or list of components corresponding to the examples\n outputs: optionally, provide the component or list of components corresponding to the output of the examples. Required if `cache_examples` is True.\n fn: optionally, provide the function to run to generate the outputs corresponding to the examples. Required if `cache_examples` is True.\n cache_examples: if True, caches examples for fast runtime. If True, then `fn` and `outputs` must be provided. If `fn` is a generator function, then the last yielded value will be used as the output.\n examples_per_page: how many examples to show per page.\n label: the label to use for the examples component (by default, \"Examples\")\n elem_id: an optional string that is assigned as the id of this component in the HTML DOM.\n run_on_click: if cache_examples is False, clicking on an example does not run the function when an example is clicked. Set this to True to run the function when an example is clicked. Has no effect if cache_examples is True.\n preprocess: if True, preprocesses the example input before running the prediction function and caching the output. Only applies if `cache_examples` is True.\n postprocess: if True, postprocesses the example output after running the prediction function and before caching. Only applies if `cache_examples` is True.\n api_name: Defines how the event associated with clicking on the examples appears in the API docs. Can be a string or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use the example function.\n batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. Used only if cache_examples is True.\n \"\"\"\n if _initiated_directly:\n warnings.warn(\n \"Please use gr.Examples(...) instead of gr.examples.Examples(...) 
to create the Examples.\",\n )\n\n if cache_examples and (fn is None or outputs is None):\n raise ValueError(\"If caching examples, `fn` and `outputs` must be provided\")\n\n if not isinstance(inputs, list):\n inputs = [inputs]\n if outputs and not isinstance(outputs, list):\n outputs = [outputs]\n\n working_directory = Path().absolute()\n\n if examples is None:\n raise ValueError(\"The parameter `examples` cannot be None\")\n elif isinstance(examples, list) and (\n len(examples) == 0 or isinstance(examples[0], list)\n ):\n pass\n elif (\n isinstance(examples, list) and len(inputs) == 1\n ): # If there is only one input component, examples can be provided as a regular list instead of a list of lists\n examples = [[e] for e in examples]\n elif isinstance(examples, str):\n if not Path(examples).exists():\n raise FileNotFoundError(\n f\"Could not find examples directory: {examples}\"\n )\n working_directory = examples\n if not (Path(examples) / LOG_FILE).exists():\n if len(inputs) == 1:\n examples = [[e] for e in os.listdir(examples)]\n else:\n raise FileNotFoundError(\n \"Could not find log file (required for multiple inputs): \"\n + LOG_FILE\n )\n else:\n with open(Path(examples) / LOG_FILE) as logs:\n examples = list(csv.reader(logs))\n examples = [\n examples[i][: len(inputs)] for i in range(1, len(examples))\n ] # remove header and unnecessary columns\n\n else:\n raise ValueError(\n \"The parameter `examples` must either be a string directory or a list\"\n \"(if there is only 1 input component) or (more generally), a nested \"\n \"list, where each sublist represents a set of inputs.\"\n )\n\n input_has_examples = [False] * len(inputs)\n for example in examples:\n for idx, example_for_input in enumerate(example):\n if example_for_input is not None:\n try:\n input_has_examples[idx] = True\n except IndexError:\n pass # If there are more example components than inputs, ignore. This can sometimes be intentional (e.g. loading from a log file where outputs and timestamps are also logged)\n\n inputs_with_examples = [\n inp for (inp, keep) in zip(inputs, input_has_examples) if keep\n ]\n non_none_examples = [\n [ex for (ex, keep) in zip(example, input_has_examples) if keep]\n for example in examples\n ]\n\n self.examples = examples\n self.non_none_examples = non_none_examples\n self.inputs = inputs\n self.inputs_with_examples = inputs_with_examples\n self.outputs = outputs or []\n self.fn = fn\n self.cache_examples = cache_examples\n self._api_mode = _api_mode\n self.preprocess = preprocess\n self.postprocess = postprocess\n self.api_name: str | Literal[False] = api_name\n self.batch = batch\n\n with utils.set_directory(working_directory):\n self.processed_examples = []\n for example in examples:\n sub = []\n for component, sample in zip(inputs, example):\n prediction_value = component.postprocess(sample)\n if isinstance(prediction_value, (GradioRootModel, GradioModel)):\n prediction_value = prediction_value.model_dump()\n prediction_value = processing_utils.move_files_to_cache(\n prediction_value, component, postprocess=True\n )\n sub.append(prediction_value)\n self.processed_examples.append(sub)\n\n self.non_none_processed_examples = [\n [ex for (ex, keep) in zip(example, input_has_examples) if keep]\n for example in self.processed_examples\n ]\n if cache_examples:\n for example in self.examples:\n if len([ex for ex in example if ex is not None]) != len(self.inputs):\n warnings.warn(\n \"Examples are being cached but not all input components have \"\n \"example values. 
This may result in an exception being thrown by \"\n \"your function. If you do get an error while caching examples, make \"\n \"sure all of your inputs have example values for all of your examples \"\n \"or you provide default values for those particular parameters in your function.\"\n )\n break\n\n from gradio import components\n\n with utils.set_directory(working_directory):\n self.dataset = components.Dataset(\n components=inputs_with_examples,\n samples=non_none_examples,\n type=\"index\",\n label=label,\n samples_per_page=examples_per_page,\n elem_id=elem_id,\n )\n\n self.cached_folder = utils.get_cache_folder() / str(self.dataset._id)\n self.cached_file = Path(self.cached_folder) / \"log.csv\"\n self.cache_examples = cache_examples\n self.run_on_click = run_on_click\n\n def create(self) -> None:\n \"\"\"Caches the examples if self.cache_examples is True and creates the Dataset\n component to hold the examples\"\"\"\n\n async def load_example(example_id):\n processed_example = self.non_none_processed_examples[example_id]\n if len(self.inputs_with_examples) == 1:\n return update(\n value=processed_example[0], **self.dataset.component_props[0]\n )\n return [\n update(value=processed_example[i], **self.dataset.component_props[i])\n for i in range(len(self.inputs_with_examples))\n ]\n\n if Context.root_block:\n self.load_input_event = self.dataset.click(\n load_example,\n inputs=[self.dataset],\n outputs=self.inputs_with_examples, # type: ignore\n show_progress=\"hidden\",\n postprocess=False,\n queue=False,\n api_name=self.api_name,\n show_api=False,\n )\n if self.run_on_click and not self.cache_examples:\n if self.fn is None:\n raise ValueError(\"Cannot run_on_click if no function is provided\")\n self.load_input_event.then(\n self.fn,\n inputs=self.inputs, # type: ignore\n outputs=self.outputs, # type: ignore\n show_api=False,\n )\n\n if self.cache_examples:\n if wasm_utils.IS_WASM:\n # In the Wasm mode, the `threading` module is not supported,\n # so `client_utils.synchronize_async` is also not available.\n # And `self.cache()` should be waited for to complete before this method returns,\n # (otherwise, an error \"Cannot cache examples if not in a Blocks context\" will be raised anyway)\n # so `eventloop.create_task(self.cache())` is also not an option.\n raise wasm_utils.WasmUnsupportedError(\n \"Caching examples is not supported in the Wasm mode.\"\n )\n client_utils.synchronize_async(self.cache)\n\n async def cache(self) -> None:\n \"\"\"\n Caches all of the examples so that their predictions can be shown immediately.\n \"\"\"\n if Context.root_block is None:\n raise ValueError(\"Cannot cache examples if not in a Blocks context\")\n if Path(self.cached_file).exists():\n print(\n f\"Using cache from '{utils.abspath(self.cached_folder)}' directory. 
If method or examples have changed since last caching, delete this folder to clear cache.\\n\"\n )\n else:\n print(f\"Caching examples at: '{utils.abspath(self.cached_folder)}'\")\n cache_logger = CSVLogger()\n\n generated_values = []\n if inspect.isgeneratorfunction(self.fn):\n\n def get_final_item(*args): # type: ignore\n x = None\n generated_values.clear()\n for x in self.fn(*args): # noqa: B007 # type: ignore\n generated_values.append(x)\n return x\n\n fn = get_final_item\n elif inspect.isasyncgenfunction(self.fn):\n\n async def get_final_item(*args):\n x = None\n generated_values.clear()\n async for x in self.fn(*args): # noqa: B007 # type: ignore\n generated_values.append(x)\n return x\n\n fn = get_final_item\n else:\n fn = self.fn\n\n # create a fake dependency to process the examples and get the predictions\n from gradio.events import EventListenerMethod\n\n dependency, fn_index = Context.root_block.set_event_trigger(\n [EventListenerMethod(Context.root_block, \"load\")],\n fn=fn,\n inputs=self.inputs_with_examples, # type: ignore\n outputs=self.outputs, # type: ignore\n preprocess=self.preprocess and not self._api_mode,\n postprocess=self.postprocess and not self._api_mode,\n batch=self.batch,\n )\n\n assert self.outputs is not None\n cache_logger.setup(self.outputs, self.cached_folder)\n for example_id, _ in enumerate(self.examples):\n print(f\"Caching example {example_id + 1}/{len(self.examples)}\")\n processed_input = self.processed_examples[example_id]\n if self.batch:\n processed_input = [[value] for value in processed_input]\n with utils.MatplotlibBackendMananger():\n prediction = await Context.root_block.process_api(\n fn_index=fn_index,\n inputs=processed_input,\n request=None,\n )\n output = prediction[\"data\"]\n if len(generated_values):\n output = merge_generated_values_into_output(\n self.outputs, generated_values, output\n )\n\n if self.batch:\n output = [value[0] for value in output]\n cache_logger.flag(output)\n # Remove the \"fake_event\" to prevent bugs in loading interfaces from spaces\n Context.root_block.dependencies.remove(dependency)\n Context.root_block.fns.pop(fn_index)\n\n # Remove the original load_input_event and replace it with one that\n # also populates the input. 
We do it this way to to allow the cache()\n # method to be called independently of the create() method\n index = Context.root_block.dependencies.index(self.load_input_event)\n Context.root_block.dependencies.pop(index)\n Context.root_block.fns.pop(index)\n\n def load_example(example_id):\n processed_example = self.non_none_processed_examples[\n example_id\n ] + self.load_from_cache(example_id)\n return utils.resolve_singleton(processed_example)\n\n self.load_input_event = self.dataset.click(\n load_example,\n inputs=[self.dataset],\n outputs=self.inputs_with_examples + self.outputs, # type: ignore\n show_progress=\"hidden\",\n postprocess=False,\n queue=False,\n api_name=self.api_name,\n show_api=False,\n )\n\n def load_from_cache(self, example_id: int) -> list[Any]:\n \"\"\"Loads a particular cached example for the interface.\n Parameters:\n example_id: The id of the example to process (zero-indexed).\n \"\"\"\n with open(self.cached_file, encoding=\"utf-8\") as cache:\n examples = list(csv.reader(cache))\n example = examples[example_id + 1] # +1 to adjust for header\n output = []\n assert self.outputs is not None\n for component, value in zip(self.outputs, example):\n value_to_use = value\n try:\n value_as_dict = ast.literal_eval(value)\n # File components that output multiple files get saved as a python list\n # need to pass the parsed list to serialize\n # TODO: Better file serialization in 4.0\n if isinstance(value_as_dict, list) and isinstance(\n component, components.File\n ):\n value_to_use = value_as_dict\n assert utils.is_update(value_as_dict)\n output.append(value_as_dict)\n except (ValueError, TypeError, SyntaxError, AssertionError):\n output.append(\n component.read_from_flag(\n value_to_use,\n self.cached_folder,\n )\n )\n return output\n\n\ndef merge_generated_values_into_output(\n components: list[Component], generated_values: list, output: list\n):\n from gradio.components.base import StreamingOutput\n\n for output_index, output_component in enumerate(components):\n if isinstance(output_component, StreamingOutput) and output_component.streaming:\n binary_chunks = []\n for i, chunk in enumerate(generated_values):\n if len(components) > 1:\n chunk = chunk[output_index]\n processed_chunk = output_component.postprocess(chunk)\n if isinstance(processed_chunk, (GradioModel, GradioRootModel)):\n processed_chunk = processed_chunk.model_dump()\n binary_chunks.append(\n output_component.stream_output(processed_chunk, \"\", i == 0)[0]\n )\n binary_data = b\"\".join(binary_chunks)\n tempdir = os.environ.get(\"GRADIO_TEMP_DIR\") or str(\n Path(tempfile.gettempdir()) / \"gradio\"\n )\n os.makedirs(tempdir, exist_ok=True)\n temp_file = tempfile.NamedTemporaryFile(dir=tempdir, delete=False)\n with open(temp_file.name, \"wb\") as f:\n f.write(binary_data)\n\n output[output_index] = {\n \"path\": temp_file.name,\n }\n\n return output\n\n\nclass TrackedIterable:\n def __init__(\n self,\n iterable: Iterable | None,\n index: int | None,\n length: int | None,\n desc: str | None,\n unit: str | None,\n _tqdm=None,\n progress: float | None = None,\n ) -> None:\n self.iterable = iterable\n self.index = index\n self.length = length\n self.desc = desc\n self.unit = unit\n self._tqdm = _tqdm\n self.progress = progress\n\n\n@document(\"__call__\", \"tqdm\")\nclass Progress(Iterable):\n \"\"\"\n The Progress class provides a custom progress tracker that is used in a function signature.\n To attach a Progress tracker to a function, simply add a parameter right after the input parameters that has a 
default value set to a `gradio.Progress()` instance.\n The Progress tracker can then be updated in the function by calling the Progress object or using the `tqdm` method on an Iterable.\n The Progress tracker is currently only available with `queue()`.\n Example:\n import gradio as gr\n import time\n def my_function(x, progress=gr.Progress()):\n progress(0, desc=\"Starting...\")\n time.sleep(1)\n for i in progress.tqdm(range(100)):\n time.sleep(0.1)\n return x\n gr.Interface(my_function, gr.Textbox(), gr.Textbox()).queue().launch()\n Demos: progress\n \"\"\"\n\n def __init__(\n self,\n track_tqdm: bool = False,\n ):\n \"\"\"\n Parameters:\n track_tqdm: If True, the Progress object will track any tqdm.tqdm iterations with the tqdm library in the function.\n \"\"\"\n if track_tqdm:\n patch_tqdm()\n self.track_tqdm = track_tqdm\n self.iterables: list[TrackedIterable] = []\n\n def __len__(self):\n return self.iterables[-1].length\n\n def __iter__(self):\n return self\n\n def __next__(self):\n \"\"\"\n Updates progress tracker with next item in iterable.\n \"\"\"\n callback = self._progress_callback()\n if callback:\n current_iterable = self.iterables[-1]\n while (\n not hasattr(current_iterable.iterable, \"__next__\")\n and len(self.iterables) > 0\n ):\n current_iterable = self.iterables.pop()\n callback(self.iterables)\n if current_iterable.index is None:\n raise IndexError(\"Index not set.\")\n current_iterable.index += 1\n try:\n return next(current_iterable.iterable) # type: ignore\n except StopIteration:\n self.iterables.pop()\n raise\n else:\n return self\n\n def __call__(\n self,\n progress: float | tuple[int, int | None] | None,\n desc: str | None = None,\n total: int | None = None,\n unit: str = \"steps\",\n _tqdm=None,\n ):\n \"\"\"\n Updates progress tracker with progress and message text.\n Parameters:\n progress: If float, should be between 0 and 1 representing completion. If Tuple, first number represents steps completed, and second value represents total steps or None if unknown. 
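As a complement to the `tqdm` example in the class docstring, a minimal sketch of calling the tracker directly with both the float and the tuple forms; `files` is assumed to come from a multi-file input component, and the demo must be queued for the bar to render:

    import time
    import gradio as gr

    def process(files, progress=gr.Progress()):
        progress(0, desc="Warming up...")  # float form: fraction complete
        for i, name in enumerate(files):
            # tuple form: (steps completed, total steps); total may be None if unknown
            progress((i, len(files)), desc=f"Processing {name}", unit="files")
            time.sleep(0.1)
        return f"Processed {len(files)} files"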
If None, hides progress bar.\n desc: description to display.\n total: estimated total number of steps.\n unit: unit of iterations.\n \"\"\"\n callback = self._progress_callback()\n if callback:\n if isinstance(progress, tuple):\n index, total = progress\n progress = None\n else:\n index = None\n callback(\n self.iterables\n + [TrackedIterable(None, index, total, desc, unit, _tqdm, progress)]\n )\n else:\n return progress\n\n def tqdm(\n self,\n iterable: Iterable | None,\n desc: str | None = None,\n total: int | None = None,\n unit: str = \"steps\",\n _tqdm=None,\n ):\n \"\"\"\n Attaches progress tracker to iterable, like tqdm.\n Parameters:\n iterable: iterable to attach progress tracker to.\n desc: description to display.\n total: estimated total number of steps.\n unit: unit of iterations.\n \"\"\"\n callback = self._progress_callback()\n if callback:\n if iterable is None:\n new_iterable = TrackedIterable(None, 0, total, desc, unit, _tqdm)\n self.iterables.append(new_iterable)\n callback(self.iterables)\n return self\n length = len(iterable) if hasattr(iterable, \"__len__\") else None # type: ignore\n self.iterables.append(\n TrackedIterable(iter(iterable), 0, length, desc, unit, _tqdm)\n )\n return self\n\n def update(self, n=1):\n \"\"\"\n Increases latest iterable with specified number of steps.\n Parameters:\n n: number of steps completed.\n \"\"\"\n callback = self._progress_callback()\n if callback and len(self.iterables) > 0:\n current_iterable = self.iterables[-1]\n if current_iterable.index is None:\n raise IndexError(\"Index not set.\")\n current_iterable.index += n\n callback(self.iterables)\n else:\n return\n\n def close(self, _tqdm):\n \"\"\"\n Removes iterable with given _tqdm.\n \"\"\"\n callback = self._progress_callback()\n if callback:\n for i in range(len(self.iterables)):\n if id(self.iterables[i]._tqdm) == id(_tqdm):\n self.iterables.pop(i)\n break\n callback(self.iterables)\n else:\n return\n\n @staticmethod\n def _progress_callback():\n blocks = LocalContext.blocks.get()\n event_id = LocalContext.event_id.get()\n if not (blocks and event_id):\n return None\n return partial(blocks._queue.set_progress, event_id)\n\n\ndef patch_tqdm() -> None:\n try:\n _tqdm = __import__(\"tqdm\")\n except ModuleNotFoundError:\n return\n\n def init_tqdm(\n self, iterable=None, desc=None, total=None, unit=\"steps\", *args, **kwargs\n ):\n self._progress = LocalContext.progress.get()\n if self._progress is not None:\n self._progress.tqdm(iterable, desc, total, unit, _tqdm=self)\n kwargs[\"file\"] = open(os.devnull, \"w\") # noqa: SIM115\n self.__init__orig__(iterable, desc, total, *args, unit=unit, **kwargs)\n\n def iter_tqdm(self):\n if self._progress is not None:\n return self._progress\n return self.__iter__orig__()\n\n def update_tqdm(self, n=1):\n if self._progress is not None:\n self._progress.update(n)\n return self.__update__orig__(n)\n\n def close_tqdm(self):\n if self._progress is not None:\n self._progress.close(self)\n return self.__close__orig__()\n\n def exit_tqdm(self, exc_type, exc_value, traceback):\n if self._progress is not None:\n self._progress.close(self)\n return self.__exit__orig__(exc_type, exc_value, traceback)\n\n # Backup\n if not hasattr(_tqdm.tqdm, \"__init__orig__\"):\n _tqdm.tqdm.__init__orig__ = _tqdm.tqdm.__init__\n if not hasattr(_tqdm.tqdm, \"__update__orig__\"):\n _tqdm.tqdm.__update__orig__ = _tqdm.tqdm.update\n if not hasattr(_tqdm.tqdm, \"__close__orig__\"):\n _tqdm.tqdm.__close__orig__ = _tqdm.tqdm.close\n if not hasattr(_tqdm.tqdm, 
\"__exit__orig__\"):\n _tqdm.tqdm.__exit__orig__ = _tqdm.tqdm.__exit__\n if not hasattr(_tqdm.tqdm, \"__iter__orig__\"):\n _tqdm.tqdm.__iter__orig__ = _tqdm.tqdm.__iter__\n\n # Patch\n _tqdm.tqdm.__init__ = init_tqdm\n _tqdm.tqdm.update = update_tqdm\n _tqdm.tqdm.close = close_tqdm\n _tqdm.tqdm.__exit__ = exit_tqdm\n _tqdm.tqdm.__iter__ = iter_tqdm\n\n if hasattr(_tqdm, \"auto\") and hasattr(_tqdm.auto, \"tqdm\"):\n _tqdm.auto.tqdm = _tqdm.tqdm\n\n\ndef create_tracker(fn, track_tqdm):\n progress = Progress(track_tqdm=track_tqdm)\n if not track_tqdm:\n return progress, fn\n return progress, utils.function_wrapper(\n f=fn,\n before_fn=LocalContext.progress.set,\n before_args=(progress,),\n after_fn=LocalContext.progress.set,\n after_args=(None,),\n )\n\n\ndef special_args(\n fn: Callable,\n inputs: list[Any] | None = None,\n request: routes.Request | None = None,\n event_data: EventData | None = None,\n) -> tuple[list, int | None, int | None]:\n \"\"\"\n Checks if function has special arguments Request or EventData (via annotation) or Progress (via default value).\n If inputs is provided, these values will be loaded into the inputs array.\n Parameters:\n fn: function to check.\n inputs: array to load special arguments into.\n request: request to load into inputs.\n event_data: event-related data to load into inputs.\n Returns:\n updated inputs, progress index, event data index.\n \"\"\"\n try:\n signature = inspect.signature(fn)\n except ValueError:\n return inputs or [], None, None\n type_hints = utils.get_type_hints(fn)\n positional_args = []\n for param in signature.parameters.values():\n if param.kind not in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD):\n break\n positional_args.append(param)\n progress_index = None\n event_data_index = None\n for i, param in enumerate(positional_args):\n type_hint = type_hints.get(param.name)\n if isinstance(param.default, Progress):\n progress_index = i\n if inputs is not None:\n inputs.insert(i, param.default)\n elif type_hint == routes.Request:\n if inputs is not None:\n inputs.insert(i, request)\n elif (\n type_hint == Optional[oauth.OAuthProfile]\n or type_hint == oauth.OAuthProfile\n or type_hint == Optional[oauth.OAuthToken]\n or type_hint == oauth.OAuthToken\n # Note: \"OAuthProfile | None\" is equals to Optional[OAuthProfile] in Python\n # => it is automatically handled as well by the above condition\n # (adding explicit \"OAuthProfile | None\" would break in Python3.9)\n # (same for \"OAuthToken\")\n ):\n if inputs is not None:\n # Retrieve session from gr.Request, if it exists (i.e. if user is logged in)\n session = (\n # request.session (if fastapi.Request obj i.e. direct call)\n getattr(request, \"session\", {})\n or\n # or request.request.session (if gr.Request obj i.e. websocket call)\n getattr(getattr(request, \"request\", None), \"session\", {})\n )\n\n # Inject user profile\n if (\n type_hint == Optional[oauth.OAuthProfile]\n or type_hint == oauth.OAuthProfile\n ):\n oauth_profile = (\n session[\"oauth_info\"][\"userinfo\"]\n if \"oauth_info\" in session\n else None\n )\n if oauth_profile is not None:\n oauth_profile = oauth.OAuthProfile(oauth_profile)\n elif type_hint == oauth.OAuthProfile:\n raise Error(\n \"This action requires a logged in user. 
Please sign in and retry.\"\n )\n inputs.insert(i, oauth_profile)\n\n # Inject user token\n elif (\n type_hint == Optional[oauth.OAuthToken]\n or type_hint == oauth.OAuthToken\n ):\n oauth_info = (\n session[\"oauth_info\"] if \"oauth_info\" in session else None\n )\n oauth_token = (\n oauth.OAuthToken(\n token=oauth_info[\"access_token\"],\n scope=oauth_info[\"scope\"],\n expires_at=oauth_info[\"expires_at\"],\n )\n if oauth_info is not None\n else None\n )\n if oauth_token is None and type_hint == oauth.OAuthToken:\n raise Error(\n \"This action requires a logged in user. Please sign in and retry.\"\n )\n inputs.insert(i, oauth_token)\n elif (\n type_hint\n and inspect.isclass(type_hint)\n and issubclass(type_hint, EventData)\n ):\n event_data_index = i\n if inputs is not None and event_data is not None:\n inputs.insert(i, type_hint(event_data.target, event_data._data))\n elif (\n param.default is not param.empty and inputs is not None and len(inputs) <= i\n ):\n inputs.insert(i, param.default)\n if inputs is not None:\n while len(inputs) < len(positional_args):\n i = len(inputs)\n param = positional_args[i]\n if param.default == param.empty:\n warnings.warn(\"Unexpected argument. Filling with None.\")\n inputs.append(None)\n else:\n inputs.append(param.default)\n return inputs or [], progress_index, event_data_index\n\n\ndef update(\n elem_id: str | None = None,\n elem_classes: list[str] | str | None = None,\n visible: bool | None = None,\n **kwargs,\n) -> dict:\n \"\"\"\n Updates a component's properties. When a function passed into a Gradio Interface or a Blocks events returns a value, it typically updates the value of the output component. But it is also possible to update the *properties* of an output component (such as the number of lines of a `Textbox` or the visibility of an `Row`) by returning a component and passing in the parameters to update in the constructor of the component. Alternatively, you can return `gr.update(...)` with any arbitrary parameters to update. (This is useful as a shorthand or if the same function can be called with different components to update.)\n\n Parameters:\n elem_id: Use this to update the id of the component in the HTML DOM\n elem_classes: Use this to update the classes of the component in the HTML DOM\n visible: Use this to update the visibility of the component\n kwargs: Any other keyword arguments to update the component's properties.\n Example:\n import gradio as gr\n with gr.Blocks() as demo:\n radio = gr.Radio([1, 2, 4], label=\"Set the value of the number\")\n number = gr.Number(value=2, interactive=True)\n radio.change(fn=lambda value: gr.update(value=value), inputs=radio, outputs=number)\n demo.launch()\n \"\"\"\n kwargs[\"__type__\"] = \"update\"\n if elem_id is not None:\n kwargs[\"elem_id\"] = elem_id\n if elem_classes is not None:\n kwargs[\"elem_classes\"] = elem_classes\n if visible is not None:\n kwargs[\"visible\"] = visible\n return kwargs\n\n\ndef skip() -> dict:\n return {\"__type__\": \"update\"}\n\n\n@document()\ndef make_waveform(\n audio: str | tuple[int, np.ndarray],\n *,\n bg_color: str = \"#f3f4f6\",\n bg_image: str | None = None,\n fg_alpha: float = 0.75,\n bars_color: str | tuple[str, str] = (\"#fbbf24\", \"#ea580c\"),\n bar_count: int = 50,\n bar_width: float = 0.6,\n animate: bool = False,\n) -> str:\n \"\"\"\n Generates a waveform video from an audio file. Useful for creating an easy to share audio visualization. 
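A minimal end-to-end sketch of the intended usage (the styling values are arbitrary choices, not recommended defaults):

    import gradio as gr

    def visualize(audio_path):
        # `audio_path` comes from gr.Audio(type="filepath")
        return gr.make_waveform(
            audio_path, bars_color=("#fbbf24", "#ea580c"), bar_count=60
        )

    demo = gr.Interface(visualize, gr.Audio(type="filepath"), gr.Video())
    demo.launch()

Note that this code path requires `ffmpeg` to be available on the system, as checked below.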
The output should be passed into a `gr.Video` component.\n Parameters:\n audio: Audio file path or tuple of (sample_rate, audio_data)\n bg_color: Background color of waveform (ignored if bg_image is provided)\n bg_image: Background image of waveform\n fg_alpha: Opacity of foreground waveform\n bars_color: Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) of gradient\n bar_count: Number of bars in waveform\n bar_width: Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.\n animate: If true, the audio waveform overlay will be animated, if false, it will be static.\n Returns:\n A filepath to the output video in mp4 format.\n \"\"\"\n import matplotlib.pyplot as plt\n from matplotlib.animation import FuncAnimation\n\n if isinstance(audio, str):\n audio_file = audio\n audio = processing_utils.audio_from_file(audio)\n else:\n tmp_wav = tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False)\n processing_utils.audio_to_file(audio[0], audio[1], tmp_wav.name, format=\"wav\")\n audio_file = tmp_wav.name\n\n if not os.path.isfile(audio_file):\n raise ValueError(\"Audio file not found.\")\n\n ffmpeg = shutil.which(\"ffmpeg\")\n if not ffmpeg:\n raise RuntimeError(\"ffmpeg not found.\")\n\n duration = round(len(audio[1]) / audio[0], 4)\n\n # Helper methods to create waveform\n def hex_to_rgb(hex_str):\n return [int(hex_str[i : i + 2], 16) for i in range(1, 6, 2)]\n\n def get_color_gradient(c1, c2, n):\n assert n > 1\n c1_rgb = np.array(hex_to_rgb(c1)) / 255\n c2_rgb = np.array(hex_to_rgb(c2)) / 255\n mix_pcts = [x / (n - 1) for x in range(n)]\n rgb_colors = [((1 - mix) * c1_rgb + (mix * c2_rgb)) for mix in mix_pcts]\n return [\n \"#\" + \"\".join(f\"{int(round(val * 255)):02x}\" for val in item)\n for item in rgb_colors\n ]\n\n # Reshape audio to have a fixed number of bars\n samples = audio[1]\n if len(samples.shape) > 1:\n samples = np.mean(samples, 1)\n bins_to_pad = bar_count - (len(samples) % bar_count)\n samples = np.pad(samples, [(0, bins_to_pad)])\n samples = np.reshape(samples, (bar_count, -1))\n samples = np.abs(samples)\n samples = np.max(samples, 1)\n\n with utils.MatplotlibBackendMananger():\n plt.clf()\n # Plot waveform\n color = (\n bars_color\n if isinstance(bars_color, str)\n else get_color_gradient(bars_color[0], bars_color[1], bar_count)\n )\n\n if animate:\n fig = plt.figure(figsize=(5, 1), dpi=200, frameon=False)\n fig.subplots_adjust(left=0, bottom=0, right=1, top=1)\n plt.axis(\"off\")\n plt.margins(x=0)\n\n bar_alpha = fg_alpha if animate else 1.0\n barcollection = plt.bar(\n np.arange(0, bar_count),\n samples * 2,\n bottom=(-1 * samples),\n width=bar_width,\n color=color,\n alpha=bar_alpha,\n )\n\n tmp_img = tempfile.NamedTemporaryFile(suffix=\".png\", delete=False)\n\n savefig_kwargs: dict[str, Any] = {\"bbox_inches\": \"tight\"}\n if bg_image is not None:\n savefig_kwargs[\"transparent\"] = True\n if animate:\n savefig_kwargs[\"facecolor\"] = \"none\"\n else:\n savefig_kwargs[\"facecolor\"] = bg_color\n plt.savefig(tmp_img.name, **savefig_kwargs)\n\n if not animate:\n waveform_img = PIL.Image.open(tmp_img.name)\n waveform_img = waveform_img.resize((1000, 400))\n\n # Composite waveform with background image\n if bg_image is not None:\n waveform_array = np.array(waveform_img)\n waveform_array[:, :, 3] = waveform_array[:, :, 3] * fg_alpha\n waveform_img = PIL.Image.fromarray(waveform_array)\n\n bg_img = PIL.Image.open(bg_image)\n waveform_width, waveform_height = waveform_img.size\n bg_width, bg_height = 
bg_img.size\n if waveform_width != bg_width:\n bg_img = bg_img.resize(\n (\n waveform_width,\n 2 * int(bg_height * waveform_width / bg_width / 2),\n )\n )\n bg_width, bg_height = bg_img.size\n composite_height = max(bg_height, waveform_height)\n composite = PIL.Image.new(\n \"RGBA\", (waveform_width, composite_height), \"#FFFFFF\"\n )\n composite.paste(bg_img, (0, composite_height - bg_height))\n composite.paste(\n waveform_img, (0, composite_height - waveform_height), waveform_img\n )\n composite.save(tmp_img.name)\n img_width, img_height = composite.size\n else:\n img_width, img_height = waveform_img.size\n waveform_img.save(tmp_img.name)\n else:\n\n def _animate(_):\n for idx, b in enumerate(barcollection):\n rand_height = np.random.uniform(0.8, 1.2)\n b.set_height(samples[idx] * rand_height * 2)\n b.set_y((-rand_height * samples)[idx])\n\n frames = int(duration * 10)\n anim = FuncAnimation(\n fig, # type: ignore\n _animate,\n repeat=False,\n blit=False,\n frames=frames,\n interval=100,\n )\n anim.save(\n tmp_img.name,\n writer=\"pillow\",\n fps=10,\n codec=\"png\",\n savefig_kwargs=savefig_kwargs,\n )\n\n # Convert waveform to video with ffmpeg\n output_mp4 = tempfile.NamedTemporaryFile(suffix=\".mp4\", delete=False)\n\n if animate and bg_image is not None:\n ffmpeg_cmd = [\n ffmpeg,\n \"-loop\",\n \"1\",\n \"-i\",\n bg_image,\n \"-i\",\n tmp_img.name,\n \"-i\",\n audio_file,\n \"-filter_complex\",\n \"[0:v]scale=w=trunc(iw/2)*2:h=trunc(ih/2)*2[bg];[1:v]format=rgba,colorchannelmixer=aa=1.0[ov];[bg][ov]overlay=(main_w-overlay_w*0.9)/2:main_h-overlay_h*0.9/2[output]\",\n \"-t\",\n str(duration),\n \"-map\",\n \"[output]\",\n \"-map\",\n \"2:a\",\n \"-c:v\",\n \"libx264\",\n \"-c:a\",\n \"aac\",\n \"-shortest\",\n \"-y\",\n output_mp4.name,\n ]\n elif animate and bg_image is None:\n ffmpeg_cmd = [\n ffmpeg,\n \"-i\",\n tmp_img.name,\n \"-i\",\n audio_file,\n \"-filter_complex\",\n \"[0:v][1:a]concat=n=1:v=1:a=1[v];[v]scale=1000:400,format=yuv420p[v_scaled]\",\n \"-map\",\n \"[v_scaled]\",\n \"-map\",\n \"1:a\",\n \"-c:v\",\n \"libx264\",\n \"-c:a\",\n \"aac\",\n \"-shortest\",\n \"-y\",\n output_mp4.name,\n ]\n else:\n ffmpeg_cmd = [\n ffmpeg,\n \"-loop\",\n \"1\",\n \"-i\",\n tmp_img.name,\n \"-i\",\n audio_file,\n \"-vf\",\n f\"color=c=#FFFFFF77:s={img_width}x{img_height}[bar];[0][bar]overlay=-w+(w/{duration})*t:H-h:shortest=1\", # type: ignore\n \"-t\",\n str(duration),\n \"-y\",\n output_mp4.name,\n ]\n\n subprocess.check_call(ffmpeg_cmd)\n return output_mp4.name\n\n\ndef log_message(message: str, level: Literal[\"info\", \"warning\"] = \"info\"):\n from gradio.context import LocalContext\n\n blocks = LocalContext.blocks.get()\n event_id = LocalContext.event_id.get()\n if blocks is None or event_id is None:\n # Function called outside of Gradio if blocks is None\n # Or from /api/predict if event_id is None\n if level == \"info\":\n print(message)\n elif level == \"warning\":\n warnings.warn(message)\n return\n blocks._queue.log_message(event_id=event_id, log=message, level=level)\n\n\nset_documentation_group(\"modals\")\n\n\n@document()\ndef Warning(message: str = \"Warning issued.\"): # noqa: N802\n \"\"\"\n This function allows you to pass custom warning messages to the user. You can do so simply by writing `gr.Warning('message here')` in your function, and when that line is executed the custom message will appear in a modal on the demo. 
The modal is yellow by default and has the heading: \"Warning.\" Queue must be enabled for this behavior; otherwise, the warning will be printed to the console using the `warnings` library.\n Demos: blocks_chained_events\n Parameters:\n message: The warning message to be displayed to the user.\n Example:\n import gradio as gr\n def hello_world():\n gr.Warning('This is a warning message.')\n return \"hello world\"\n with gr.Blocks() as demo:\n md = gr.Markdown()\n demo.load(hello_world, inputs=None, outputs=[md])\n demo.queue().launch()\n \"\"\"\n log_message(message, level=\"warning\")\n\n\n@document()\ndef Info(message: str = \"Info issued.\"): # noqa: N802\n \"\"\"\n This function allows you to pass custom info messages to the user. You can do so simply by writing `gr.Info('message here')` in your function, and when that line is executed the custom message will appear in a modal on the demo. The modal is gray by default and has the heading: \"Info.\" Queue must be enabled for this behavior; otherwise, the message will be printed to the console.\n Demos: blocks_chained_events\n Parameters:\n message: The info message to be displayed to the user.\n Example:\n import gradio as gr\n def hello_world():\n gr.Info('This is some info.')\n return \"hello world\"\n with gr.Blocks() as demo:\n md = gr.Markdown()\n demo.load(hello_world, inputs=None, outputs=[md])\n demo.queue().launch()\n \"\"\"\n log_message(message, level=\"info\")\n", "output": ["create_tracker", "create_examples", "skip", "update", "log_message", "patch_tqdm", "special_args", "merge_generated_values_into_output", "TrackedIterable", "Examples", "Progress"], "metadata": {"file_path": "gradio-main/gradio/helpers.py", "file_length": 12938, "symbol_dict": [{"symbol": "patch_tqdm", "type": "mannual_defined_function", "byte_location": 27262, "location": 6966}, {"symbol": "skip", "type": "mannual_defined_function", "byte_location": 36532, "location": 9622}, {"symbol": "create_examples", "type": "mannual_defined_function", "byte_location": 1019, "location": 272}, {"symbol": "log_message", "type": "mannual_defined_function", "byte_location": 44778, "location": 12265}, {"symbol": "special_args", "type": "mannual_defined_function", "byte_location": 29620, "location": 7860}, {"symbol": "merge_generated_values_into_output", "type": "mannual_defined_function", "byte_location": 19857, "location": 4992}, {"symbol": "create_tracker", "type": "mannual_defined_function", "byte_location": 29269, "location": 7744}, {"symbol": "update", "type": "mannual_defined_function", "byte_location": 34824, "location": 9177}, {"symbol": "Progress", "type": "mannual_defined_class", "byte_location": 21724, "location": 5518}, {"symbol": "Examples", "type": "mannual_defined_class", "byte_location": 2256, "location": 658}, {"symbol": "TrackedIterable", "type": "mannual_defined_class", "byte_location": 21222, "location": 5363}]}} {"input": "# coding=utf-8\n# Copyright 2023 The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Testing suite for the PyTorch BLIP-2 model. \"\"\"\n\n\nimport inspect\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport requests\n\nfrom transformers import CONFIG_MAPPING, Blip2Config, Blip2QFormerConfig, Blip2VisionConfig\nfrom transformers.testing_utils import (\n require_torch,\n require_torch_multi_accelerator,\n require_vision,\n slow,\n torch_device,\n)\nfrom transformers.utils import is_torch_available, is_vision_available\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import (\n ModelTesterMixin,\n _config_zero_init,\n floats_tensor,\n ids_tensor,\n random_attention_mask,\n)\nfrom ...test_pipeline_mixin import PipelineTesterMixin\n\n\nif is_torch_available():\n import torch\n from torch import nn\n\n from transformers import Blip2ForConditionalGeneration, Blip2Model, Blip2VisionModel\n from transformers.models.blip_2.modeling_blip_2 import BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nif is_vision_available():\n from PIL import Image\n\n from transformers import Blip2Processor\n\n\nclass Blip2VisionModelTester:\n def __init__(\n self,\n parent,\n batch_size=12,\n image_size=30,\n patch_size=2,\n num_channels=3,\n is_training=True,\n hidden_size=32,\n projection_dim=32,\n num_hidden_layers=2,\n num_attention_heads=4,\n intermediate_size=37,\n dropout=0.1,\n attention_dropout=0.1,\n initializer_range=1e-10,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.is_training = is_training\n self.hidden_size = hidden_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.scope = scope\n\n # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)\n num_patches = (image_size // patch_size) ** 2\n self.seq_length = num_patches + 1\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n config = self.get_config()\n\n return config, pixel_values\n\n def get_config(self):\n return Blip2VisionConfig(\n image_size=self.image_size,\n patch_size=self.patch_size,\n num_channels=self.num_channels,\n hidden_size=self.hidden_size,\n projection_dim=self.projection_dim,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n dropout=self.dropout,\n attention_dropout=self.attention_dropout,\n initializer_range=self.initializer_range,\n )\n\n def create_and_check_model(self, config, pixel_values):\n model = Blip2VisionModel(config=config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n result = 
model(pixel_values)\n # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)\n image_size = (self.image_size, self.image_size)\n patch_size = (self.patch_size, self.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, pixel_values = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, inputs_dict\n\n\n@require_torch\nclass Blip2VisionModelTest(ModelTesterMixin, unittest.TestCase):\n \"\"\"\n Here we also overwrite some of the tests of test_modeling_common.py, as BLIP-2's vision encoder does not use input_ids, inputs_embeds,\n attention_mask and seq_length.\n \"\"\"\n\n all_model_classes = (Blip2VisionModel,) if is_torch_available() else ()\n fx_compatible = False\n test_pruning = False\n test_resize_embeddings = False\n test_head_masking = False\n\n def setUp(self):\n self.model_tester = Blip2VisionModelTester(self)\n self.config_tester = ConfigTester(\n self, config_class=Blip2VisionConfig, has_text_modality=False, hidden_size=37\n )\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n @unittest.skip(reason=\"BLIP-2's vision encoder does not use inputs_embeds\")\n def test_inputs_embeds(self):\n pass\n\n def test_model_common_attributes(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n self.assertIsInstance(model.get_input_embeddings(), (nn.Module))\n x = model.get_output_embeddings()\n self.assertTrue(x is None or isinstance(x, nn.Linear))\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_training(self):\n pass\n\n def test_training_gradient_checkpointing(self):\n pass\n\n @unittest.skip(\n reason=\"This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124\"\n )\n def test_training_gradient_checkpointing_use_reentrant(self):\n pass\n\n @unittest.skip(\n reason=\"This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124\"\n )\n def test_training_gradient_checkpointing_use_reentrant_false(self):\n pass\n\n @unittest.skip(reason=\"Blip2VisionModel has no base class and is not available in MODEL_MAPPING\")\n def test_save_load_fast_init_from_base(self):\n pass\n\n @unittest.skip(reason=\"Blip2VisionModel has no base class and is not available in MODEL_MAPPING\")\n def test_save_load_fast_init_to_base(self):\n pass\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = 
Blip2VisionModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\nclass Blip2QFormerModelTester:\n def __init__(\n self,\n parent,\n batch_size=12,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n projection_dim=32,\n num_hidden_layers=2,\n num_attention_heads=4,\n intermediate_size=37,\n dropout=0.1,\n attention_dropout=0.1,\n max_position_embeddings=512,\n initializer_range=0.02,\n bos_token_id=0,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.max_position_embeddings = max_position_embeddings\n self.initializer_range = initializer_range\n self.scope = scope\n self.bos_token_id = bos_token_id\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n if input_mask is not None:\n batch_size, seq_length = input_mask.shape\n rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))\n for batch_idx, start_index in enumerate(rnd_start_indices):\n input_mask[batch_idx, :start_index] = 1\n input_mask[batch_idx, start_index:] = 0\n\n config = self.get_config()\n\n return config, input_ids, input_mask\n\n def get_config(self):\n return Blip2QFormerConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n projection_dim=self.projection_dim,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n dropout=self.dropout,\n attention_dropout=self.attention_dropout,\n max_position_embeddings=self.max_position_embeddings,\n initializer_range=self.initializer_range,\n bos_token_id=self.bos_token_id,\n )\n\n\n# this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py\nclass Blip2TextModelDecoderOnlyTester:\n def __init__(\n self,\n parent,\n batch_size=12,\n seq_length=7,\n is_training=True,\n use_labels=False,\n vocab_size=99,\n hidden_size=16,\n num_hidden_layers=2,\n num_attention_heads=4,\n intermediate_size=4,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=20,\n eos_token_id=2,\n pad_token_id=1,\n bos_token_id=0,\n embed_dim=16,\n num_labels=3,\n word_embed_proj_dim=16,\n type_sequence_label_size=2,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.bos_token_id = bos_token_id\n 
self.embed_dim = embed_dim\n self.num_labels = num_labels\n self.type_sequence_label_size = type_sequence_label_size\n self.word_embed_proj_dim = word_embed_proj_dim\n self.is_encoder_decoder = False\n\n def prepare_config_and_inputs(self):\n config = self.get_config()\n\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)\n input_ids[:, -1] = self.eos_token_id # Eos Token\n\n attention_mask = input_ids.ne(self.pad_token_id)\n\n return config, input_ids, attention_mask\n\n def get_config(self):\n return CONFIG_MAPPING[\"opt\"](\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n ffn_dim=self.intermediate_size,\n dropout=self.hidden_dropout_prob,\n attention_dropout=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.bos_token_id,\n pad_token_id=self.pad_token_id,\n embed_dim=self.embed_dim,\n is_encoder_decoder=False,\n word_embed_proj_dim=self.word_embed_proj_dim,\n )\n\n\n# this model tester uses a decoder-only language model (OPT)\nclass Blip2ForConditionalGenerationDecoderOnlyModelTester:\n def __init__(\n self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10\n ):\n if vision_kwargs is None:\n vision_kwargs = {}\n if qformer_kwargs is None:\n qformer_kwargs = {}\n if text_kwargs is None:\n text_kwargs = {}\n\n self.parent = parent\n self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs)\n self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs)\n self.text_model_tester = Blip2TextModelDecoderOnlyTester(parent, **text_kwargs)\n self.is_training = is_training\n self.num_query_tokens = num_query_tokens\n\n def prepare_config_and_inputs(self):\n _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()\n _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()\n\n config = self.get_config()\n\n return config, input_ids, attention_mask, pixel_values\n\n def get_config(self):\n return Blip2Config.from_vision_qformer_text_configs(\n vision_config=self.vision_model_tester.get_config(),\n qformer_config=self.qformer_model_tester.get_config(),\n text_config=self.text_model_tester.get_config(),\n num_query_tokens=self.num_query_tokens,\n )\n\n def create_and_check_for_conditional_generation(self, config, input_ids, attention_mask, pixel_values):\n model = Blip2ForConditionalGeneration(config).to(torch_device).eval()\n with torch.no_grad():\n result = model(pixel_values, input_ids, attention_mask)\n\n expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length\n self.parent.assertEqual(\n result.logits.shape,\n (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size),\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, input_ids, attention_mask, pixel_values = config_and_inputs\n inputs_dict = {\n \"pixel_values\": pixel_values,\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"labels\": input_ids,\n }\n return config, inputs_dict\n\n\n@require_torch\nclass Blip2ForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, unittest.TestCase):\n all_model_classes = (Blip2ForConditionalGeneration,) if is_torch_available() else ()\n fx_compatible = False\n test_head_masking = False\n test_pruning = 
False\n test_resize_embeddings = False\n test_attention_outputs = False\n test_torchscript = False\n\n def setUp(self):\n self.model_tester = Blip2ForConditionalGenerationDecoderOnlyModelTester(self)\n\n def test_for_conditional_generation(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs)\n\n @unittest.skip(reason=\"Hidden_states is tested in individual model tests\")\n def test_hidden_states_output(self):\n pass\n\n @unittest.skip(reason=\"Inputs_embeds is tested in individual model tests\")\n def test_inputs_embeds(self):\n pass\n\n @unittest.skip(reason=\"Retain_grad is tested in individual model tests\")\n def test_retain_grad_hidden_states_attentions(self):\n pass\n\n @unittest.skip(reason=\"Blip2Model does not have input/output embeddings\")\n def test_model_common_attributes(self):\n pass\n\n @unittest.skip(reason=\"There's no base Blip2Model\")\n def test_save_load_fast_init_from_base(self):\n pass\n\n @unittest.skip(reason=\"There's no base Blip2Model\")\n def test_save_load_fast_init_to_base(self):\n pass\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n def test_load_vision_qformer_text_config(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n # Save Blip2Config and check if we can load Blip2VisionConfig from it\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n config.save_pretrained(tmp_dir_name)\n vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name)\n self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())\n\n # Save Blip2Config and check if we can load Blip2QFormerConfig from it\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n config.save_pretrained(tmp_dir_name)\n qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name)\n self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict())\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST:\n model = Blip2ForConditionalGeneration.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n# this class is based on `T5ModelTester` found in tests/models/t5/test_modeling_t5.py\nclass Blip2TextModelTester:\n def __init__(\n self,\n parent,\n vocab_size=99,\n batch_size=12,\n encoder_seq_length=7,\n decoder_seq_length=9,\n # For common tests\n is_training=True,\n use_attention_mask=True,\n use_labels=True,\n hidden_size=32,\n num_hidden_layers=2,\n num_attention_heads=4,\n d_ff=37,\n relative_attention_num_buckets=8,\n dropout_rate=0.1,\n initializer_factor=0.002,\n eos_token_id=1,\n pad_token_id=0,\n decoder_start_token_id=0,\n scope=None,\n decoder_layers=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.encoder_seq_length = encoder_seq_length\n self.decoder_seq_length = decoder_seq_length\n # For common tests\n self.seq_length = self.decoder_seq_length\n self.is_training = is_training\n self.use_attention_mask = use_attention_mask\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n 
self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.d_ff = d_ff\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.initializer_factor = initializer_factor\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.decoder_start_token_id = decoder_start_token_id\n self.scope = None\n self.decoder_layers = decoder_layers\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)\n decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)\n\n attention_mask = None\n decoder_attention_mask = None\n if self.use_attention_mask:\n attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)\n decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)\n\n lm_labels = None\n if self.use_labels:\n lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)\n\n config = self.get_config()\n\n return (\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n )\n\n def get_config(self):\n return CONFIG_MAPPING[\"t5\"](\n vocab_size=self.vocab_size,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_decoder_layers=self.decoder_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n decoder_start_token_id=self.decoder_start_token_id,\n )\n\n\n# this model tester uses an encoder-decoder language model (T5)\nclass Blip2ModelTester:\n def __init__(\n self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10\n ):\n if vision_kwargs is None:\n vision_kwargs = {}\n if qformer_kwargs is None:\n qformer_kwargs = {}\n if text_kwargs is None:\n text_kwargs = {}\n\n self.parent = parent\n self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs)\n self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs)\n self.text_model_tester = Blip2TextModelTester(parent, **text_kwargs)\n self.is_training = is_training\n self.num_query_tokens = num_query_tokens\n\n def prepare_config_and_inputs(self):\n _, pixel_values = self.vision_model_tester.prepare_config_and_inputs()\n (\n _,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ) = self.text_model_tester.prepare_config_and_inputs()\n\n config = self.get_config()\n\n return config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, lm_labels\n\n def get_config(self):\n return Blip2Config.from_vision_qformer_text_configs(\n vision_config=self.vision_model_tester.get_config(),\n qformer_config=self.qformer_model_tester.get_config(),\n text_config=self.text_model_tester.get_config(),\n num_query_tokens=self.num_query_tokens,\n )\n\n def create_and_check_for_conditional_generation(\n self, config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels\n ):\n model = Blip2ForConditionalGeneration(config).to(torch_device).eval()\n with torch.no_grad():\n result = model(pixel_values, input_ids, 
attention_mask, decoder_input_ids, decoder_attention_mask)\n\n self.parent.assertEqual(\n result.logits.shape,\n (\n self.vision_model_tester.batch_size,\n self.text_model_tester.seq_length,\n self.text_model_tester.vocab_size,\n ),\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n attention_mask,\n pixel_values,\n decoder_input_ids,\n decoder_attention_mask,\n labels,\n ) = config_and_inputs\n inputs_dict = {\n \"pixel_values\": pixel_values,\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"decoder_input_ids\": decoder_input_ids,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"labels\": labels,\n }\n return config, inputs_dict\n\n\n@require_torch\nclass Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):\n all_model_classes = (Blip2ForConditionalGeneration, Blip2Model) if is_torch_available() else ()\n pipeline_model_mapping = (\n {\n \"feature-extraction\": Blip2Model,\n \"image-to-text\": Blip2ForConditionalGeneration,\n \"visual-question-answering\": Blip2ForConditionalGeneration,\n }\n if is_torch_available()\n else {}\n )\n fx_compatible = False\n test_head_masking = False\n test_pruning = False\n test_resize_embeddings = False\n test_attention_outputs = False\n test_torchscript = False\n\n def setUp(self):\n self.model_tester = Blip2ModelTester(self)\n\n def test_for_conditional_generation(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs)\n\n @unittest.skip(reason=\"Hidden_states is tested in individual model tests\")\n def test_hidden_states_output(self):\n pass\n\n @unittest.skip(reason=\"Inputs_embeds is tested in individual model tests\")\n def test_inputs_embeds(self):\n pass\n\n @unittest.skip(reason=\"Retain_grad is tested in individual model tests\")\n def test_retain_grad_hidden_states_attentions(self):\n pass\n\n @unittest.skip(reason=\"Blip2Model does not have input/output embeddings\")\n def test_model_common_attributes(self):\n pass\n\n @unittest.skip(reason=\"There's no base Blip2Model\")\n def test_save_load_fast_init_from_base(self):\n pass\n\n @unittest.skip(reason=\"There's no base Blip2Model\")\n def test_save_load_fast_init_to_base(self):\n pass\n\n @unittest.skip(reason=\"Does not work on the tiny model as we keep hitting edge cases.\")\n def test_cpu_offload(self):\n pass\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n def test_load_vision_qformer_text_config(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n # Save Blip2Config and check if we can load Blip2VisionConfig from it\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n config.save_pretrained(tmp_dir_name)\n vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name)\n self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())\n\n # Save Blip2Config and check if we can load Blip2QFormerConfig from it\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n 
config.save_pretrained(tmp_dir_name)\n qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name)\n self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict())\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST:\n model = Blip2ForConditionalGeneration.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n def test_get_text_features(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n inputs_dict = {\n \"input_ids\": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device),\n \"attention_mask\": torch.LongTensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).to(torch_device),\n \"decoder_input_ids\": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device),\n }\n\n model = Blip2Model(config).to(torch_device)\n model.eval()\n text_features = model.get_text_features(**inputs_dict)\n self.assertEqual(text_features[0].shape, (1, 10, config.text_config.vocab_size))\n\n def test_get_image_features(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n keys_to_pop = [\"input_ids\", \"attention_mask\", \"decoder_input_ids\", \"decoder_attention_mask\", \"labels\"]\n\n for key in keys_to_pop:\n inputs_dict.pop(key)\n\n model = Blip2Model(config).to(torch_device)\n model.eval()\n image_features = model.get_image_features(**inputs_dict)\n self.assertEqual(\n image_features[0].shape,\n (\n self.model_tester.vision_model_tester.batch_size,\n self.model_tester.vision_model_tester.seq_length,\n config.vision_config.hidden_size,\n ),\n )\n\n def test_get_qformer_features(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n keys_to_pop = [\"input_ids\", \"attention_mask\", \"decoder_input_ids\", \"decoder_attention_mask\", \"labels\"]\n\n for key in keys_to_pop:\n inputs_dict.pop(key)\n\n model = Blip2Model(config).to(torch_device)\n model.eval()\n qformer_features = model.get_qformer_features(**inputs_dict)\n self.assertEqual(\n qformer_features[0].shape,\n (self.model_tester.vision_model_tester.batch_size, 10, config.vision_config.hidden_size),\n )\n\n # override from common to deal with nested configurations (`vision_config`, `text_config` and `qformer_config`)\n def test_initialization(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n configs_no_init = _config_zero_init(config)\n for key in [\"vision_config\", \"qformer_config\", \"text_config\"]:\n setattr(configs_no_init, key, _config_zero_init(getattr(configs_no_init, key)))\n for model_class in self.all_model_classes:\n model = model_class(config=configs_no_init)\n for name, param in model.named_parameters():\n if param.requires_grad:\n self.assertIn(\n ((param.data.mean() * 1e9).round() / 1e9).item(),\n [0.0, 1.0],\n msg=f\"Parameter {name} of model {model_class} seems not properly initialized\",\n )\n\n\n# We will verify our results on an image of cute cats\ndef prepare_img():\n url = \"https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg\"\n image = Image.open(requests.get(url, stream=True).raw)\n return image\n\n\n@require_vision\n@require_torch\n@slow\nclass Blip2ModelIntegrationTest(unittest.TestCase):\n def test_inference_opt(self):\n processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")\n model = Blip2ForConditionalGeneration.from_pretrained(\n \"Salesforce/blip2-opt-2.7b\", torch_dtype=torch.float16\n ).to(torch_device)\n\n # prepare image\n image = 
prepare_img()\n inputs = processor(images=image, return_tensors=\"pt\").to(torch_device, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118])\n self.assertEqual(\"a woman sitting on the beach with a dog\", generated_text)\n\n # image and context\n prompt = \"Question: which city is this? Answer:\"\n inputs = processor(images=image, text=prompt, return_tensors=\"pt\").to(torch_device, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(\n predictions[0].tolist(),\n [2, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118],\n )\n self.assertEqual(generated_text, \"it's not a city, it's a beach\")\n\n def test_inference_opt_batched_beam_search(self):\n processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")\n model = Blip2ForConditionalGeneration.from_pretrained(\n \"Salesforce/blip2-opt-2.7b\", torch_dtype=torch.float16\n ).to(torch_device)\n\n # prepare image\n image = prepare_img()\n inputs = processor(images=[image, image], return_tensors=\"pt\").to(torch_device, dtype=torch.float16)\n\n predictions = model.generate(**inputs, num_beams=2)\n\n # Test output (in this case, slightly different from greedy search)\n self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118])\n self.assertEqual(predictions[1].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118])\n\n def test_inference_t5(self):\n processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-flan-t5-xl\")\n model = Blip2ForConditionalGeneration.from_pretrained(\n \"Salesforce/blip2-flan-t5-xl\", torch_dtype=torch.float16\n ).to(torch_device)\n\n # prepare image\n image = prepare_img()\n inputs = processor(images=image, return_tensors=\"pt\").to(torch_device, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])\n self.assertEqual(\"woman playing with dog on the beach\", generated_text)\n\n # image and context\n prompt = \"Question: which city is this? 
Answer:\"\n inputs = processor(images=image, text=prompt, return_tensors=\"pt\").to(torch_device, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(\n predictions[0].tolist(),\n [0, 3, 7, 152, 67, 839, 1],\n )\n self.assertEqual(generated_text, \"san diego\")\n\n def test_inference_t5_batched_beam_search(self):\n processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-flan-t5-xl\")\n model = Blip2ForConditionalGeneration.from_pretrained(\n \"Salesforce/blip2-flan-t5-xl\", torch_dtype=torch.float16\n ).to(torch_device)\n\n # prepare image\n image = prepare_img()\n inputs = processor(images=[image, image], return_tensors=\"pt\").to(torch_device, dtype=torch.float16)\n\n predictions = model.generate(**inputs, num_beams=2)\n\n # Test output (in this case, slightly different from greedy search)\n self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])\n self.assertEqual(predictions[1].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])\n\n @require_torch_multi_accelerator\n def test_inference_opt_multi_accelerator(self):\n processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-opt-2.7b\")\n model = Blip2ForConditionalGeneration.from_pretrained(\n \"Salesforce/blip2-opt-2.7b\", torch_dtype=torch.float16, device_map=\"balanced\"\n )\n\n # prepare image\n image = prepare_img()\n inputs = processor(images=image, return_tensors=\"pt\").to(0, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118])\n self.assertEqual(\"a woman sitting on the beach with a dog\", generated_text)\n\n # image and context\n prompt = \"Question: which city is this? Answer:\"\n inputs = processor(images=image, text=prompt, return_tensors=\"pt\").to(0, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(\n predictions[0].tolist(),\n [2, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118],\n )\n self.assertEqual(generated_text, \"it's not a city, it's a beach\")\n\n @require_torch_multi_accelerator\n def test_inference_t5_multi_accelerator(self):\n processor = Blip2Processor.from_pretrained(\"Salesforce/blip2-flan-t5-xl\")\n device_map = device_map = {\n \"query_tokens\": 0,\n \"vision_model\": 0,\n \"language_model\": 1,\n \"language_projection\": 0,\n \"qformer\": 0,\n }\n\n model = Blip2ForConditionalGeneration.from_pretrained(\n \"Salesforce/blip2-flan-t5-xl\", torch_dtype=torch.float16, device_map=device_map\n )\n\n # prepare image\n image = prepare_img()\n inputs = processor(images=image, return_tensors=\"pt\").to(0, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(predictions[0].tolist(), [0, 2335, 1556, 28, 1782, 30, 8, 2608, 1])\n self.assertEqual(\"woman playing with dog on the beach\", generated_text)\n\n # image and context\n prompt = \"Question: which city is this? 
Answer:\"\n inputs = processor(images=image, text=prompt, return_tensors=\"pt\").to(0, dtype=torch.float16)\n\n predictions = model.generate(**inputs)\n generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()\n\n # Test output\n self.assertEqual(\n predictions[0].tolist(),\n [0, 3, 7, 152, 67, 839, 1],\n )\n self.assertEqual(generated_text, \"san diego\")\n", "output": ["prepare_img", "Blip2VisionModelTest", "Blip2TextModelTester", "Blip2ForConditionalGenerationDecoderOnlyTest", "Blip2ModelTest", "Blip2ForConditionalGenerationDecoderOnlyModelTester", "Blip2ModelTester", "Blip2QFormerModelTester", "Blip2VisionModelTester", "Blip2TextModelDecoderOnlyTester", "Blip2ModelIntegrationTest"], "metadata": {"file_path": "transformers-main/tests/models/blip_2/test_modeling_blip_2.py", "file_length": 12895, "symbol_dict": [{"symbol": "prepare_img", "type": "mannual_defined_function", "byte_location": 31797, "location": 10179}, {"symbol": "Blip2VisionModelTest", "type": "mannual_defined_class", "byte_location": 4768, "location": 1509}, {"symbol": "Blip2ModelIntegrationTest", "type": "mannual_defined_class", "byte_location": 32024, "location": 10261}, {"symbol": "Blip2QFormerModelTester", "type": "mannual_defined_class", "byte_location": 7930, "location": 2523}, {"symbol": "Blip2TextModelDecoderOnlyTester", "type": "mannual_defined_class", "byte_location": 10660, "location": 3389}, {"symbol": "Blip2TextModelTester", "type": "mannual_defined_class", "byte_location": 19043, "location": 6079}, {"symbol": "Blip2ModelTester", "type": "mannual_defined_class", "byte_location": 22397, "location": 7188}, {"symbol": "Blip2ForConditionalGenerationDecoderOnlyTest", "type": "mannual_defined_class", "byte_location": 15875, "location": 5078}, {"symbol": "Blip2ForConditionalGenerationDecoderOnlyModelTester", "type": "mannual_defined_class", "byte_location": 13460, "location": 4327}, {"symbol": "Blip2VisionModelTester", "type": "mannual_defined_class", "byte_location": 1687, "location": 533}, {"symbol": "Blip2ModelTest", "type": "mannual_defined_class", "byte_location": 25315, "location": 8078}]}} {"input": "# coding=utf-8\n# Copyright 2022 The HuggingFace Team Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a clone of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport gc\nimport importlib.metadata\nimport tempfile\nimport unittest\n\nfrom packaging import version\n\nfrom transformers import (\n AutoConfig,\n AutoModel,\n AutoModelForCausalLM,\n AutoModelForSeq2SeqLM,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n BitsAndBytesConfig,\n pipeline,\n)\nfrom transformers.testing_utils import (\n is_accelerate_available,\n is_torch_available,\n require_accelerate,\n require_bitsandbytes,\n require_torch,\n require_torch_gpu,\n require_torch_multi_gpu,\n slow,\n)\n\n\ndef get_some_linear_layer(model):\n if model.config.model_type == \"gpt2\":\n return model.transformer.h[0].mlp.c_fc\n return model.transformer.h[0].mlp.dense_4h_to_h\n\n\nif is_accelerate_available():\n from accelerate import PartialState\n from accelerate.logging 
import get_logger\n\n logger = get_logger(__name__)\n _ = PartialState()\n\nif is_torch_available():\n import torch\n import torch.nn as nn\n\n class LoRALayer(nn.Module):\n \"\"\"Wraps a linear layer with LoRA-like adapter - Used for testing purposes only\"\"\"\n\n def __init__(self, module: nn.Module, rank: int):\n super().__init__()\n self.module = module\n self.adapter = nn.Sequential(\n nn.Linear(module.in_features, rank, bias=False),\n nn.Linear(rank, module.out_features, bias=False),\n )\n small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5\n nn.init.normal_(self.adapter[0].weight, std=small_std)\n nn.init.zeros_(self.adapter[1].weight)\n self.adapter.to(module.weight.device)\n\n def forward(self, input, *args, **kwargs):\n return self.module(input, *args, **kwargs) + self.adapter(input)\n\n\n@require_bitsandbytes\n@require_accelerate\n@require_torch\n@require_torch_gpu\n@slow\nclass BaseMixedInt8Test(unittest.TestCase):\n # We keep the constants inside the init function and model loading inside setUp function\n\n # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)\n # Therefore here we use only bloom-1b3 to test our module\n model_name = \"bigscience/bloom-1b7\"\n\n # Constant values\n EXPECTED_RELATIVE_DIFFERENCE = (\n 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change\n )\n\n input_text = \"Hello my name is\"\n EXPECTED_OUTPUTS = set()\n EXPECTED_OUTPUTS.add(\"Hello my name is John.\\nI am a friend of the family.\\n\")\n MAX_NEW_TOKENS = 10\n\n def setUp(self):\n # Models and tokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)\n\n\nclass MixedInt8Test(BaseMixedInt8Test):\n def setUp(self):\n super().setUp()\n\n # Models and tokenizer\n self.model_fp16 = AutoModelForCausalLM.from_pretrained(\n self.model_name, torch_dtype=torch.float16, device_map=\"auto\"\n )\n self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map=\"auto\")\n\n def tearDown(self):\n r\"\"\"\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n \"\"\"\n del self.model_fp16\n del self.model_8bit\n\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_get_keys_to_not_convert_trust_remote_code(self):\n r\"\"\"\n Test the `get_keys_to_not_convert` function with `trust_remote_code` models.\n \"\"\"\n from accelerate import init_empty_weights\n\n from transformers.integrations.bitsandbytes import get_keys_to_not_convert\n\n model_id = \"mosaicml/mpt-7b\"\n config = AutoConfig.from_pretrained(\n model_id, trust_remote_code=True, revision=\"ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7\"\n )\n with init_empty_weights():\n model = AutoModelForCausalLM.from_config(\n config, trust_remote_code=True, code_revision=\"ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7\"\n )\n self.assertEqual(get_keys_to_not_convert(model), [\"transformer.wte\"])\n\n def test_get_keys_to_not_convert(self):\n r\"\"\"\n Test the `get_keys_to_not_convert` function.\n \"\"\"\n from accelerate import init_empty_weights\n\n from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM\n from transformers.integrations.bitsandbytes import get_keys_to_not_convert\n\n model_id = \"mosaicml/mpt-7b\"\n config = AutoConfig.from_pretrained(model_id, revision=\"72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7\")\n with init_empty_weights():\n model = MptForCausalLM(config)\n # The order of the keys does not matter, so we sort them before comparing, same for the other tests.\n self.assertEqual(get_keys_to_not_convert(model).sort(), [\"lm_head\", \"transformer.wte\"].sort())\n\n model_id = \"Salesforce/blip2-opt-2.7b\"\n config = AutoConfig.from_pretrained(model_id, revision=\"1ef7f63a8f0a144c13fdca8103eb7b4691c74cec\")\n with init_empty_weights():\n model = Blip2ForConditionalGeneration(config)\n self.assertEqual(\n get_keys_to_not_convert(model).sort(),\n [\"language_model.lm_head\", \"language_model.model.decoder.embed_tokens\"].sort(),\n )\n\n model_id = \"facebook/opt-350m\"\n config = AutoConfig.from_pretrained(model_id, revision=\"cb32f77e905cccbca1d970436fb0f5e6b58ee3c5\")\n with init_empty_weights():\n model = OPTForCausalLM(config)\n self.assertEqual(get_keys_to_not_convert(model).sort(), [\"lm_head\", \"model.decoder.embed_tokens\"].sort())\n\n model_id = \"roberta-large\"\n config = AutoConfig.from_pretrained(model_id, revision=\"716877d372b884cad6d419d828bac6c85b3b18d9\")\n with init_empty_weights():\n model = AutoModelForMaskedLM.from_config(config)\n self.assertEqual(\n get_keys_to_not_convert(model).sort(),\n [\"'roberta.embeddings.word_embeddings', 'lm_head', 'lm_head.decoder\"].sort(),\n )\n\n def test_quantization_config_json_serialization(self):\n r\"\"\"\n A simple test to check if the quantization config is correctly serialized and deserialized\n \"\"\"\n config = self.model_8bit.config\n\n self.assertTrue(hasattr(config, \"quantization_config\"))\n\n _ = config.to_dict()\n _ = config.to_diff_dict()\n\n _ = config.to_json_string()\n\n def test_original_dtype(self):\n r\"\"\"\n A simple test to check if the model succesfully stores the original dtype\n \"\"\"\n self.assertTrue(hasattr(self.model_8bit.config, \"_pre_quantization_dtype\"))\n self.assertFalse(hasattr(self.model_fp16.config, \"_pre_quantization_dtype\"))\n self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16)\n\n def test_memory_footprint(self):\n r\"\"\"\n A simple test to check if the model conversion has been done correctly by checking on the\n memory 
footprint of the converted model and the class type of the linear layers of the converted models\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n mem_fp16 = self.model_fp16.get_memory_footprint()\n mem_8bit = self.model_8bit.get_memory_footprint()\n\n self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE)\n self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params)\n\n def test_linear_are_8bit(self):\n r\"\"\"\n A simple test to check if the model conversion has been done correctly by checking on the\n memory footprint of the converted model and the class type of the linear layers of the converted models\n \"\"\"\n from transformers import T5PreTrainedModel\n\n self.model_fp16.get_memory_footprint()\n self.model_8bit.get_memory_footprint()\n\n for name, module in self.model_8bit.named_modules():\n if isinstance(module, torch.nn.Linear):\n if name not in [\"lm_head\"] + T5PreTrainedModel._keep_in_fp32_modules:\n self.assertTrue(module.weight.dtype == torch.int8)\n\n def test_llm_skip(self):\n r\"\"\"\n A simple test to check if `llm_int8_skip_modules` works as expected\n \"\"\"\n import bitsandbytes as bnb\n\n quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=[\"classifier\"])\n seq_classification_model = AutoModelForSequenceClassification.from_pretrained(\n \"roberta-large-mnli\", quantization_config=quantization_config\n )\n self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8)\n self.assertTrue(\n isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt)\n )\n\n self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear))\n self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8)\n self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear))\n self.assertTrue(seq_classification_model.classifier.out_proj != torch.int8)\n\n def test_generate_quality(self):\n r\"\"\"\n Test the generation quality of the quantized model and see that we are matching the expected output.\n Given that we are operating on small numbers + the testing model is relatively small, we might not get\n the same output across GPUs. 
So we'll generate few tokens (5-10) and check their output.\n \"\"\"\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = self.model_8bit.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n def test_generate_quality_config(self):\n r\"\"\"\n Test that loading the model with the config is equivalent\n \"\"\"\n bnb_config = BitsAndBytesConfig()\n bnb_config.load_in_8bit = True\n\n model_8bit_from_config = AutoModelForCausalLM.from_pretrained(\n self.model_name, quantization_config=bnb_config, device_map=\"auto\"\n )\n\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = model_8bit_from_config.generate(\n input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10\n )\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n def test_raise_if_config_and_load_in_8bit(self):\n r\"\"\"\n Test that loading the model with the config and `load_in_8bit` raises an error\n \"\"\"\n bnb_config = BitsAndBytesConfig()\n\n with self.assertRaises(ValueError):\n _ = AutoModelForCausalLM.from_pretrained(\n self.model_name,\n quantization_config=bnb_config,\n load_in_8bit=True,\n device_map=\"auto\",\n llm_int8_enable_fp32_cpu_offload=True,\n )\n\n def test_device_and_dtype_assignment(self):\n r\"\"\"\n Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error.\n Checks also if other models are casted correctly.\n \"\"\"\n with self.assertRaises(ValueError):\n # Tries with `str`\n self.model_8bit.to(\"cpu\")\n\n with self.assertRaises(ValueError):\n # Tries with a `dtype``\n self.model_8bit.to(torch.float16)\n\n with self.assertRaises(ValueError):\n # Tries with a `device`\n self.model_8bit.to(torch.device(\"cuda:0\"))\n\n with self.assertRaises(ValueError):\n # Tries with a `device`\n self.model_8bit.float()\n\n with self.assertRaises(ValueError):\n # Tries with a `device`\n self.model_8bit.half()\n\n # Test if we did not break anything\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n\n self.model_fp16 = self.model_fp16.to(torch.float32)\n _ = self.model_fp16.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n # Check this does not throw an error\n _ = self.model_fp16.to(\"cpu\")\n\n # Check this does not throw an error\n _ = self.model_fp16.half()\n\n # Check this does not throw an error\n _ = self.model_fp16.float()\n\n def test_fp32_int8_conversion(self):\n r\"\"\"\n Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.\n \"\"\"\n model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-small\", load_in_8bit=True, device_map=\"auto\")\n self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)\n\n def test_int8_serialization(self):\n r\"\"\"\n Test whether it is possible to serialize a model in 8-bit.\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n self.model_8bit.save_pretrained(tmpdirname)\n\n # check that the file `quantization_config` is present\n config = AutoConfig.from_pretrained(tmpdirname)\n self.assertTrue(hasattr(config, \"quantization_config\"))\n\n model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map=\"auto\")\n\n linear = 
get_some_linear_layer(model_from_saved)\n self.assertTrue(linear.weight.__class__ == Int8Params)\n self.assertTrue(hasattr(linear.weight, \"SCB\"))\n\n # generate\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = model_from_saved.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n def test_int8_serialization_regression(self):\n r\"\"\"\n Test whether it is possible to serialize a model in 8-bit - using not safetensors\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False)\n\n # check that the file `quantization_config` is present\n config = AutoConfig.from_pretrained(tmpdirname)\n self.assertTrue(hasattr(config, \"quantization_config\"))\n\n model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map=\"auto\")\n\n linear = get_some_linear_layer(model_from_saved)\n self.assertTrue(linear.weight.__class__ == Int8Params)\n self.assertTrue(hasattr(linear.weight, \"SCB\"))\n\n # generate\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = model_from_saved.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n def test_int8_serialization_sharded(self):\n r\"\"\"\n Test whether it is possible to serialize a model in 8-bit - sharded version.\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n self.model_8bit.save_pretrained(tmpdirname, max_shard_size=\"200MB\")\n\n # check that the file `quantization_config` is present\n config = AutoConfig.from_pretrained(tmpdirname)\n self.assertTrue(hasattr(config, \"quantization_config\"))\n\n model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname)\n\n linear = get_some_linear_layer(model_from_saved)\n self.assertTrue(linear.weight.__class__ == Int8Params)\n self.assertTrue(hasattr(linear.weight, \"SCB\"))\n\n # generate\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = model_from_saved.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n def test_int8_from_pretrained(self):\n r\"\"\"\n Test whether loading a 8bit model from the Hub works as expected\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n model_id = \"ybelkada/bloom-1b7-8bit\"\n\n model = AutoModelForCausalLM.from_pretrained(model_id)\n\n linear = get_some_linear_layer(model)\n self.assertTrue(linear.weight.__class__ == Int8Params)\n self.assertTrue(hasattr(linear.weight, \"SCB\"))\n\n # generate\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = model.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n\n@require_bitsandbytes\n@require_accelerate\n@require_torch\n@require_torch_gpu\n@slow\nclass MixedInt8T5Test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.model_name = \"t5-small\"\n cls.dense_act_model_name = \"google/flan-t5-small\" # flan-t5 uses 
dense-act instead of dense-relu-dense\n cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)\n cls.input_text = \"Translate in German: Hello, my dog is cute\"\n\n def tearDown(self):\n r\"\"\"\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n \"\"\"\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_inference_without_keep_in_fp32(self):\n r\"\"\"\n Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.\n `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test\n both cases.\n \"\"\"\n from transformers import T5ForConditionalGeneration\n\n modules = T5ForConditionalGeneration._keep_in_fp32_modules\n T5ForConditionalGeneration._keep_in_fp32_modules = None\n\n # test with `t5-small`\n model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map=\"auto\")\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\").to(0)\n _ = model.generate(**encoded_input)\n\n # test with `flan-t5-small`\n model = T5ForConditionalGeneration.from_pretrained(\n self.dense_act_model_name, load_in_8bit=True, device_map=\"auto\"\n )\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\").to(0)\n _ = model.generate(**encoded_input)\n T5ForConditionalGeneration._keep_in_fp32_modules = modules\n\n def test_inference_with_keep_in_fp32(self):\n r\"\"\"\n Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly.\n `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. We need to test\n both cases.\n \"\"\"\n import bitsandbytes as bnb\n\n from transformers import T5ForConditionalGeneration\n\n # test with `t5-small`\n model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map=\"auto\")\n\n # there was a bug with decoders - this test checks that it is fixed\n self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))\n\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\").to(0)\n _ = model.generate(**encoded_input)\n\n # test with `flan-t5-small`\n model = T5ForConditionalGeneration.from_pretrained(\n self.dense_act_model_name, load_in_8bit=True, device_map=\"auto\"\n )\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\").to(0)\n _ = model.generate(**encoded_input)\n\n def test_inference_with_keep_in_fp32_serialized(self):\n r\"\"\"\n Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on\n a serialized model.\n `flan-t5-small` uses `T5DenseGatedActDense` whereas `t5-small` uses `T5DenseReluDense`. 
We need to test\n both cases.\n \"\"\"\n import bitsandbytes as bnb\n\n from transformers import T5ForConditionalGeneration\n\n # test with `t5-small`\n model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map=\"auto\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n model.save_pretrained(tmp_dir)\n\n model = T5ForConditionalGeneration.from_pretrained(tmp_dir)\n\n # there was a bug with decoders - this test checks that it is fixed\n self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt))\n\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\").to(0)\n _ = model.generate(**encoded_input)\n\n # test with `flan-t5-small`\n model = T5ForConditionalGeneration.from_pretrained(\n self.dense_act_model_name, load_in_8bit=True, device_map=\"auto\"\n )\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\").to(0)\n _ = model.generate(**encoded_input)\n\n\nclass MixedInt8ModelClassesTest(BaseMixedInt8Test):\n def setUp(self):\n super().setUp()\n # model_name\n self.model_name = \"bigscience/bloom-560m\"\n self.seq_to_seq_name = \"t5-small\"\n\n # Different types of model\n\n self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map=\"auto\")\n # Sequence classification model\n self.sequence_model = AutoModelForSequenceClassification.from_pretrained(\n self.model_name, load_in_8bit=True, device_map=\"auto\"\n )\n # CausalLM model\n self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map=\"auto\")\n # Seq2seq model\n self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(\n self.seq_to_seq_name, load_in_8bit=True, device_map=\"auto\"\n )\n\n def tearDown(self):\n r\"\"\"\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n \"\"\"\n del self.base_model\n del self.sequence_model\n del self.model_8bit\n del self.seq_to_seq_model\n\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_correct_head_class(self):\n r\"\"\"\n A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification)\n are kept in their native class.\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n # last param of a base model should be a linear8bit module\n self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params)\n\n # Other heads should be nn.Parameter\n self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter)\n self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)\n self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)\n\n\nclass MixedInt8TestPipeline(BaseMixedInt8Test):\n def setUp(self):\n super().setUp()\n\n def tearDown(self):\n r\"\"\"\n TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to\n avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27\n \"\"\"\n del self.pipe\n\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_pipeline(self):\n r\"\"\"\n The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. 
Since\n we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything\n on pipline.\n \"\"\"\n # self._clear_cuda_cache()\n self.pipe = pipeline(\n \"text-generation\",\n model=self.model_name,\n model_kwargs={\"device_map\": \"auto\", \"load_in_8bit\": True},\n max_new_tokens=self.MAX_NEW_TOKENS,\n )\n\n # Real second forward pass\n pipeline_output = self.pipe(self.input_text)\n self.assertIn(pipeline_output[0][\"generated_text\"], self.EXPECTED_OUTPUTS)\n\n\n@require_torch_multi_gpu\nclass MixedInt8TestMultiGpu(BaseMixedInt8Test):\n def setUp(self):\n super().setUp()\n\n def test_multi_gpu_loading(self):\n r\"\"\"\n This tests that the model has been loaded and can be used correctly on a multi-GPU setup.\n Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice\n \"\"\"\n\n model_parallel = AutoModelForCausalLM.from_pretrained(\n self.model_name, load_in_8bit=True, device_map=\"balanced\"\n )\n\n # Check correct device map\n self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})\n\n # Check that inference pass works on the model\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n\n # Second real batch\n output_parallel = model_parallel.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n\n\n@require_torch_multi_gpu\nclass MixedInt8TestCpuGpu(BaseMixedInt8Test):\n def setUp(self):\n super().setUp()\n\n def check_inference_correctness(self, model):\n # Check that inference pass works on the model\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n\n # Check the exactness of the results\n output_parallel = model.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n # Get the generation\n output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True)\n self.assertIn(output_text, self.EXPECTED_OUTPUTS)\n\n def test_cpu_gpu_loading_random_device_map(self):\n r\"\"\"\n A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`.\n \"\"\"\n device_map = {\n \"transformer.word_embeddings\": 0,\n \"transformer.word_embeddings_layernorm\": 0,\n \"lm_head\": 0,\n \"transformer.h.0\": \"cpu\",\n \"transformer.h.1\": \"cpu\",\n \"transformer.h.2\": 0,\n \"transformer.h.3\": 0,\n \"transformer.h.4\": 0,\n \"transformer.h.5\": 0,\n \"transformer.h.6\": 0,\n \"transformer.h.7\": 0,\n \"transformer.h.8\": 0,\n \"transformer.h.9\": 1,\n \"transformer.h.10\": 0,\n \"transformer.h.11\": 1,\n \"transformer.h.12\": 0,\n \"transformer.h.13\": 0,\n \"transformer.h.14\": 1,\n \"transformer.h.15\": 0,\n \"transformer.h.16\": 0,\n \"transformer.h.17\": 1,\n \"transformer.h.18\": 1,\n \"transformer.h.19\": 0,\n \"transformer.h.20\": 1,\n \"transformer.h.21\": 1,\n \"transformer.h.22\": 0,\n \"transformer.h.23\": 0,\n \"transformer.ln_f\": 1,\n }\n\n bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)\n\n model_8bit = AutoModelForCausalLM.from_pretrained(\n self.model_name,\n device_map=device_map,\n quantization_config=bnb_config,\n )\n\n # Check that the model has been correctly set on device 0, 1, and `cpu`.\n self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, \"cpu\"})\n\n self.check_inference_correctness(model_8bit)\n\n def test_cpu_gpu_loading_custom_device_map(self):\n r\"\"\"\n A test 
to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`.\n This time the device map is more organized than the test above and uses the abstraction\n `transformer.h` to encapsulate all the decoder layers.\n \"\"\"\n device_map = {\n \"transformer.word_embeddings\": \"cpu\",\n \"transformer.word_embeddings_layernorm\": \"cpu\",\n \"lm_head\": \"cpu\",\n \"transformer.h\": 0,\n \"transformer.ln_f\": 1,\n }\n bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)\n\n # Load model\n model_8bit = AutoModelForCausalLM.from_pretrained(\n self.model_name,\n device_map=device_map,\n quantization_config=bnb_config,\n )\n\n # Check that the model has been correctly set on device 0, 1, and `cpu`.\n self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, \"cpu\"})\n\n self.check_inference_correctness(model_8bit)\n\n def test_cpu_gpu_disk_loading_custom_device_map(self):\n r\"\"\"\n A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`.\n This time we also add `disk` on the device_map.\n \"\"\"\n device_map = {\n \"transformer.word_embeddings\": 0,\n \"transformer.word_embeddings_layernorm\": \"cpu\",\n \"lm_head\": 0,\n \"transformer.h\": 1,\n \"transformer.ln_f\": \"disk\",\n }\n bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Load model\n model_8bit = AutoModelForCausalLM.from_pretrained(\n self.model_name,\n device_map=device_map,\n quantization_config=bnb_config,\n offload_folder=tmpdirname,\n )\n\n # Check that the model has been correctly set on device 0, 1, and `cpu`.\n self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, \"cpu\", \"disk\"})\n\n self.check_inference_correctness(model_8bit)\n\n def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self):\n r\"\"\"\n A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`.\n This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config\n \"\"\"\n device_map = {\n \"transformer.word_embeddings\": 0,\n \"transformer.word_embeddings_layernorm\": \"cpu\",\n \"lm_head\": 0,\n \"transformer.h\": 1,\n \"transformer.ln_f\": \"disk\",\n }\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Load model\n model_8bit = AutoModelForCausalLM.from_pretrained(\n self.model_name,\n device_map=device_map,\n load_in_8bit=True,\n llm_int8_enable_fp32_cpu_offload=True,\n offload_folder=tmpdirname,\n )\n\n # Check that the model has been correctly set on device 0, 1, and `cpu`.\n self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, \"cpu\", \"disk\"})\n\n self.check_inference_correctness(model_8bit)\n\n\nclass MixedInt8TestTraining(BaseMixedInt8Test):\n def setUp(self):\n self.model_name = \"facebook/opt-350m\"\n super().setUp()\n\n def test_training(self):\n if version.parse(importlib.metadata.version(\"bitsandbytes\")) < version.parse(\"0.37.0\"):\n return\n\n # Step 1: freeze all parameters\n model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True)\n\n self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})\n\n for param in model.parameters():\n param.requires_grad = False # freeze the model - train adapters later\n if param.ndim == 1:\n # cast the small parameters (e.g. 
layernorm) to fp32 for stability\n param.data = param.data.to(torch.float32)\n\n # Step 2: add adapters\n for _, module in model.named_modules():\n if \"OPTAttention\" in repr(type(module)):\n module.q_proj = LoRALayer(module.q_proj, rank=16)\n module.k_proj = LoRALayer(module.k_proj, rank=16)\n module.v_proj = LoRALayer(module.v_proj, rank=16)\n\n # Step 3: dummy batch\n batch = self.tokenizer(\"Test batch \", return_tensors=\"pt\").to(0)\n\n # Step 4: Check if the gradient is not None\n with torch.cuda.amp.autocast():\n out = model.forward(**batch)\n out.logits.norm().backward()\n\n for module in model.modules():\n if isinstance(module, LoRALayer):\n self.assertTrue(module.adapter[1].weight.grad is not None)\n self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)\n elif isinstance(module, nn.Embedding):\n self.assertTrue(module.weight.grad is None)\n\n\nclass MixedInt8GPT2Test(MixedInt8Test):\n model_name = \"gpt2-xl\"\n EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357\n EXPECTED_OUTPUTS = set()\n EXPECTED_OUTPUTS.add(\"Hello my name is John Doe, and I'm a big fan of\")\n EXPECTED_OUTPUTS.add(\"Hello my name is John Doe, and I'm a fan of the\")\n\n def test_int8_from_pretrained(self):\n r\"\"\"\n Test whether loading a 8bit model from the Hub works as expected\n \"\"\"\n from bitsandbytes.nn import Int8Params\n\n model_id = \"ybelkada/gpt2-xl-8bit\"\n\n model = AutoModelForCausalLM.from_pretrained(model_id)\n\n linear = get_some_linear_layer(model)\n self.assertTrue(linear.weight.__class__ == Int8Params)\n self.assertTrue(hasattr(linear.weight, \"SCB\"))\n\n # generate\n encoded_input = self.tokenizer(self.input_text, return_tensors=\"pt\")\n output_sequences = model.generate(input_ids=encoded_input[\"input_ids\"].to(0), max_new_tokens=10)\n\n self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)\n", "output": ["get_some_linear_layer", "LoRALayer", "MixedInt8ModelClassesTest", "BaseMixedInt8Test", "MixedInt8TestMultiGpu", "MixedInt8T5Test", "MixedInt8TestCpuGpu", "MixedInt8TestTraining", "MixedInt8GPT2Test", "MixedInt8TestPipeline", "MixedInt8Test"], "metadata": {"file_path": "transformers-main/tests/quantization/bnb/test_mixed_int8.py", "file_length": 11504, "symbol_dict": [{"symbol": "get_some_linear_layer", "type": "mannual_defined_function", "byte_location": 1144, "location": 355}, {"symbol": "MixedInt8TestCpuGpu", "type": "mannual_defined_class", "byte_location": 26955, "location": 8693}, {"symbol": "MixedInt8TestPipeline", "type": "mannual_defined_class", "byte_location": 24676, "location": 7979}, {"symbol": "MixedInt8TestTraining", "type": "mannual_defined_class", "byte_location": 32661, "location": 10551}, {"symbol": "MixedInt8ModelClassesTest", "type": "mannual_defined_class", "byte_location": 22591, "location": 7307}, {"symbol": "LoRALayer", "type": "mannual_defined_class", "byte_location": 1569, "location": 505}, {"symbol": "MixedInt8GPT2Test", "type": "mannual_defined_class", "byte_location": 34430, "location": 11107}, {"symbol": "BaseMixedInt8Test", "type": "mannual_defined_class", "byte_location": 2469, "location": 789}, {"symbol": "MixedInt8TestMultiGpu", "type": "mannual_defined_class", "byte_location": 25901, "location": 8356}, {"symbol": "MixedInt8T5Test", "type": "mannual_defined_class", "byte_location": 18080, "location": 5808}, {"symbol": "MixedInt8Test", "type": "mannual_defined_class", "byte_location": 3291, "location": 1059}]}} {"input": "# MIT License\n\n# Copyright (c) 2019 Yang Liu and the 
HuggingFace team\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport copy\nimport math\n\nimport numpy as np\nimport torch\nfrom configuration_bertabs import BertAbsConfig\nfrom torch import nn\nfrom torch.nn.init import xavier_uniform_\n\nfrom transformers import BertConfig, BertModel, PreTrainedModel\n\n\nMAX_SIZE = 5000\n\nBERTABS_FINETUNED_MODEL_ARCHIVE_LIST = [\n \"remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization\",\n]\n\n\nclass BertAbsPreTrainedModel(PreTrainedModel):\n config_class = BertAbsConfig\n load_tf_weights = False\n base_model_prefix = \"bert\"\n\n\nclass BertAbs(BertAbsPreTrainedModel):\n def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None):\n super().__init__(args)\n self.args = args\n self.bert = Bert()\n\n # If pre-trained weights are passed for Bert, load these.\n load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False\n if load_bert_pretrained_extractive:\n self.bert.model.load_state_dict(\n {n[11:]: p for n, p in bert_extractive_checkpoint.items() if n.startswith(\"bert.model\")},\n strict=True,\n )\n\n self.vocab_size = self.bert.model.config.vocab_size\n\n if args.max_pos > 512:\n my_pos_embeddings = nn.Embedding(args.max_pos, self.bert.model.config.hidden_size)\n my_pos_embeddings.weight.data[:512] = self.bert.model.embeddings.position_embeddings.weight.data\n my_pos_embeddings.weight.data[512:] = self.bert.model.embeddings.position_embeddings.weight.data[-1][\n None, :\n ].repeat(args.max_pos - 512, 1)\n self.bert.model.embeddings.position_embeddings = my_pos_embeddings\n tgt_embeddings = nn.Embedding(self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0)\n\n tgt_embeddings.weight = copy.deepcopy(self.bert.model.embeddings.word_embeddings.weight)\n\n self.decoder = TransformerDecoder(\n self.args.dec_layers,\n self.args.dec_hidden_size,\n heads=self.args.dec_heads,\n d_ff=self.args.dec_ff_size,\n dropout=self.args.dec_dropout,\n embeddings=tgt_embeddings,\n vocab_size=self.vocab_size,\n )\n\n gen_func = nn.LogSoftmax(dim=-1)\n self.generator = nn.Sequential(nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func)\n self.generator[0].weight = self.decoder.embeddings.weight\n\n load_from_checkpoints = False if checkpoint is None else True\n if load_from_checkpoints:\n self.load_state_dict(checkpoint)\n\n def init_weights(self):\n for module in self.decoder.modules():\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n elif 
isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n for p in self.generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n else:\n p.data.zero_()\n\n def forward(\n self,\n encoder_input_ids,\n decoder_input_ids,\n token_type_ids,\n encoder_attention_mask,\n decoder_attention_mask,\n ):\n encoder_output = self.bert(\n input_ids=encoder_input_ids,\n token_type_ids=token_type_ids,\n attention_mask=encoder_attention_mask,\n )\n encoder_hidden_states = encoder_output[0]\n dec_state = self.decoder.init_decoder_state(encoder_input_ids, encoder_hidden_states)\n decoder_outputs, _ = self.decoder(decoder_input_ids[:, :-1], encoder_hidden_states, dec_state)\n return decoder_outputs\n\n\nclass Bert(nn.Module):\n \"\"\"This class is not really necessary and should probably disappear.\"\"\"\n\n def __init__(self):\n super().__init__()\n config = BertConfig.from_pretrained(\"bert-base-uncased\")\n self.model = BertModel(config)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs):\n self.eval()\n with torch.no_grad():\n encoder_outputs, _ = self.model(\n input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, **kwargs\n )\n return encoder_outputs\n\n\nclass TransformerDecoder(nn.Module):\n \"\"\"\n The Transformer decoder from \"Attention is All You Need\".\n\n Args:\n num_layers (int): number of encoder layers.\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n dropout (float): dropout parameters\n embeddings (:obj:`onmt.modules.Embeddings`):\n embeddings to use, should have positional encodings\n attn_type (str): if using a separate copy attention\n \"\"\"\n\n def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size):\n super().__init__()\n\n # Basic attributes.\n self.decoder_type = \"transformer\"\n self.num_layers = num_layers\n self.embeddings = embeddings\n self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim)\n\n # Build TransformerDecoder.\n self.transformer_layers = nn.ModuleList(\n [TransformerDecoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_layers)]\n )\n\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n # forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask)\n # def forward(self, input_ids, state, attention_mask=None, memory_lengths=None,\n # step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None):\n def forward(\n self,\n input_ids,\n encoder_hidden_states=None,\n state=None,\n attention_mask=None,\n memory_lengths=None,\n step=None,\n cache=None,\n encoder_attention_mask=None,\n ):\n \"\"\"\n See :obj:`onmt.modules.RNNDecoderBase.forward()`\n memory_bank = encoder_hidden_states\n \"\"\"\n # Name conversion\n tgt = input_ids\n memory_bank = encoder_hidden_states\n memory_mask = encoder_attention_mask\n\n # src_words = state.src\n src_words = state.src\n src_batch, src_len = src_words.size()\n\n padding_idx = self.embeddings.padding_idx\n\n # Decoder padding mask\n tgt_words = tgt\n tgt_batch, tgt_len = tgt_words.size()\n tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len)\n\n # Encoder padding mask\n if memory_mask is not None:\n src_len = memory_mask.size(-1)\n src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len)\n else:\n src_pad_mask = 
src_words.data.eq(padding_idx).unsqueeze(1).expand(src_batch, tgt_len, src_len)\n\n # Pass through the embeddings\n emb = self.embeddings(input_ids)\n output = self.pos_emb(emb, step)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n if state.cache is None:\n saved_inputs = []\n\n for i in range(self.num_layers):\n prev_layer_input = None\n if state.cache is None:\n if state.previous_input is not None:\n prev_layer_input = state.previous_layer_inputs[i]\n\n output, all_input = self.transformer_layers[i](\n output,\n memory_bank,\n src_pad_mask,\n tgt_pad_mask,\n previous_input=prev_layer_input,\n layer_cache=state.cache[\"layer_{}\".format(i)] if state.cache is not None else None,\n step=step,\n )\n if state.cache is None:\n saved_inputs.append(all_input)\n\n if state.cache is None:\n saved_inputs = torch.stack(saved_inputs)\n\n output = self.layer_norm(output)\n\n if state.cache is None:\n state = state.update_state(tgt, saved_inputs)\n\n # Decoders in transformers return a tuple. Beam search will fail\n # if we don't follow this convention.\n return output, state # , state\n\n def init_decoder_state(self, src, memory_bank, with_cache=False):\n \"\"\"Init decoder state\"\"\"\n state = TransformerDecoderState(src)\n if with_cache:\n state._init_cache(memory_bank, self.num_layers)\n return state\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, dropout, dim, max_len=5000):\n pe = torch.zeros(max_len, dim)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)))\n pe[:, 0::2] = torch.sin(position.float() * div_term)\n pe[:, 1::2] = torch.cos(position.float() * div_term)\n pe = pe.unsqueeze(0)\n super().__init__()\n self.register_buffer(\"pe\", pe)\n self.dropout = nn.Dropout(p=dropout)\n self.dim = dim\n\n def forward(self, emb, step=None):\n emb = emb * math.sqrt(self.dim)\n if step:\n emb = emb + self.pe[:, step][:, None, :]\n\n else:\n emb = emb + self.pe[:, : emb.size(1)]\n emb = self.dropout(emb)\n return emb\n\n def get_emb(self, emb):\n return self.pe[:, : emb.size(1)]\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"\n Args:\n d_model (int): the dimension of keys/values/queries in\n MultiHeadedAttention, also the input size of\n the first-layer of the PositionwiseFeedForward.\n heads (int): the number of heads for MultiHeadedAttention.\n d_ff (int): the second-layer of the PositionwiseFeedForward.\n dropout (float): dropout probability(0-1.0).\n self_attn_type (string): type of self-attention scaled-dot, average\n \"\"\"\n\n def __init__(self, d_model, heads, d_ff, dropout):\n super().__init__()\n\n self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)\n\n self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)\n self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)\n self.drop = nn.Dropout(dropout)\n mask = self._get_attn_subsequent_mask(MAX_SIZE)\n # Register self.mask as a saved_state in TransformerDecoderLayer, so\n # it gets TransformerDecoderLayer's cuda behavior automatically.\n self.register_buffer(\"mask\", mask)\n\n def forward(\n self,\n inputs,\n memory_bank,\n src_pad_mask,\n tgt_pad_mask,\n previous_input=None,\n layer_cache=None,\n step=None,\n ):\n \"\"\"\n Args:\n inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`\n memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`\n 
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`\n tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`\n\n Returns:\n (`FloatTensor`, `FloatTensor`, `FloatTensor`):\n\n * output `[batch_size x 1 x model_dim]`\n * attn `[batch_size x 1 x src_len]`\n * all_input `[batch_size x current_step x model_dim]`\n\n \"\"\"\n dec_mask = torch.gt(tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0)\n input_norm = self.layer_norm_1(inputs)\n all_input = input_norm\n if previous_input is not None:\n all_input = torch.cat((previous_input, input_norm), dim=1)\n dec_mask = None\n\n query = self.self_attn(\n all_input,\n all_input,\n input_norm,\n mask=dec_mask,\n layer_cache=layer_cache,\n type=\"self\",\n )\n\n query = self.drop(query) + inputs\n\n query_norm = self.layer_norm_2(query)\n mid = self.context_attn(\n memory_bank,\n memory_bank,\n query_norm,\n mask=src_pad_mask,\n layer_cache=layer_cache,\n type=\"context\",\n )\n output = self.feed_forward(self.drop(mid) + query)\n\n return output, all_input\n # return output\n\n def _get_attn_subsequent_mask(self, size):\n \"\"\"\n Get an attention mask to avoid using the subsequent info.\n\n Args:\n size: int\n\n Returns:\n (`LongTensor`):\n\n * subsequent_mask `[1 x size x size]`\n \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype(\"uint8\")\n subsequent_mask = torch.from_numpy(subsequent_mask)\n return subsequent_mask\n\n\nclass MultiHeadedAttention(nn.Module):\n \"\"\"\n Multi-Head Attention module from\n \"Attention is All You Need\"\n :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.\n\n Similar to standard `dot` attention but uses\n multiple attention distributions simulataneously\n to select relevant items.\n\n .. mermaid::\n\n graph BT\n A[key]\n B[value]\n C[query]\n O[output]\n subgraph Attn\n D[Attn 1]\n E[Attn 2]\n F[Attn N]\n end\n A --> D\n C --> D\n A --> E\n C --> E\n A --> F\n C --> F\n D --> O\n E --> O\n F --> O\n B --> O\n\n Also includes several additional tricks.\n\n Args:\n head_count (int): number of parallel heads\n model_dim (int): the dimension of keys/values/queries,\n must be divisible by head_count\n dropout (float): dropout parameter\n \"\"\"\n\n def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True):\n assert model_dim % head_count == 0\n self.dim_per_head = model_dim // head_count\n self.model_dim = model_dim\n\n super().__init__()\n self.head_count = head_count\n\n self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head)\n self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head)\n self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head)\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(dropout)\n self.use_final_linear = use_final_linear\n if self.use_final_linear:\n self.final_linear = nn.Linear(model_dim, model_dim)\n\n def forward(\n self,\n key,\n value,\n query,\n mask=None,\n layer_cache=None,\n type=None,\n predefined_graph_1=None,\n ):\n \"\"\"\n Compute the context vector and the attention vectors.\n\n Args:\n key (`FloatTensor`): set of `key_len`\n key vectors `[batch, key_len, dim]`\n value (`FloatTensor`): set of `key_len`\n value vectors `[batch, key_len, dim]`\n query (`FloatTensor`): set of `query_len`\n query vectors `[batch, query_len, dim]`\n mask: binary mask indicating which keys have\n non-zero attention `[batch, query_len, key_len]`\n Returns:\n (`FloatTensor`, `FloatTensor`) :\n\n * output context vectors `[batch, query_len, dim]`\n * one 
of the attention vectors `[batch, query_len, key_len]`\n \"\"\"\n batch_size = key.size(0)\n dim_per_head = self.dim_per_head\n head_count = self.head_count\n\n def shape(x):\n \"\"\"projection\"\"\"\n return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\"compute context\"\"\"\n return x.transpose(1, 2).contiguous().view(batch_size, -1, head_count * dim_per_head)\n\n # 1) Project key, value, and query.\n if layer_cache is not None:\n if type == \"self\":\n query, key, value = (\n self.linear_query(query),\n self.linear_keys(query),\n self.linear_values(query),\n )\n\n key = shape(key)\n value = shape(value)\n\n if layer_cache is not None:\n device = key.device\n if layer_cache[\"self_keys\"] is not None:\n key = torch.cat((layer_cache[\"self_keys\"].to(device), key), dim=2)\n if layer_cache[\"self_values\"] is not None:\n value = torch.cat((layer_cache[\"self_values\"].to(device), value), dim=2)\n layer_cache[\"self_keys\"] = key\n layer_cache[\"self_values\"] = value\n elif type == \"context\":\n query = self.linear_query(query)\n if layer_cache is not None:\n if layer_cache[\"memory_keys\"] is None:\n key, value = self.linear_keys(key), self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key, value = (\n layer_cache[\"memory_keys\"],\n layer_cache[\"memory_values\"],\n )\n layer_cache[\"memory_keys\"] = key\n layer_cache[\"memory_values\"] = value\n else:\n key, value = self.linear_keys(key), self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key = self.linear_keys(key)\n value = self.linear_values(value)\n query = self.linear_query(query)\n key = shape(key)\n value = shape(value)\n\n query = shape(query)\n\n # 2) Calculate and scale scores.\n query = query / math.sqrt(dim_per_head)\n scores = torch.matmul(query, key.transpose(2, 3))\n\n if mask is not None:\n mask = mask.unsqueeze(1).expand_as(scores)\n scores = scores.masked_fill(mask, -1e18)\n\n # 3) Apply attention dropout and compute context vectors.\n\n attn = self.softmax(scores)\n\n if predefined_graph_1 is not None:\n attn_masked = attn[:, -1] * predefined_graph_1\n attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9)\n\n attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1)\n\n drop_attn = self.dropout(attn)\n if self.use_final_linear:\n context = unshape(torch.matmul(drop_attn, value))\n output = self.final_linear(context)\n return output\n else:\n context = torch.matmul(drop_attn, value)\n return context\n\n\nclass DecoderState(object):\n \"\"\"Interface for grouping together the current state of a recurrent\n decoder. In the simplest case just represents the hidden state of\n the model. 
But can also be used for implementing various forms of\n input_feeding and non-recurrent models.\n\n Modules need to implement this to utilize beam search decoding.\n \"\"\"\n\n def detach(self):\n \"\"\"Need to document this\"\"\"\n self.hidden = tuple([_.detach() for _ in self.hidden])\n self.input_feed = self.input_feed.detach()\n\n def beam_update(self, idx, positions, beam_size):\n \"\"\"Need to document this\"\"\"\n for e in self._all:\n sizes = e.size()\n br = sizes[1]\n if len(sizes) == 3:\n sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[:, :, idx]\n else:\n sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2], sizes[3])[:, :, idx]\n\n sent_states.data.copy_(sent_states.data.index_select(1, positions))\n\n def map_batch_fn(self, fn):\n raise NotImplementedError()\n\n\nclass TransformerDecoderState(DecoderState):\n \"\"\"Transformer Decoder state base class\"\"\"\n\n def __init__(self, src):\n \"\"\"\n Args:\n src (FloatTensor): a sequence of source words tensors\n with optional feature tensors, of size (len x batch).\n \"\"\"\n self.src = src\n self.previous_input = None\n self.previous_layer_inputs = None\n self.cache = None\n\n @property\n def _all(self):\n \"\"\"\n Contains attributes that need to be updated in self.beam_update().\n \"\"\"\n if self.previous_input is not None and self.previous_layer_inputs is not None:\n return (self.previous_input, self.previous_layer_inputs, self.src)\n else:\n return (self.src,)\n\n def detach(self):\n if self.previous_input is not None:\n self.previous_input = self.previous_input.detach()\n if self.previous_layer_inputs is not None:\n self.previous_layer_inputs = self.previous_layer_inputs.detach()\n self.src = self.src.detach()\n\n def update_state(self, new_input, previous_layer_inputs):\n state = TransformerDecoderState(self.src)\n state.previous_input = new_input\n state.previous_layer_inputs = previous_layer_inputs\n return state\n\n def _init_cache(self, memory_bank, num_layers):\n self.cache = {}\n\n for l in range(num_layers):\n layer_cache = {\"memory_keys\": None, \"memory_values\": None}\n layer_cache[\"self_keys\"] = None\n layer_cache[\"self_values\"] = None\n self.cache[\"layer_{}\".format(l)] = layer_cache\n\n def repeat_beam_size_times(self, beam_size):\n \"\"\"Repeat beam_size times along batch dimension.\"\"\"\n self.src = self.src.data.repeat(1, beam_size, 1)\n\n def map_batch_fn(self, fn):\n def _recursive_map(struct, batch_dim=0):\n for k, v in struct.items():\n if v is not None:\n if isinstance(v, dict):\n _recursive_map(v)\n else:\n struct[k] = fn(v, batch_dim)\n\n self.src = fn(self.src, 0)\n if self.cache is not None:\n _recursive_map(self.cache)\n\n\ndef gelu(x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n\nclass PositionwiseFeedForward(nn.Module):\n \"\"\"A two-layer Feed-Forward-Network with residual layer norm.\n\n Args:\n d_model (int): the size of input for the first-layer of the FFN.\n d_ff (int): the hidden layer size of the second-layer\n of the FNN.\n dropout (float): dropout probability in :math:`[0, 1)`.\n \"\"\"\n\n def __init__(self, d_model, d_ff, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.actv = gelu\n self.dropout_1 = nn.Dropout(dropout)\n self.dropout_2 = nn.Dropout(dropout)\n\n def forward(self, x):\n inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))\n output = 
self.dropout_2(self.w_2(inter))\n return output + x\n\n\n#\n# TRANSLATOR\n# The following code is used to generate summaries using the\n# pre-trained weights and beam search.\n#\n\n\ndef build_predictor(args, tokenizer, symbols, model, logger=None):\n # we should be able to refactor the global scorer a lot\n scorer = GNMTGlobalScorer(args.alpha, length_penalty=\"wu\")\n translator = Translator(args, model, tokenizer, symbols, global_scorer=scorer, logger=logger)\n return translator\n\n\nclass GNMTGlobalScorer(object):\n \"\"\"\n NMT re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`\n\n Args:\n alpha (float): length parameter\n beta (float): coverage parameter\n \"\"\"\n\n def __init__(self, alpha, length_penalty):\n self.alpha = alpha\n penalty_builder = PenaltyBuilder(length_penalty)\n self.length_penalty = penalty_builder.length_penalty()\n\n def score(self, beam, logprobs):\n \"\"\"\n Rescores a prediction based on penalty functions\n \"\"\"\n normalized_probs = self.length_penalty(beam, logprobs, self.alpha)\n return normalized_probs\n\n\nclass PenaltyBuilder(object):\n \"\"\"\n Returns the Length and Coverage Penalty function for Beam Search.\n\n Args:\n length_pen (str): option name of length pen\n cov_pen (str): option name of cov pen\n \"\"\"\n\n def __init__(self, length_pen):\n self.length_pen = length_pen\n\n def length_penalty(self):\n if self.length_pen == \"wu\":\n return self.length_wu\n elif self.length_pen == \"avg\":\n return self.length_average\n else:\n return self.length_none\n\n \"\"\"\n Below are all the different penalty terms implemented so far\n \"\"\"\n\n def length_wu(self, beam, logprobs, alpha=0.0):\n \"\"\"\n NMT length re-ranking score from\n \"Google's Neural Machine Translation System\" :cite:`wu2016google`.\n \"\"\"\n\n modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha)\n return logprobs / modifier\n\n def length_average(self, beam, logprobs, alpha=0.0):\n \"\"\"\n Returns the average probability of tokens in a sequence.\n \"\"\"\n return logprobs / len(beam.next_ys)\n\n def length_none(self, beam, logprobs, alpha=0.0, beta=0.0):\n \"\"\"\n Returns unmodified scores.\n \"\"\"\n return logprobs\n\n\nclass Translator(object):\n \"\"\"\n Uses a model to translate a batch of sentences.\n\n Args:\n model (:obj:`onmt.modules.NMTModel`):\n NMT model to use for translation\n fields (dict of Fields): data fields\n beam_size (int): size of beam to use\n n_best (int): number of translations produced\n max_length (int): maximum length output to produce\n global_scores (:obj:`GlobalScorer`):\n object to rescore final translations\n copy_attn (bool): use copy attention during translation\n beam_trace (bool): trace beam search for debugging\n logger(logging.Logger): logger.\n \"\"\"\n\n def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None):\n self.logger = logger\n\n self.args = args\n self.model = model\n self.generator = self.model.generator\n self.vocab = vocab\n self.symbols = symbols\n self.start_token = symbols[\"BOS\"]\n self.end_token = symbols[\"EOS\"]\n\n self.global_scorer = global_scorer\n self.beam_size = args.beam_size\n self.min_length = args.min_length\n self.max_length = args.max_length\n\n def translate(self, batch, step, attn_debug=False):\n \"\"\"Generates summaries from one batch of data.\"\"\"\n self.model.eval()\n with torch.no_grad():\n batch_data = self.translate_batch(batch)\n translations = self.from_batch(batch_data)\n return translations\n\n def 
translate_batch(self, batch, fast=False):\n \"\"\"\n Translate a batch of sentences.\n\n Mostly a wrapper around :obj:`Beam`.\n\n Args:\n batch (:obj:`Batch`): a batch from a dataset object\n fast (bool): enables fast beam search (may not support all features)\n \"\"\"\n with torch.no_grad():\n return self._fast_translate_batch(batch, self.max_length, min_length=self.min_length)\n\n # Where the beam search lives\n # I have no idea why it is being called from the method above\n def _fast_translate_batch(self, batch, max_length, min_length=0):\n \"\"\"Beam Search using the encoder inputs contained in `batch`.\"\"\"\n\n # The batch object is funny\n # Instead of just looking at the size of the arguments we encapsulate\n # a size argument.\n # Where is it defined?\n beam_size = self.beam_size\n batch_size = batch.batch_size\n src = batch.src\n segs = batch.segs\n mask_src = batch.mask_src\n\n src_features = self.model.bert(src, segs, mask_src)\n dec_states = self.model.decoder.init_decoder_state(src, src_features, with_cache=True)\n device = src_features.device\n\n # Tile states and memory beam_size times.\n dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim))\n src_features = tile(src_features, beam_size, dim=0)\n batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)\n beam_offset = torch.arange(0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device)\n alive_seq = torch.full([batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device)\n\n # Give full probability to the first beam on the first step.\n topk_log_probs = torch.tensor([0.0] + [float(\"-inf\")] * (beam_size - 1), device=device).repeat(batch_size)\n\n # Structure that holds finished hypotheses.\n hypotheses = [[] for _ in range(batch_size)] # noqa: F812\n\n results = {}\n results[\"predictions\"] = [[] for _ in range(batch_size)] # noqa: F812\n results[\"scores\"] = [[] for _ in range(batch_size)] # noqa: F812\n results[\"gold_score\"] = [0] * batch_size\n results[\"batch\"] = batch\n\n for step in range(max_length):\n decoder_input = alive_seq[:, -1].view(1, -1)\n\n # Decoder forward.\n decoder_input = decoder_input.transpose(0, 1)\n\n dec_out, dec_states = self.model.decoder(decoder_input, src_features, dec_states, step=step)\n\n # Generator forward.\n log_probs = self.generator(dec_out.transpose(0, 1).squeeze(0))\n vocab_size = log_probs.size(-1)\n\n if step < min_length:\n log_probs[:, self.end_token] = -1e20\n\n # Multiply probs by the beam probability.\n log_probs += topk_log_probs.view(-1).unsqueeze(1)\n\n alpha = self.global_scorer.alpha\n length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha\n\n # Flatten probs into a list of possibilities.\n curr_scores = log_probs / length_penalty\n\n if self.args.block_trigram:\n cur_len = alive_seq.size(1)\n if cur_len > 3:\n for i in range(alive_seq.size(0)):\n fail = False\n words = [int(w) for w in alive_seq[i]]\n words = [self.vocab.ids_to_tokens[w] for w in words]\n words = \" \".join(words).replace(\" ##\", \"\").split()\n if len(words) <= 3:\n continue\n trigrams = [(words[i - 1], words[i], words[i + 1]) for i in range(1, len(words) - 1)]\n trigram = tuple(trigrams[-1])\n if trigram in trigrams[:-1]:\n fail = True\n if fail:\n curr_scores[i] = -10e20\n\n curr_scores = curr_scores.reshape(-1, beam_size * vocab_size)\n topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1)\n\n # Recover log probs.\n topk_log_probs = topk_scores * length_penalty\n\n # Resolve beam origin and true word ids.\n 
topk_beam_index = topk_ids.div(vocab_size)\n topk_ids = topk_ids.fmod(vocab_size)\n\n # Map beam_index to batch_index in the flat representation.\n batch_index = topk_beam_index + beam_offset[: topk_beam_index.size(0)].unsqueeze(1)\n select_indices = batch_index.view(-1)\n\n # Append last prediction.\n alive_seq = torch.cat([alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1)\n\n is_finished = topk_ids.eq(self.end_token)\n if step + 1 == max_length:\n is_finished.fill_(1)\n # End condition is top beam is finished.\n end_condition = is_finished[:, 0].eq(1)\n # Save finished hypotheses.\n if is_finished.any():\n predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1))\n for i in range(is_finished.size(0)):\n b = batch_offset[i]\n if end_condition[i]:\n is_finished[i].fill_(1)\n finished_hyp = is_finished[i].nonzero().view(-1)\n # Store finished hypotheses for this batch.\n for j in finished_hyp:\n hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:]))\n # If the batch reached the end, save the n_best hypotheses.\n if end_condition[i]:\n best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True)\n score, pred = best_hyp[0]\n\n results[\"scores\"][b].append(score)\n results[\"predictions\"][b].append(pred)\n non_finished = end_condition.eq(0).nonzero().view(-1)\n # If all sentences are translated, no need to go further.\n if len(non_finished) == 0:\n break\n # Remove finished batches for the next step.\n topk_log_probs = topk_log_probs.index_select(0, non_finished)\n batch_index = batch_index.index_select(0, non_finished)\n batch_offset = batch_offset.index_select(0, non_finished)\n alive_seq = predictions.index_select(0, non_finished).view(-1, alive_seq.size(-1))\n # Reorder states.\n select_indices = batch_index.view(-1)\n src_features = src_features.index_select(0, select_indices)\n dec_states.map_batch_fn(lambda state, dim: state.index_select(dim, select_indices))\n\n return results\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert len(translation_batch[\"gold_score\"]) == len(translation_batch[\"predictions\"])\n batch_size = batch.batch_size\n\n preds, _, _, tgt_str, src = (\n translation_batch[\"predictions\"],\n translation_batch[\"scores\"],\n translation_batch[\"gold_score\"],\n batch.tgt_str,\n batch.src,\n )\n\n translations = []\n for b in range(batch_size):\n pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]])\n pred_sents = \" \".join(pred_sents).replace(\" ##\", \"\")\n gold_sent = \" \".join(tgt_str[b].split())\n raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500]\n raw_src = \" \".join(raw_src)\n translation = (pred_sents, gold_sent, raw_src)\n translations.append(translation)\n\n return translations\n\n\ndef tile(x, count, dim=0):\n \"\"\"\n Tiles x on dimension dim count times.\n \"\"\"\n perm = list(range(len(x.size())))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.size())\n out_size[0] *= count\n batch = x.size(0)\n x = x.view(batch, -1).transpose(0, 1).repeat(count, 1).transpose(0, 1).contiguous().view(*out_size)\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x\n\n\n#\n# Optimizer for training. 
We keep this here in case we want to add\n# a finetuning script.\n#\n\n\nclass BertSumOptimizer(object):\n \"\"\"Specific optimizer for BertSum.\n\n As described in [1], the authors fine-tune BertSum for abstractive\n summarization using two Adam Optimizers with different warm-up steps and\n learning rate. They also use a custom learning rate scheduler.\n\n [1] Liu, Yang, and Mirella Lapata. \"Text summarization with pretrained encoders.\"\n arXiv preprint arXiv:1908.08345 (2019).\n \"\"\"\n\n def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):\n self.encoder = model.encoder\n self.decoder = model.decoder\n self.lr = lr\n self.warmup_steps = warmup_steps\n\n self.optimizers = {\n \"encoder\": torch.optim.Adam(\n model.encoder.parameters(),\n lr=lr[\"encoder\"],\n betas=(beta_1, beta_2),\n eps=eps,\n ),\n \"decoder\": torch.optim.Adam(\n model.decoder.parameters(),\n lr=lr[\"decoder\"],\n betas=(beta_1, beta_2),\n eps=eps,\n ),\n }\n\n self._step = 0\n self.current_learning_rates = {}\n\n def _update_rate(self, stack):\n return self.lr[stack] * min(self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5))\n\n def zero_grad(self):\n self.optimizer_decoder.zero_grad()\n self.optimizer_encoder.zero_grad()\n\n def step(self):\n self._step += 1\n for stack, optimizer in self.optimizers.items():\n new_rate = self._update_rate(stack)\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = new_rate\n optimizer.step()\n self.current_learning_rates[stack] = new_rate\n", "output": ["build_predictor", "gelu", "tile", "TransformerDecoderLayer", "Translator", "TransformerDecoderState", "PositionalEncoding", "PenaltyBuilder", "BertSumOptimizer", "BertAbs", "TransformerDecoder", "PositionwiseFeedForward", "BertAbsPreTrainedModel", "GNMTGlobalScorer", "MultiHeadedAttention", "Bert", "DecoderState"], "metadata": {"file_path": "transformers-main/examples/research_projects/bertabs/modeling_bertabs.py", "file_length": 11928, "symbol_dict": [{"symbol": "build_predictor", "type": "mannual_defined_function", "byte_location": 24407, "location": 7662}, {"symbol": "gelu", "type": "mannual_defined_function", "byte_location": 23309, "location": 7243}, {"symbol": "tile", "type": "mannual_defined_function", "byte_location": 35929, "location": 11147}, {"symbol": "GNMTGlobalScorer", "type": "mannual_defined_class", "byte_location": 24719, "location": 7759}, {"symbol": "TransformerDecoder", "type": "mannual_defined_class", "byte_location": 5512, "location": 1797}, {"symbol": "DecoderState", "type": "mannual_defined_class", "byte_location": 19961, "location": 6255}, {"symbol": "MultiHeadedAttention", "type": "mannual_defined_class", "byte_location": 13997, "location": 4504}, {"symbol": "PositionwiseFeedForward", "type": "mannual_defined_class", "byte_location": 23421, "location": 7305}, {"symbol": "TransformerDecoderLayer", "type": "mannual_defined_class", "byte_location": 10561, "location": 3413}, {"symbol": "TransformerDecoderState", "type": "mannual_defined_class", "byte_location": 21074, "location": 6589}, {"symbol": "Translator", "type": "mannual_defined_class", "byte_location": 26636, "location": 8341}, {"symbol": "Bert", "type": "mannual_defined_class", "byte_location": 4930, "location": 1625}, {"symbol": "PenaltyBuilder", "type": "mannual_defined_class", "byte_location": 25381, "location": 7957}, {"symbol": "BertSumOptimizer", "type": "mannual_defined_class", "byte_location": 36508, "location": 11373}, {"symbol": "BertAbsPreTrainedModel", "type": 
"mannual_defined_class", "byte_location": 1494, "location": 497}, {"symbol": "PositionalEncoding", "type": "mannual_defined_class", "byte_location": 9649, "location": 3067}, {"symbol": "BertAbs", "type": "mannual_defined_class", "byte_location": 1635, "location": 545}]}} {"input": "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Masked Version of BERT. It replaces the `torch.nn.Linear` layers with\n:class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to\ncompute the adaptive mask.\nBuilt on top of `transformers.models.bert.modeling_bert`\"\"\"\n\n\nimport logging\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom emmental import MaskedBertConfig\nfrom emmental.modules import MaskedLinear\nfrom transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward\nfrom transformers.modeling_utils import PreTrainedModel, prune_linear_layer\nfrom transformers.models.bert.modeling_bert import ACT2FN, load_tf_weights_in_bert\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n 
super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention heads (%d)\"\n % (config.hidden_size, config.num_attention_heads)\n )\n self.output_attentions = config.output_attentions\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = MaskedLinear(\n config.hidden_size,\n self.all_head_size,\n pruning_method=config.pruning_method,\n mask_init=config.mask_init,\n mask_scale=config.mask_scale,\n )\n self.key = MaskedLinear(\n config.hidden_size,\n self.all_head_size,\n pruning_method=config.pruning_method,\n mask_init=config.mask_init,\n mask_scale=config.mask_scale,\n )\n self.value = MaskedLinear(\n config.hidden_size,\n self.all_head_size,\n pruning_method=config.pruning_method,\n mask_init=config.mask_init,\n mask_scale=config.mask_scale,\n )\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n threshold=None,\n ):\n mixed_query_layer = self.query(hidden_states, threshold=threshold)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n if encoder_hidden_states is not None:\n mixed_key_layer = self.key(encoder_hidden_states, threshold=threshold)\n mixed_value_layer = self.value(encoder_hidden_states, threshold=threshold)\n attention_mask = encoder_attention_mask\n else:\n mixed_key_layer = self.key(hidden_states, threshold=threshold)\n mixed_value_layer = self.value(hidden_states, threshold=threshold)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)\n return 
outputs\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = MaskedLinear(\n config.hidden_size,\n config.hidden_size,\n pruning_method=config.pruning_method,\n mask_init=config.mask_init,\n mask_scale=config.mask_scale,\n )\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor, threshold):\n hidden_states = self.dense(hidden_states, threshold=threshold)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads\n for head in heads:\n # Compute how many pruned heads are before the head and move the index accordingly\n head = head - sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n threshold=None,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n threshold=threshold,\n )\n attention_output = self.output(self_outputs[0], hidden_states, threshold=threshold)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = MaskedLinear(\n config.hidden_size,\n config.intermediate_size,\n pruning_method=config.pruning_method,\n mask_init=config.mask_init,\n mask_scale=config.mask_scale,\n )\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states, threshold):\n hidden_states = self.dense(hidden_states, threshold=threshold)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = MaskedLinear(\n config.intermediate_size,\n config.hidden_size,\n pruning_method=config.pruning_method,\n mask_init=config.mask_init,\n mask_scale=config.mask_scale,\n )\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor, threshold):\n hidden_states 
= self.dense(hidden_states, threshold=threshold)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = BertAttention(config)\n self.is_decoder = config.is_decoder\n if self.is_decoder:\n self.crossattention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n threshold=None,\n ):\n self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, threshold=threshold)\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n cross_attention_outputs = self.crossattention(\n attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights\n\n intermediate_output = self.intermediate(attention_output, threshold=threshold)\n layer_output = self.output(intermediate_output, attention_output, threshold=threshold)\n outputs = (layer_output,) + outputs\n return outputs\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n threshold=None,\n ):\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n threshold=threshold,\n )\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass MaskedBertPreTrainedModel(PreTrainedModel):\n \"\"\"An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = MaskedBertConfig\n load_tf_weights = load_tf_weights_in_bert\n 
base_model_prefix = \"bert\"\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nMASKED_BERT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch `torch.nn.Module `_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~emmental.MaskedBertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nMASKED_BERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.__call__` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask\n is used in the cross-attention if the model is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Masked Bert Model transformer outputting raw hidden-states without any specific head on top.\",\n MASKED_BERT_START_DOCSTRING,\n)\nclass MaskedBertModel(MaskedBertPreTrainedModel):\n \"\"\"\n The `MaskedBertModel` class replicates the :class:`~transformers.BertModel` class\n and adds specific inputs to compute the adaptive mask on the fly.\n Note that we freeze the embeddings modules from their pre-trained values.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertEmbeddings(config)\n self.embeddings.requires_grad_(requires_grad=False)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n threshold=None,\n ):\n r\"\"\"\n threshold (:obj:`float`):\n Threshold value (see :class:`~emmental.MaskedLinear`).\n\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. 
The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during pre-training.\n\n This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder:\n batch_size, seq_length = input_shape\n seq_ids = torch.arange(seq_length, device=device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n causal_mask = causal_mask.to(\n attention_mask.dtype\n ) # causal and attention masks must have same type with pytorch version < 1.3\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for input_ids (shape {}) or attention_mask (shape {})\".format(\n input_shape, attention_mask.shape\n )\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # If a 2D ou 3D attention mask is provided for the 
cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n\n if encoder_attention_mask.dim() == 3:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n elif encoder_attention_mask.dim() == 2:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})\".format(\n encoder_hidden_shape, encoder_attention_mask.shape\n )\n )\n\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(\n dtype=next(self.parameters()).dtype\n ) # fp16 compatibility\n encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = (\n head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n ) # We can specify head_mask for each layer\n head_mask = head_mask.to(\n dtype=next(self.parameters()).dtype\n ) # switch to float if need + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n threshold=threshold,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n outputs = (\n sequence_output,\n pooled_output,\n ) + encoder_outputs[1:] # add hidden_states and attentions if they are here\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Masked Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n MASKED_BERT_START_DOCSTRING,\n)\nclass MaskedBertForSequenceClassification(MaskedBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = MaskedBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n threshold=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n threshold (:obj:`float`):\n Threshold value (see :class:`~emmental.MaskedLinear`).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n threshold=threshold,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Masked Bert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
\"\"\",\n MASKED_BERT_START_DOCSTRING,\n)\nclass MaskedBertForMultipleChoice(MaskedBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = MaskedBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n threshold=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n threshold (:obj:`float`):\n Threshold value (see :class:`~emmental.MaskedLinear`).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):\n Classification loss.\n classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):\n `num_choices` is the second dimension of the input tensors. (see `input_ids` above).\n\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n \"\"\"\n num_choices = input_ids.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1))\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n threshold=threshold,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n outputs = (loss,) + outputs\n\n return outputs # (loss), reshaped_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Masked Bert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n MASKED_BERT_START_DOCSTRING,\n)\nclass MaskedBertForTokenClassification(MaskedBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = MaskedBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n threshold=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n threshold (:obj:`float`):\n Threshold value (see :class:`~emmental.MaskedLinear`).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :\n Classification loss.\n scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n threshold=threshold,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"Masked Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
\"\"\",\n MASKED_BERT_START_DOCSTRING,\n)\nclass MaskedBertForQuestionAnswering(MaskedBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = MaskedBertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n threshold=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n threshold (:obj:`float`):\n Threshold value (see :class:`~emmental.MaskedLinear`).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-start scores (before SoftMax).\n end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n threshold=threshold,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (\n start_logits,\n end_logits,\n ) + outputs[2:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = 
end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n", "output": ["BertIntermediate", "BertOutput", "MaskedBertModel", "BertAttention", "BertEncoder", "BertLayer", "BertPooler", "BertSelfAttention", "MaskedBertPreTrainedModel", "BertSelfOutput", "MaskedBertForMultipleChoice", "MaskedBertForQuestionAnswering", "MaskedBertForSequenceClassification", "BertEmbeddings", "MaskedBertForTokenClassification"], "metadata": {"file_path": "transformers-main/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py", "file_length": 13707, "symbol_dict": [{"symbol": "BertLayer", "type": "mannual_defined_class", "byte_location": 11863, "location": 3641}, {"symbol": "BertEncoder", "type": "mannual_defined_class", "byte_location": 13366, "location": 4058}, {"symbol": "BertSelfOutput", "type": "mannual_defined_class", "byte_location": 7521, "location": 2311}, {"symbol": "MaskedBertForSequenceClassification", "type": "mannual_defined_class", "byte_location": 30284, "location": 8918}, {"symbol": "MaskedBertPreTrainedModel", "type": "mannual_defined_class", "byte_location": 15503, "location": 4678}, {"symbol": "BertOutput", "type": "mannual_defined_class", "byte_location": 11094, "location": 3409}, {"symbol": "MaskedBertModel", "type": "mannual_defined_class", "byte_location": 20384, "location": 6077}, {"symbol": "MaskedBertForTokenClassification", "type": "mannual_defined_class", "byte_location": 38306, "location": 11239}, {"symbol": "BertIntermediate", "type": "mannual_defined_class", "byte_location": 10354, "location": 3193}, {"symbol": "BertAttention", "type": "mannual_defined_class", "byte_location": 8288, "location": 2543}, {"symbol": "MaskedBertForMultipleChoice", "type": "mannual_defined_class", "byte_location": 34231, "location": 10053}, {"symbol": "MaskedBertForQuestionAnswering", "type": "mannual_defined_class", "byte_location": 42300, "location": 12360}, {"symbol": "BertPooler", "type": "mannual_defined_class", "byte_location": 14971, "location": 4513}, {"symbol": "BertEmbeddings", "type": "mannual_defined_class", "byte_location": 1445, "location": 448}, {"symbol": "BertSelfAttention", "type": "mannual_defined_class", "byte_location": 3331, "location": 1053}]}} {"input": "# This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp\n\n# Copyright 2020 The HuggingFace Team and the AllenNLP authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUtilities for working with the local dataset cache.\n\"\"\"\n\nimport copy\nimport csv\nimport linecache\nimport os\nimport platform\nimport sys\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict, namedtuple\nfrom datetime import datetime\nfrom multiprocessing import Pipe, Process, Queue\nfrom multiprocessing.connection import Connection\nfrom typing import Callable, Iterable, List, NamedTuple, Optional, Union\n\nfrom .. import AutoConfig, PretrainedConfig\nfrom .. import __version__ as version\nfrom ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging\nfrom .benchmark_args_utils import BenchmarkArguments\n\n\nif is_torch_available():\n from torch.cuda import empty_cache as torch_empty_cache\n\nif is_tf_available():\n from tensorflow.python.eager import context as tf_context\n\nif is_psutil_available():\n import psutil\n\nif is_py3nvml_available():\n import py3nvml.py3nvml as nvml\n\nif platform.system() == \"Windows\":\n from signal import CTRL_C_EVENT as SIGKILL\nelse:\n from signal import SIGKILL\n\n\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n\n_is_memory_tracing_enabled = False\n\nBenchmarkOutput = namedtuple(\n \"BenchmarkOutput\",\n [\n \"time_inference_result\",\n \"memory_inference_result\",\n \"time_train_result\",\n \"memory_train_result\",\n \"inference_summary\",\n \"train_summary\",\n ],\n)\n\n\ndef separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:\n \"\"\"\n This function wraps another function into its own separated process. In order to ensure accurate memory\n measurements it is important that the function is executed in a separate process\n\n Args:\n - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process\n - `do_multi_processing`: (`bool`) Whether to run function on separate process or not\n \"\"\"\n\n def multi_process_func(*args, **kwargs):\n # run function in an individual\n # process to get correct memory\n def wrapper_func(queue: Queue, *args):\n try:\n result = func(*args)\n except Exception as e:\n logger.error(e)\n print(e)\n result = \"N/A\"\n queue.put(result)\n\n queue = Queue()\n p = Process(target=wrapper_func, args=[queue] + list(args))\n p.start()\n result = queue.get()\n p.join()\n return result\n\n if do_multi_processing:\n logger.info(f\"Function {func} is executed in its own process...\")\n return multi_process_func\n else:\n return func\n\n\ndef is_memory_tracing_enabled():\n global _is_memory_tracing_enabled\n return _is_memory_tracing_enabled\n\n\nclass Frame(NamedTuple):\n \"\"\"\n `Frame` is a NamedTuple used to gather the current frame state. 
`Frame` has the following fields:\n\n - 'filename' (string): Name of the file currently executed\n - 'module' (string): Name of the module currently executed\n - 'line_number' (int): Number of the line currently executed\n - 'event' (string): Event that triggered the tracing (default will be \"line\")\n - 'line_text' (string): Text of the line in the python script\n \"\"\"\n\n filename: str\n module: str\n line_number: int\n event: str\n line_text: str\n\n\nclass UsedMemoryState(NamedTuple):\n \"\"\"\n `UsedMemoryState` are named tuples with the following fields:\n\n - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file,\n location in current file)\n - 'cpu_memory': CPU RSS memory state *before* executing the line\n - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if\n provided)\n \"\"\"\n\n frame: Frame\n cpu_memory: int\n gpu_memory: int\n\n\nclass Memory(NamedTuple):\n \"\"\"\n `Memory` NamedTuple have a single field `bytes` and you can get a human readable str of the number of mega bytes by\n calling `__repr__`\n\n - `byte` (integer): number of bytes,\n \"\"\"\n\n bytes: int\n\n def __repr__(self) -> str:\n return str(bytes_to_mega_bytes(self.bytes))\n\n\nclass MemoryState(NamedTuple):\n \"\"\"\n `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:\n\n - `frame` (`Frame`): the current frame (see above)\n - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple\n - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple\n - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple\n \"\"\"\n\n frame: Frame\n cpu: Memory\n gpu: Memory\n cpu_gpu: Memory\n\n\nclass MemorySummary(NamedTuple):\n \"\"\"\n `MemorySummary` namedtuple otherwise with the fields:\n\n - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by\n subtracting the memory after executing each line from the memory before executing said line.\n - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line\n obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted\n from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory\n is released)\n - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with\n memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).\n \"\"\"\n\n sequential: List[MemoryState]\n cumulative: List[MemoryState]\n current: List[MemoryState]\n total: Memory\n\n\nMemoryTrace = List[UsedMemoryState]\n\n\ndef measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:\n \"\"\"\n measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and\n at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package\n `memory_profiler`:\n https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239\n\n Args:\n - `function`: (`callable`): function() -> ... 
function without any arguments to measure for which to measure\n the peak memory\n\n - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage\n\n - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage\n\n Returns:\n\n - `max_memory`: (`int`) consumed memory peak in Bytes\n \"\"\"\n\n def get_cpu_memory(process_id: int) -> int:\n \"\"\"\n measures current cpu memory usage of a given `process_id`\n\n Args:\n - `process_id`: (`int`) process_id for which to measure memory\n\n Returns\n\n - `memory`: (`int`) consumed memory in Bytes\n \"\"\"\n process = psutil.Process(process_id)\n try:\n meminfo_attr = \"memory_info\" if hasattr(process, \"memory_info\") else \"get_memory_info\"\n memory = getattr(process, meminfo_attr)()[0]\n except psutil.AccessDenied:\n raise ValueError(\"Error with Psutil.\")\n return memory\n\n if not is_psutil_available():\n logger.warning(\n \"Psutil not installed, we won't log CPU memory usage. \"\n \"Install Psutil (pip install psutil) to use CPU memory tracing.\"\n )\n max_memory = \"N/A\"\n else:\n\n class MemoryMeasureProcess(Process):\n\n \"\"\"\n `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the\n memory usage of a process\n \"\"\"\n\n def __init__(self, process_id: int, child_connection: Connection, interval: float):\n super().__init__()\n self.process_id = process_id\n self.interval = interval\n self.connection = child_connection\n self.num_measurements = 1\n self.mem_usage = get_cpu_memory(self.process_id)\n\n def run(self):\n self.connection.send(0)\n stop = False\n while True:\n self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))\n self.num_measurements += 1\n\n if stop:\n break\n\n stop = self.connection.poll(self.interval)\n\n # send results to parent pipe\n self.connection.send(self.mem_usage)\n self.connection.send(self.num_measurements)\n\n while True:\n # create child, parent connection\n child_connection, parent_connection = Pipe()\n\n # instantiate process\n mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)\n mem_process.start()\n\n # wait until we get memory\n parent_connection.recv()\n\n try:\n # execute function\n function()\n\n # start parent connection\n parent_connection.send(0)\n\n # receive memory and num measurements\n max_memory = parent_connection.recv()\n num_measurements = parent_connection.recv()\n except Exception:\n # kill process in a clean way\n parent = psutil.Process(os.getpid())\n for child in parent.children(recursive=True):\n os.kill(child.pid, SIGKILL)\n mem_process.join(0)\n raise RuntimeError(\"Process killed. Error in Process\")\n\n # run process at least 20 * interval or until it finishes\n mem_process.join(20 * interval)\n\n if (num_measurements > 4) or (interval < 1e-6):\n break\n\n # reduce interval\n interval /= 10\n\n return max_memory\n\n\ndef start_memory_tracing(\n modules_to_trace: Optional[Union[str, Iterable[str]]] = None,\n modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,\n events_to_trace: str = \"line\",\n gpus_to_trace: Optional[List[int]] = None,\n) -> MemoryTrace:\n \"\"\"\n Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for\n usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory \"Resident\n Set Size\u201d (the non-swapped physical memory the process is using). 
See\n https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info\n\n Args:\n - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list\n of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or\n 'transformers.models.gpt2.modeling_gpt2')\n - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list\n of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')\n - `events_to_trace`: string or list of string of events to be recorded (see official python doc for\n `sys.settrace` for the list of events) default to line\n - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs\n\n Return:\n\n - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).\n\n - `UsedMemoryState` are named tuples with the following fields:\n\n - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current\n file, location in current file)\n - 'cpu_memory': CPU RSS memory state *before* executing the line\n - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only\n `gpus_to_trace` if provided)\n\n `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following\n fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module\n currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that\n triggered the tracing (default will be \"line\") - 'line_text' (string): Text of the line in the python script\n\n \"\"\"\n if is_psutil_available():\n process = psutil.Process(os.getpid())\n else:\n logger.warning(\n \"Psutil not installed, we won't log CPU memory usage. \"\n \"Install psutil (pip install psutil) to use CPU memory tracing.\"\n )\n process = None\n\n if is_py3nvml_available():\n try:\n nvml.nvmlInit()\n devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace\n nvml.nvmlShutdown()\n except (OSError, nvml.NVMLError):\n logger.warning(\"Error while initializing communication with GPU. We won't perform GPU memory tracing.\")\n log_gpu = False\n else:\n log_gpu = is_torch_available() or is_tf_available()\n else:\n logger.warning(\n \"py3nvml not installed, we won't log GPU memory usage. 
\"\n \"Install py3nvml (pip install py3nvml) to use GPU memory tracing.\"\n )\n log_gpu = False\n\n memory_trace = []\n\n def traceit(frame, event, args):\n \"\"\"\n Tracing method executed before running each line in a module or sub-module Record memory allocated in a list\n with debugging information\n \"\"\"\n global _is_memory_tracing_enabled\n\n if not _is_memory_tracing_enabled:\n return traceit\n\n # Filter events\n if events_to_trace is not None:\n if isinstance(events_to_trace, str) and event != events_to_trace:\n return traceit\n elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:\n return traceit\n\n if \"__name__\" not in frame.f_globals:\n return traceit\n\n # Filter modules\n name = frame.f_globals[\"__name__\"]\n if not isinstance(name, str):\n return traceit\n else:\n # Filter whitelist of modules to trace\n if modules_to_trace is not None:\n if isinstance(modules_to_trace, str) and modules_to_trace not in name:\n return traceit\n elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):\n return traceit\n\n # Filter blacklist of modules not to trace\n if modules_not_to_trace is not None:\n if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:\n return traceit\n elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):\n return traceit\n\n # Record current tracing state (file, location in file...)\n lineno = frame.f_lineno\n filename = frame.f_globals[\"__file__\"]\n if filename.endswith(\".pyc\") or filename.endswith(\".pyo\"):\n filename = filename[:-1]\n line = linecache.getline(filename, lineno).rstrip()\n traced_state = Frame(filename, name, lineno, event, line)\n\n # Record current memory state (rss memory) and compute difference with previous memory state\n cpu_mem = 0\n if process is not None:\n mem = process.memory_info()\n cpu_mem = mem.rss\n\n gpu_mem = 0\n if log_gpu:\n # Clear GPU caches\n if is_torch_available():\n torch_empty_cache()\n if is_tf_available():\n tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802\n\n # Sum used memory for all GPUs\n nvml.nvmlInit()\n\n for i in devices:\n handle = nvml.nvmlDeviceGetHandleByIndex(i)\n meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)\n gpu_mem += meminfo.used\n\n nvml.nvmlShutdown()\n\n mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)\n memory_trace.append(mem_state)\n\n return traceit\n\n sys.settrace(traceit)\n\n global _is_memory_tracing_enabled\n _is_memory_tracing_enabled = True\n\n return memory_trace\n\n\ndef stop_memory_tracing(\n memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True\n) -> Optional[MemorySummary]:\n \"\"\"\n Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.\n\n Args:\n `memory_trace` (optional output of start_memory_tracing, default: None):\n memory trace to convert in summary\n `ignore_released_memory` (boolean, default: None):\n if True we only sum memory increase to compute total memory\n\n Return:\n\n - None if `memory_trace` is None\n - `MemorySummary` namedtuple otherwise with the fields:\n\n - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by\n subtracting the memory after executing each line from the memory before executing said line.\n - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each\n line 
obtained by summing repeated memory increase for a line if it's executed several times. The list is\n sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative\n if memory is released)\n - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with\n memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).\n\n `Memory` named tuple have fields\n\n - `byte` (integer): number of bytes,\n - `string` (string): same as human readable string (ex: \"3.5MB\")\n\n `Frame` are namedtuple used to list the current frame state and have the following fields:\n\n - 'filename' (string): Name of the file currently executed\n - 'module' (string): Name of the module currently executed\n - 'line_number' (int): Number of the line currently executed\n - 'event' (string): Event that triggered the tracing (default will be \"line\")\n - 'line_text' (string): Text of the line in the python script\n\n `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:\n\n - `frame` (`Frame`): the current frame (see above)\n - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple\n - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple\n - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple\n \"\"\"\n global _is_memory_tracing_enabled\n _is_memory_tracing_enabled = False\n\n if memory_trace is not None and len(memory_trace) > 1:\n memory_diff_trace = []\n memory_curr_trace = []\n\n cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])\n\n for (\n (frame, cpu_mem, gpu_mem),\n (next_frame, next_cpu_mem, next_gpu_mem),\n ) in zip(memory_trace[:-1], memory_trace[1:]):\n cpu_mem_inc = next_cpu_mem - cpu_mem\n gpu_mem_inc = next_gpu_mem - gpu_mem\n cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc\n memory_diff_trace.append(\n MemoryState(\n frame=frame,\n cpu=Memory(cpu_mem_inc),\n gpu=Memory(gpu_mem_inc),\n cpu_gpu=Memory(cpu_gpu_mem_inc),\n )\n )\n\n memory_curr_trace.append(\n MemoryState(\n frame=frame,\n cpu=Memory(next_cpu_mem),\n gpu=Memory(next_gpu_mem),\n cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),\n )\n )\n\n cumulative_memory_dict[frame][0] += cpu_mem_inc\n cumulative_memory_dict[frame][1] += gpu_mem_inc\n cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc\n\n cumulative_memory = sorted(\n cumulative_memory_dict.items(), key=lambda x: x[1][2], reverse=True\n ) # order by the total CPU + GPU memory increase\n cumulative_memory = [\n MemoryState(\n frame=frame,\n cpu=Memory(cpu_mem_inc),\n gpu=Memory(gpu_mem_inc),\n cpu_gpu=Memory(cpu_gpu_mem_inc),\n )\n for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory\n ]\n\n memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)\n\n if ignore_released_memory:\n total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)\n else:\n total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)\n\n total_memory = Memory(total_memory)\n\n return MemorySummary(\n sequential=memory_diff_trace,\n cumulative=cumulative_memory,\n current=memory_curr_trace,\n total=total_memory,\n )\n\n return None\n\n\ndef bytes_to_mega_bytes(memory_amount: int) -> int:\n \"\"\"Utility to convert a number of bytes (int) into a number of mega bytes (int)\"\"\"\n return memory_amount >> 20\n\n\nclass Benchmark(ABC):\n \"\"\"\n Benchmarks is a simple but 
feature-complete benchmarking script to compare memory and time performance of models in\n Transformers.\n \"\"\"\n\n args: BenchmarkArguments\n configs: PretrainedConfig\n framework: str\n\n def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):\n self.args = args\n if configs is None:\n self.config_dict = {\n model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names\n }\n else:\n self.config_dict = dict(zip(self.args.model_names, configs))\n\n warnings.warn(\n f\"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils\"\n \" are deprecated in general and it is advised to use external Benchmarking libraries \"\n \" to benchmark Transformer models.\",\n FutureWarning,\n )\n\n if self.args.memory and os.getenv(\"TRANSFORMERS_USE_MULTIPROCESSING\") == 0:\n logger.warning(\n \"Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The\"\n \" flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing.\"\n )\n\n self._print_fn = None\n self._framework_version = None\n self._environment_info = None\n\n @property\n def print_fn(self):\n if self._print_fn is None:\n if self.args.log_print:\n\n def print_and_log(*args):\n with open(self.args.log_filename, \"a\") as log_file:\n log_file.write(\"\".join(args) + \"\\n\")\n print(*args)\n\n self._print_fn = print_and_log\n else:\n self._print_fn = print\n return self._print_fn\n\n @property\n @abstractmethod\n def framework_version(self):\n pass\n\n @abstractmethod\n def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:\n pass\n\n @abstractmethod\n def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:\n pass\n\n @abstractmethod\n def _inference_memory(\n self, model_name: str, batch_size: int, sequence_length: int\n ) -> [Memory, Optional[MemorySummary]]:\n pass\n\n @abstractmethod\n def _train_memory(\n self, model_name: str, batch_size: int, sequence_length: int\n ) -> [Memory, Optional[MemorySummary]]:\n pass\n\n def inference_speed(self, *args, **kwargs) -> float:\n return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)\n\n def train_speed(self, *args, **kwargs) -> float:\n return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)\n\n def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:\n return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)\n\n def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:\n return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)\n\n def run(self):\n result_dict = {model_name: {} for model_name in self.args.model_names}\n inference_result_time = copy.deepcopy(result_dict)\n inference_result_memory = copy.deepcopy(result_dict)\n train_result_time = copy.deepcopy(result_dict)\n train_result_memory = copy.deepcopy(result_dict)\n\n for c, model_name in enumerate(self.args.model_names):\n self.print_fn(f\"{c + 1} / {len(self.args.model_names)}\")\n\n model_dict = {\n \"bs\": self.args.batch_sizes,\n \"ss\": self.args.sequence_lengths,\n \"result\": {i: {} for i in self.args.batch_sizes},\n }\n inference_result_time[model_name] = copy.deepcopy(model_dict)\n inference_result_memory[model_name] = copy.deepcopy(model_dict)\n train_result_time[model_name] = 
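The public `inference_speed` / `train_memory` wrappers above delegate to `separate_process_wrapper_fn` (defined earlier in this file) so that each measurement can run in its own process and release all of its memory afterwards. The snippet below is only a sketch of that idea under a "fork" start method, not the actual implementation; `run_isolated` is a hypothetical name.

from multiprocessing import Pipe, Process


def run_isolated(fn, *args):
    """Run fn(*args) in a child process and return its result to the parent."""

    def target(conn, *fn_args):
        conn.send(fn(*fn_args))  # ship the result back over the pipe
        conn.close()

    parent_conn, child_conn = Pipe()
    proc = Process(target=target, args=(child_conn, *args))  # assumes "fork", so the closure need not be pickled
    proc.start()
    result = parent_conn.recv()
    proc.join()
    return result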
copy.deepcopy(model_dict)\n train_result_memory[model_name] = copy.deepcopy(model_dict)\n\n inference_summary = train_summary = None\n\n for batch_size in self.args.batch_sizes:\n for sequence_length in self.args.sequence_lengths:\n if self.args.inference:\n if self.args.memory:\n memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)\n inference_result_memory[model_name][\"result\"][batch_size][sequence_length] = memory\n if self.args.speed:\n time = self.inference_speed(model_name, batch_size, sequence_length)\n inference_result_time[model_name][\"result\"][batch_size][sequence_length] = time\n\n if self.args.training:\n if self.args.memory:\n memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)\n train_result_memory[model_name][\"result\"][batch_size][sequence_length] = memory\n if self.args.speed:\n time = self.train_speed(model_name, batch_size, sequence_length)\n train_result_time[model_name][\"result\"][batch_size][sequence_length] = time\n\n if self.args.inference:\n if self.args.speed:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"INFERENCE - SPEED - RESULT\").center(40) + 20 * \"=\")\n self.print_results(inference_result_time, type_label=\"Time in s\")\n self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)\n if self.args.is_tpu:\n self.print_fn(\n \"TPU was used for inference. Note that the time after compilation stabilized (after ~10\"\n \" inferences model.forward(..) calls) was measured.\"\n )\n\n if self.args.memory:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"INFERENCE - MEMORY - RESULT\").center(40) + 20 * \"=\")\n self.print_results(inference_result_memory, type_label=\"Memory in MB\")\n self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)\n\n if self.args.trace_memory_line_by_line:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"INFERENCE - MEMOMRY - LINE BY LINE - SUMMARY\").center(40) + 20 * \"=\")\n self.print_memory_trace_statistics(inference_summary)\n\n if self.args.training:\n if self.args.speed:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"TRAIN - SPEED - RESULTS\").center(40) + 20 * \"=\")\n self.print_results(train_result_time, \"Time in s\")\n self.save_to_csv(train_result_time, self.args.train_time_csv_file)\n if self.args.is_tpu:\n self.print_fn(\n \"TPU was used for training. Note that the time after compilation stabilized (after ~10 train\"\n \" loss=model.forward(...) 
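For reference, the nested result dictionaries filled in by `run()` above map model name -> {"bs", "ss", "result"}, with results keyed by batch size and then sequence length (the model name and values below are invented).

result_dict = {
    "some-model": {
        "bs": [8],
        "ss": [128],
        "result": {8: {128: 0.042}},  # e.g. seconds per pass, or MB of memory
    }
}
print(result_dict["some-model"]["result"][8][128])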
+ loss.backward() calls) was measured.\"\n )\n\n if self.args.memory:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"TRAIN - MEMORY - RESULTS\").center(40) + 20 * \"=\")\n self.print_results(train_result_memory, type_label=\"Memory in MB\")\n self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)\n\n if self.args.trace_memory_line_by_line:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"TRAIN - MEMOMRY - LINE BY LINE - SUMMARY\").center(40) + 20 * \"=\")\n self.print_memory_trace_statistics(train_summary)\n\n if self.args.env_print:\n self.print_fn(\"\\n\" + 20 * \"=\" + (\"ENVIRONMENT INFORMATION\").center(40) + 20 * \"=\")\n self.print_fn(\"\\n\".join([f\"- {prop}: {val}\" for prop, val in self.environment_info.items()]) + \"\\n\")\n\n if self.args.save_to_csv:\n with open(self.args.env_info_csv_file, mode=\"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file)\n for key, value in self.environment_info.items():\n writer.writerow([key, value])\n\n return BenchmarkOutput(\n inference_result_time,\n inference_result_memory,\n train_result_time,\n train_result_memory,\n inference_summary,\n train_summary,\n )\n\n @property\n def environment_info(self):\n if self._environment_info is None:\n info = {}\n info[\"transformers_version\"] = version\n info[\"framework\"] = self.framework\n if self.framework == \"PyTorch\":\n info[\"use_torchscript\"] = self.args.torchscript\n if self.framework == \"TensorFlow\":\n info[\"eager_mode\"] = self.args.eager_mode\n info[\"use_xla\"] = self.args.use_xla\n info[\"framework_version\"] = self.framework_version\n info[\"python_version\"] = platform.python_version()\n info[\"system\"] = platform.system()\n info[\"cpu\"] = platform.processor()\n info[\"architecture\"] = platform.architecture()[0]\n info[\"date\"] = datetime.date(datetime.now())\n info[\"time\"] = datetime.time(datetime.now())\n info[\"fp16\"] = self.args.fp16\n info[\"use_multiprocessing\"] = self.args.do_multi_processing\n info[\"only_pretrain_model\"] = self.args.only_pretrain_model\n\n if is_psutil_available():\n info[\"cpu_ram_mb\"] = bytes_to_mega_bytes(psutil.virtual_memory().total)\n else:\n logger.warning(\n \"Psutil not installed, we won't log available CPU memory. \"\n \"Install psutil (pip install psutil) to log available CPU memory.\"\n )\n info[\"cpu_ram_mb\"] = \"N/A\"\n\n info[\"use_gpu\"] = self.args.is_gpu\n if self.args.is_gpu:\n info[\"num_gpus\"] = 1 # TODO(PVP) Currently only single GPU is supported\n if is_py3nvml_available():\n nvml.nvmlInit()\n handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)\n info[\"gpu\"] = nvml.nvmlDeviceGetName(handle)\n info[\"gpu_ram_mb\"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)\n info[\"gpu_power_watts\"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000\n info[\"gpu_performance_state\"] = nvml.nvmlDeviceGetPerformanceState(handle)\n nvml.nvmlShutdown()\n else:\n logger.warning(\n \"py3nvml not installed, we won't log GPU memory usage. 
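The GPU queries in `environment_info` above can be reproduced standalone as follows (assumes py3nvml is installed and an NVIDIA GPU is visible; device index 0 is an arbitrary choice).

from py3nvml import py3nvml as nvml

nvml.nvmlInit()
handle = nvml.nvmlDeviceGetHandleByIndex(0)
print("gpu:", nvml.nvmlDeviceGetName(handle))
print("gpu_ram_mb:", nvml.nvmlDeviceGetMemoryInfo(handle).total >> 20)
print("gpu_power_watts:", nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000)
nvml.nvmlShutdown()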
\"\n \"Install py3nvml (pip install py3nvml) to log information about GPU.\"\n )\n info[\"gpu\"] = \"N/A\"\n info[\"gpu_ram_mb\"] = \"N/A\"\n info[\"gpu_power_watts\"] = \"N/A\"\n info[\"gpu_performance_state\"] = \"N/A\"\n\n info[\"use_tpu\"] = self.args.is_tpu\n # TODO(PVP): See if we can add more information about TPU\n # see: https://github.com/pytorch/xla/issues/2180\n\n self._environment_info = info\n return self._environment_info\n\n def print_results(self, result_dict, type_label):\n self.print_fn(80 * \"-\")\n self.print_fn(\n \"Model Name\".center(30) + \"Batch Size\".center(15) + \"Seq Length\".center(15) + type_label.center(15)\n )\n self.print_fn(80 * \"-\")\n for model_name in self.args.model_names:\n for batch_size in result_dict[model_name][\"bs\"]:\n for sequence_length in result_dict[model_name][\"ss\"]:\n result = result_dict[model_name][\"result\"][batch_size][sequence_length]\n if isinstance(result, float):\n result = round(1000 * result) / 1000\n result = \"< 0.001\" if result == 0.0 else str(result)\n else:\n result = str(result)\n self.print_fn(\n model_name[:30].center(30) + str(batch_size).center(15),\n str(sequence_length).center(15),\n result.center(15),\n )\n self.print_fn(80 * \"-\")\n\n def print_memory_trace_statistics(self, summary: MemorySummary):\n self.print_fn(\n \"\\nLine by line memory consumption:\\n\"\n + \"\\n\".join(\n f\"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}\"\n for state in summary.sequential\n )\n )\n self.print_fn(\n \"\\nLines with top memory consumption:\\n\"\n + \"\\n\".join(\n f\"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}\"\n for state in summary.cumulative[:6]\n )\n )\n self.print_fn(\n \"\\nLines with lowest memory consumption:\\n\"\n + \"\\n\".join(\n f\"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}\"\n for state in summary.cumulative[-6:]\n )\n )\n self.print_fn(f\"\\nTotal memory increase: {summary.total}\")\n\n def save_to_csv(self, result_dict, filename):\n if not self.args.save_to_csv:\n return\n self.print_fn(\"Saving results to csv.\")\n with open(filename, mode=\"w\") as csv_file:\n if len(self.args.model_names) <= 0:\n raise ValueError(f\"At least 1 model should be defined, but got {self.model_names}\")\n\n fieldnames = [\"model\", \"batch_size\", \"sequence_length\"]\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames + [\"result\"])\n writer.writeheader()\n\n for model_name in self.args.model_names:\n result_dict_model = result_dict[model_name][\"result\"]\n for bs in result_dict_model:\n for ss in result_dict_model[bs]:\n result_model = result_dict_model[bs][ss]\n writer.writerow(\n {\n \"model\": model_name,\n \"batch_size\": bs,\n \"sequence_length\": ss,\n \"result\": (\"{}\" if not isinstance(result_model, float) else \"{:.4f}\").format(\n result_model\n ),\n }\n )\n", "output": ["separate_process_wrapper_fn", "stop_memory_tracing", "start_memory_tracing", "is_memory_tracing_enabled", "bytes_to_mega_bytes", "measure_peak_memory_cpu", "Benchmark", "MemorySummary", "MemoryMeasureProcess", "MemoryState", "Frame", "Memory", "UsedMemoryState"], "metadata": {"file_path": "transformers-main/src/transformers/benchmark/benchmark_utils.py", "file_length": 10602, "symbol_dict": [{"symbol": "separate_process_wrapper_fn", "type": "mannual_defined_function", "byte_location": 2166, "location": 643}, {"symbol": "is_memory_tracing_enabled", "type": 
"mannual_defined_function", "byte_location": 3433, "location": 968}, {"symbol": "bytes_to_mega_bytes", "type": "mannual_defined_function", "byte_location": 22513, "location": 6231}, {"symbol": "stop_memory_tracing", "type": "mannual_defined_function", "byte_location": 17535, "location": 4847}, {"symbol": "measure_peak_memory_cpu", "type": "mannual_defined_function", "byte_location": 6653, "location": 1845}, {"symbol": "start_memory_tracing", "type": "mannual_defined_function", "byte_location": 10945, "location": 2970}, {"symbol": "MemoryState", "type": "mannual_defined_class", "byte_location": 5012, "location": 1429}, {"symbol": "Frame", "type": "mannual_defined_class", "byte_location": 3544, "location": 1008}, {"symbol": "Benchmark", "type": "mannual_defined_class", "byte_location": 22685, "location": 6288}, {"symbol": "MemoryMeasureProcess", "type": "mannual_defined_class", "byte_location": 8480, "location": 2383}, {"symbol": "Memory", "type": "mannual_defined_class", "byte_location": 4679, "location": 1322}, {"symbol": "UsedMemoryState", "type": "mannual_defined_class", "byte_location": 4139, "location": 1170}, {"symbol": "MemorySummary", "type": "mannual_defined_class", "byte_location": 5566, "location": 1584}]}} {"input": "# coding=utf-8\n# Copyright 2022 Microsoft, clefourrier The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Graphormer model.\"\"\"\n\nimport math\nfrom typing import Iterable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutputWithNoAttention,\n SequenceClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_graphormer import GraphormerConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"graphormer-base-pcqm4mv1\"\n_CONFIG_FOR_DOC = \"GraphormerConfig\"\n\n\nGRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"clefourrier/graphormer-base-pcqm4mv1\",\n \"clefourrier/graphormer-base-pcqm4mv2\",\n # See all Graphormer models at https://huggingface.co/models?filter=graphormer\n]\n\n\ndef quant_noise(module: nn.Module, p: float, block_size: int):\n \"\"\"\n From:\n https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/quant_noise.py\n\n Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product\n Quantization as described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional 
weights, see \"And the Bit Goes Down:\n Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping\n blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n if not isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)):\n raise NotImplementedError(\"Module unsupported for quant_noise.\")\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n if module.weight.size(1) % block_size != 0:\n raise AssertionError(\"Input features must be a multiple of block sizes\")\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n if module.in_channels % block_size != 0:\n raise AssertionError(\"Input channels must be a multiple of block sizes\")\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n if k % block_size != 0:\n raise AssertionError(\"Kernel size must be a multiple of block size\")\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(\n int(in_channels // block_size * out_channels),\n device=weight.device,\n )\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module\n\n\nclass LayerDropModuleList(nn.ModuleList):\n \"\"\"\n From:\n https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/layer_drop.py\n A LayerDrop implementation based on [`torch.nn.ModuleList`]. LayerDrop as described in\n https://arxiv.org/abs/1909.11556.\n\n We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. 
During\n evaluation we always iterate over all layers.\n\n Usage:\n\n ```python\n layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])\n for layer in layers: # this might iterate over layers 1 and 3\n x = layer(x)\n for layer in layers: # this might iterate over all layers\n x = layer(x)\n for layer in layers: # this might not iterate over any layers\n x = layer(x)\n ```\n\n Args:\n p (float): probability of dropping out each layer\n modules (iterable, optional): an iterable of modules to add\n \"\"\"\n\n def __init__(self, p: float, modules: Optional[Iterable[nn.Module]] = None):\n super().__init__(modules)\n self.p = p\n\n def __iter__(self) -> Iterator[nn.Module]:\n dropout_probs = torch.empty(len(self)).uniform_()\n for i, m in enumerate(super().__iter__()):\n if not self.training or (dropout_probs[i] > self.p):\n yield m\n\n\nclass GraphormerGraphNodeFeature(nn.Module):\n \"\"\"\n Compute node features for each node in the graph.\n \"\"\"\n\n def __init__(self, config: GraphormerConfig):\n super().__init__()\n self.num_heads = config.num_attention_heads\n self.num_atoms = config.num_atoms\n\n self.atom_encoder = nn.Embedding(config.num_atoms + 1, config.hidden_size, padding_idx=config.pad_token_id)\n self.in_degree_encoder = nn.Embedding(\n config.num_in_degree, config.hidden_size, padding_idx=config.pad_token_id\n )\n self.out_degree_encoder = nn.Embedding(\n config.num_out_degree, config.hidden_size, padding_idx=config.pad_token_id\n )\n\n self.graph_token = nn.Embedding(1, config.hidden_size)\n\n def forward(\n self,\n input_nodes: torch.LongTensor,\n in_degree: torch.LongTensor,\n out_degree: torch.LongTensor,\n ) -> torch.Tensor:\n n_graph, n_node = input_nodes.size()[:2]\n\n node_feature = ( # node feature + graph token\n self.atom_encoder(input_nodes).sum(dim=-2) # [n_graph, n_node, n_hidden]\n + self.in_degree_encoder(in_degree)\n + self.out_degree_encoder(out_degree)\n )\n\n graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)\n\n graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)\n\n return graph_node_feature\n\n\nclass GraphormerGraphAttnBias(nn.Module):\n \"\"\"\n Compute attention bias for each head.\n \"\"\"\n\n def __init__(self, config: GraphormerConfig):\n super().__init__()\n self.num_heads = config.num_attention_heads\n self.multi_hop_max_dist = config.multi_hop_max_dist\n\n # We do not change edge feature embedding learning, as edge embeddings are represented as a combination of the original features\n # + shortest path\n self.edge_encoder = nn.Embedding(config.num_edges + 1, config.num_attention_heads, padding_idx=0)\n\n self.edge_type = config.edge_type\n if self.edge_type == \"multi_hop\":\n self.edge_dis_encoder = nn.Embedding(\n config.num_edge_dis * config.num_attention_heads * config.num_attention_heads,\n 1,\n )\n\n self.spatial_pos_encoder = nn.Embedding(config.num_spatial, config.num_attention_heads, padding_idx=0)\n\n self.graph_token_virtual_distance = nn.Embedding(1, config.num_attention_heads)\n\n def forward(\n self,\n input_nodes: torch.LongTensor,\n attn_bias: torch.Tensor,\n spatial_pos: torch.LongTensor,\n input_edges: torch.LongTensor,\n attn_edge_type: torch.LongTensor,\n ) -> torch.Tensor:\n n_graph, n_node = input_nodes.size()[:2]\n graph_attn_bias = attn_bias.clone()\n graph_attn_bias = graph_attn_bias.unsqueeze(1).repeat(\n 1, self.num_heads, 1, 1\n ) # [n_graph, n_head, n_node+1, n_node+1]\n\n # spatial pos\n # [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, 
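Shape walk-through for `GraphormerGraphNodeFeature.forward` above, with hypothetical sizes; the tensor is only a stand-in and the comments trace the shapes.

import torch

n_graph, n_node, n_atom_features, hidden = 2, 5, 3, 16
input_nodes = torch.randint(1, 10, (n_graph, n_node, n_atom_features))
# atom_encoder(input_nodes)        -> [n_graph, n_node, n_atom_features, hidden]
# .sum(dim=-2)                     -> [n_graph, n_node, hidden]
# + in/out degree embeddings       -> [n_graph, n_node, hidden]
# graph token repeated per graph   -> [n_graph, 1, hidden]
# torch.cat(..., dim=1)            -> [n_graph, n_node + 1, hidden]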
n_node]\n spatial_pos_bias = self.spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2)\n graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + spatial_pos_bias\n\n # reset spatial pos here\n t = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)\n graph_attn_bias[:, :, 1:, 0] = graph_attn_bias[:, :, 1:, 0] + t\n graph_attn_bias[:, :, 0, :] = graph_attn_bias[:, :, 0, :] + t\n\n # edge feature\n if self.edge_type == \"multi_hop\":\n spatial_pos_ = spatial_pos.clone()\n\n spatial_pos_[spatial_pos_ == 0] = 1 # set pad to 1\n # set 1 to 1, input_nodes > 1 to input_nodes - 1\n spatial_pos_ = torch.where(spatial_pos_ > 1, spatial_pos_ - 1, spatial_pos_)\n if self.multi_hop_max_dist > 0:\n spatial_pos_ = spatial_pos_.clamp(0, self.multi_hop_max_dist)\n input_edges = input_edges[:, :, :, : self.multi_hop_max_dist, :]\n # [n_graph, n_node, n_node, max_dist, n_head]\n\n input_edges = self.edge_encoder(input_edges).mean(-2)\n max_dist = input_edges.size(-2)\n edge_input_flat = input_edges.permute(3, 0, 1, 2, 4).reshape(max_dist, -1, self.num_heads)\n edge_input_flat = torch.bmm(\n edge_input_flat,\n self.edge_dis_encoder.weight.reshape(-1, self.num_heads, self.num_heads)[:max_dist, :, :],\n )\n input_edges = edge_input_flat.reshape(max_dist, n_graph, n_node, n_node, self.num_heads).permute(\n 1, 2, 3, 0, 4\n )\n input_edges = (input_edges.sum(-2) / (spatial_pos_.float().unsqueeze(-1))).permute(0, 3, 1, 2)\n else:\n # [n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]\n input_edges = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)\n\n graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + input_edges\n graph_attn_bias = graph_attn_bias + attn_bias.unsqueeze(1) # reset\n\n return graph_attn_bias\n\n\nclass GraphormerMultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(self, config: GraphormerConfig):\n super().__init__()\n self.embedding_dim = config.embedding_dim\n self.kdim = config.kdim if config.kdim is not None else config.embedding_dim\n self.vdim = config.vdim if config.vdim is not None else config.embedding_dim\n self.qkv_same_dim = self.kdim == config.embedding_dim and self.vdim == config.embedding_dim\n\n self.num_heads = config.num_attention_heads\n self.attention_dropout_module = torch.nn.Dropout(p=config.attention_dropout, inplace=False)\n\n self.head_dim = config.embedding_dim // config.num_attention_heads\n if not (self.head_dim * config.num_attention_heads == self.embedding_dim):\n raise AssertionError(\"The embedding_dim must be divisible by num_heads.\")\n self.scaling = self.head_dim**-0.5\n\n self.self_attention = True # config.self_attention\n if not (self.self_attention):\n raise NotImplementedError(\"The Graphormer model only supports self attention for now.\")\n if self.self_attention and not self.qkv_same_dim:\n raise AssertionError(\"Self-attention requires query, key and value to be of the same size.\")\n\n self.k_proj = quant_noise(\n nn.Linear(self.kdim, config.embedding_dim, bias=config.bias),\n config.q_noise,\n config.qn_block_size,\n )\n self.v_proj = quant_noise(\n nn.Linear(self.vdim, config.embedding_dim, bias=config.bias),\n config.q_noise,\n config.qn_block_size,\n )\n self.q_proj = quant_noise(\n nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias),\n config.q_noise,\n config.qn_block_size,\n )\n\n self.out_proj = quant_noise(\n nn.Linear(config.embedding_dim, config.embedding_dim, 
bias=config.bias),\n config.q_noise,\n config.qn_block_size,\n )\n\n self.onnx_trace = False\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.0)\n\n def forward(\n self,\n query: torch.LongTensor,\n key: Optional[torch.Tensor],\n value: Optional[torch.Tensor],\n attn_bias: Optional[torch.Tensor],\n key_padding_mask: Optional[torch.Tensor] = None,\n need_weights: bool = True,\n attn_mask: Optional[torch.Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n \"\"\"\n Args:\n key_padding_mask (Bytetorch.Tensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (Bytetorch.Tensor, optional): typically used to\n implement causal attention, where the mask prevents the attention from looking forward in time\n (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. 
Default: return the average attention weights over all\n heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embedding_dim = query.size()\n src_len = tgt_len\n if not (embedding_dim == self.embedding_dim):\n raise AssertionError(\n f\"The query embedding dimension {embedding_dim} is not equal to the expected embedding_dim\"\n f\" {self.embedding_dim}.\"\n )\n if not (list(query.size()) == [tgt_len, bsz, embedding_dim]):\n raise AssertionError(\"Query size incorrect in Graphormer, compared to model dimensions.\")\n\n if key is not None:\n src_len, key_bsz, _ = key.size()\n if not torch.jit.is_scripting():\n if (key_bsz != bsz) or (value is None) or not (src_len, bsz == value.shape[:2]):\n raise AssertionError(\n \"The batch shape does not match the key or value shapes provided to the attention.\"\n )\n\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n\n q *= self.scaling\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if (k is None) or not (k.size(1) == src_len):\n raise AssertionError(\"The shape of the key generated in the attention is incorrect\")\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n if key_padding_mask.size(0) != bsz or key_padding_mask.size(1) != src_len:\n raise AssertionError(\n \"The shape of the generated padding mask for the key does not match expected dimensions.\"\n )\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n if list(attn_weights.size()) != [bsz * self.num_heads, tgt_len, src_len]:\n raise AssertionError(\"The attention weights generated do not match the expected dimensions.\")\n\n if attn_bias is not None:\n attn_weights += attn_bias.view(bsz * self.num_heads, tgt_len, src_len)\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float(\"-inf\")\n )\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n\n attn_weights_float = torch.nn.functional.softmax(attn_weights, dim=-1)\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = self.attention_dropout_module(attn_weights)\n\n if v is None:\n raise AssertionError(\"No value generated\")\n attn = torch.bmm(attn_probs, v)\n if list(attn.size()) != [bsz * self.num_heads, tgt_len, self.head_dim]:\n raise AssertionError(\"The attention generated do not match the expected dimensions.\")\n\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)\n attn: torch.Tensor = self.out_proj(attn)\n\n attn_weights = None\n if need_weights:\n attn_weights = attn_weights_float.contiguous().view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n\n return 
attn, attn_weights\n\n def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor:\n return attn_weights\n\n\nclass GraphormerGraphEncoderLayer(nn.Module):\n def __init__(self, config: GraphormerConfig) -> None:\n super().__init__()\n\n # Initialize parameters\n self.embedding_dim = config.embedding_dim\n self.num_attention_heads = config.num_attention_heads\n self.q_noise = config.q_noise\n self.qn_block_size = config.qn_block_size\n self.pre_layernorm = config.pre_layernorm\n\n self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)\n\n self.activation_dropout_module = torch.nn.Dropout(p=config.activation_dropout, inplace=False)\n\n # Initialize blocks\n self.activation_fn = ACT2FN[config.activation_fn]\n self.self_attn = GraphormerMultiheadAttention(config)\n\n # layer norm associated with the self attention layer\n self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)\n\n self.fc1 = self.build_fc(\n self.embedding_dim,\n config.ffn_embedding_dim,\n q_noise=config.q_noise,\n qn_block_size=config.qn_block_size,\n )\n self.fc2 = self.build_fc(\n config.ffn_embedding_dim,\n self.embedding_dim,\n q_noise=config.q_noise,\n qn_block_size=config.qn_block_size,\n )\n\n # layer norm associated with the position wise feed-forward NN\n self.final_layer_norm = nn.LayerNorm(self.embedding_dim)\n\n def build_fc(\n self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int\n ) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]:\n return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)\n\n def forward(\n self,\n input_nodes: torch.Tensor,\n self_attn_bias: Optional[torch.Tensor] = None,\n self_attn_mask: Optional[torch.Tensor] = None,\n self_attn_padding_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n \"\"\"\n nn.LayerNorm is applied either before or after the self-attention/ffn modules similar to the original\n Transformer implementation.\n \"\"\"\n residual = input_nodes\n if self.pre_layernorm:\n input_nodes = self.self_attn_layer_norm(input_nodes)\n\n input_nodes, attn = self.self_attn(\n query=input_nodes,\n key=input_nodes,\n value=input_nodes,\n attn_bias=self_attn_bias,\n key_padding_mask=self_attn_padding_mask,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n input_nodes = self.dropout_module(input_nodes)\n input_nodes = residual + input_nodes\n if not self.pre_layernorm:\n input_nodes = self.self_attn_layer_norm(input_nodes)\n\n residual = input_nodes\n if self.pre_layernorm:\n input_nodes = self.final_layer_norm(input_nodes)\n input_nodes = self.activation_fn(self.fc1(input_nodes))\n input_nodes = self.activation_dropout_module(input_nodes)\n input_nodes = self.fc2(input_nodes)\n input_nodes = self.dropout_module(input_nodes)\n input_nodes = residual + input_nodes\n if not self.pre_layernorm:\n input_nodes = self.final_layer_norm(input_nodes)\n\n return input_nodes, attn\n\n\nclass GraphormerGraphEncoder(nn.Module):\n def __init__(self, config: GraphormerConfig):\n super().__init__()\n\n self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)\n self.layerdrop = config.layerdrop\n self.embedding_dim = config.embedding_dim\n self.apply_graphormer_init = config.apply_graphormer_init\n self.traceable = config.traceable\n\n self.graph_node_feature = GraphormerGraphNodeFeature(config)\n self.graph_attn_bias = GraphormerGraphAttnBias(config)\n\n self.embed_scale = config.embed_scale\n\n if config.q_noise > 0:\n 
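The reshape-and-bmm layout used by `GraphormerMultiheadAttention.forward` above can be summarized by this minimal sketch (no projections, masks, or bias; sizes are arbitrary).

import torch

tgt_len, bsz, num_heads, head_dim = 5, 2, 4, 8
embedding_dim = num_heads * head_dim

x = torch.randn(tgt_len, bsz, embedding_dim)                       # [tgt_len, bsz, embed]
q = k = v = x.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)

attn_weights = torch.bmm(q * head_dim**-0.5, k.transpose(1, 2))    # [bsz*heads, tgt, src]
attn_probs = torch.nn.functional.softmax(attn_weights, dim=-1)
attn = torch.bmm(attn_probs, v)                                    # [bsz*heads, tgt, head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)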
self.quant_noise = quant_noise(\n nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),\n config.q_noise,\n config.qn_block_size,\n )\n else:\n self.quant_noise = None\n\n if config.encoder_normalize_before:\n self.emb_layer_norm = nn.LayerNorm(self.embedding_dim)\n else:\n self.emb_layer_norm = None\n\n if config.pre_layernorm:\n self.final_layer_norm = nn.LayerNorm(self.embedding_dim)\n\n if self.layerdrop > 0.0:\n self.layers = LayerDropModuleList(p=self.layerdrop)\n else:\n self.layers = nn.ModuleList([])\n self.layers.extend([GraphormerGraphEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n\n # Apply initialization of model params after building the model\n if config.freeze_embeddings:\n raise NotImplementedError(\"Freezing embeddings is not implemented yet.\")\n\n for layer in range(config.num_trans_layers_to_freeze):\n m = self.layers[layer]\n if m is not None:\n for p in m.parameters():\n p.requires_grad = False\n\n def forward(\n self,\n input_nodes: torch.LongTensor,\n input_edges: torch.LongTensor,\n attn_bias: torch.Tensor,\n in_degree: torch.LongTensor,\n out_degree: torch.LongTensor,\n spatial_pos: torch.LongTensor,\n attn_edge_type: torch.LongTensor,\n perturb=None,\n last_state_only: bool = False,\n token_embeddings: Optional[torch.Tensor] = None,\n attn_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[Union[torch.Tensor, List[torch.LongTensor]], torch.Tensor]:\n # compute padding mask. This is needed for multi-head attention\n data_x = input_nodes\n n_graph, n_node = data_x.size()[:2]\n padding_mask = (data_x[:, :, 0]).eq(0)\n padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device, dtype=padding_mask.dtype)\n padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)\n\n attn_bias = self.graph_attn_bias(input_nodes, attn_bias, spatial_pos, input_edges, attn_edge_type)\n\n if token_embeddings is not None:\n input_nodes = token_embeddings\n else:\n input_nodes = self.graph_node_feature(input_nodes, in_degree, out_degree)\n\n if perturb is not None:\n input_nodes[:, 1:, :] += perturb\n\n if self.embed_scale is not None:\n input_nodes = input_nodes * self.embed_scale\n\n if self.quant_noise is not None:\n input_nodes = self.quant_noise(input_nodes)\n\n if self.emb_layer_norm is not None:\n input_nodes = self.emb_layer_norm(input_nodes)\n\n input_nodes = self.dropout_module(input_nodes)\n\n input_nodes = input_nodes.transpose(0, 1)\n\n inner_states = []\n if not last_state_only:\n inner_states.append(input_nodes)\n\n for layer in self.layers:\n input_nodes, _ = layer(\n input_nodes,\n self_attn_padding_mask=padding_mask,\n self_attn_mask=attn_mask,\n self_attn_bias=attn_bias,\n )\n if not last_state_only:\n inner_states.append(input_nodes)\n\n graph_rep = input_nodes[0, :, :]\n\n if last_state_only:\n inner_states = [input_nodes]\n\n if self.traceable:\n return torch.stack(inner_states), graph_rep\n else:\n return inner_states, graph_rep\n\n\nclass GraphormerDecoderHead(nn.Module):\n def __init__(self, embedding_dim: int, num_classes: int):\n super().__init__()\n \"\"\"num_classes should be 1 for regression, or the number of classes for classification\"\"\"\n self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))\n self.classifier = nn.Linear(embedding_dim, num_classes, bias=False)\n self.num_classes = num_classes\n\n def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor:\n input_nodes = self.classifier(input_nodes)\n input_nodes = input_nodes + self.lm_output_learned_bias\n return input_nodes\n\n\nclass 
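The padding-mask construction at the top of `GraphormerGraphEncoder.forward` above, in isolation: a node is treated as padding when its first feature id is 0, and the prepended graph token is never masked (toy input below).

import torch

input_nodes = torch.tensor([[[5], [3], [0]]])              # [n_graph=1, n_node=3, 1 feature]
padding_mask = input_nodes[:, :, 0].eq(0)                  # [[False, False, True]]
cls_mask = torch.zeros(1, 1, dtype=padding_mask.dtype)     # graph token position
padding_mask = torch.cat((cls_mask, padding_mask), dim=1)  # [1, n_node + 1]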
GraphormerPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GraphormerConfig\n base_model_prefix = \"graphormer\"\n main_input_name_nodes = \"input_nodes\"\n main_input_name_edges = \"input_edges\"\n\n def normal_(self, data: torch.Tensor):\n # with FSDP, module params will be on CUDA, so we cast them back to CPU\n # so that the RNG is consistent with and without FSDP\n data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))\n\n def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]):\n \"\"\"\n Initialize the weights specific to the Graphormer Model.\n \"\"\"\n if isinstance(module, nn.Linear):\n self.normal_(module.weight.data)\n if module.bias is not None:\n module.bias.data.zero_()\n if isinstance(module, nn.Embedding):\n self.normal_(module.weight.data)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n if isinstance(module, GraphormerMultiheadAttention):\n self.normal_(module.q_proj.weight.data)\n self.normal_(module.k_proj.weight.data)\n self.normal_(module.v_proj.weight.data)\n\n def _init_weights(\n self,\n module: Union[\n nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder\n ],\n ):\n \"\"\"\n Initialize the weights\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n # We might be missing part of the Linear init, dependant on the layer num\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, GraphormerMultiheadAttention):\n module.q_proj.weight.data.normal_(mean=0.0, std=0.02)\n module.k_proj.weight.data.normal_(mean=0.0, std=0.02)\n module.v_proj.weight.data.normal_(mean=0.0, std=0.02)\n module.reset_parameters()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, GraphormerGraphEncoder):\n if module.apply_graphormer_init:\n module.apply(self.init_graphormer_params)\n\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nclass GraphormerModel(GraphormerPreTrainedModel):\n \"\"\"The Graphormer model is a graph-encoder model.\n\n It goes from a graph to its representation. If you want to use the model for a downstream classification task, use\n GraphormerForGraphClassification instead. 
For any other downstream task, feel free to add a new class, or combine\n this model with a downstream model of your choice, following the example in GraphormerForGraphClassification.\n \"\"\"\n\n def __init__(self, config: GraphormerConfig):\n super().__init__(config)\n self.max_nodes = config.max_nodes\n\n self.graph_encoder = GraphormerGraphEncoder(config)\n\n self.share_input_output_embed = config.share_input_output_embed\n self.lm_output_learned_bias = None\n\n # Remove head is set to true during fine-tuning\n self.load_softmax = not getattr(config, \"remove_head\", False)\n\n self.lm_head_transform_weight = nn.Linear(config.embedding_dim, config.embedding_dim)\n self.activation_fn = ACT2FN[config.activation_fn]\n self.layer_norm = nn.LayerNorm(config.embedding_dim)\n\n self.post_init()\n\n def reset_output_layer_parameters(self):\n self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))\n\n def forward(\n self,\n input_nodes: torch.LongTensor,\n input_edges: torch.LongTensor,\n attn_bias: torch.Tensor,\n in_degree: torch.LongTensor,\n out_degree: torch.LongTensor,\n spatial_pos: torch.LongTensor,\n attn_edge_type: torch.LongTensor,\n perturb: Optional[torch.FloatTensor] = None,\n masked_tokens: None = None,\n return_dict: Optional[bool] = None,\n **unused,\n ) -> Union[Tuple[torch.LongTensor], BaseModelOutputWithNoAttention]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n inner_states, graph_rep = self.graph_encoder(\n input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, perturb=perturb\n )\n\n # last inner state, then revert Batch and Graph len\n input_nodes = inner_states[-1].transpose(0, 1)\n\n # project masked tokens only\n if masked_tokens is not None:\n raise NotImplementedError\n\n input_nodes = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(input_nodes)))\n\n # project back to size of vocabulary\n if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, \"weight\"):\n input_nodes = torch.nn.functional.linear(input_nodes, self.graph_encoder.embed_tokens.weight)\n\n if not return_dict:\n return tuple(x for x in [input_nodes, inner_states] if x is not None)\n return BaseModelOutputWithNoAttention(last_hidden_state=input_nodes, hidden_states=inner_states)\n\n def max_nodes(self):\n \"\"\"Maximum output length supported by the encoder.\"\"\"\n return self.max_nodes\n\n\nclass GraphormerForGraphClassification(GraphormerPreTrainedModel):\n \"\"\"\n This model can be used for graph-level classification or regression tasks.\n\n It can be trained on\n - regression (by setting config.num_classes to 1); there should be one float-type label per graph\n - one task classification (by setting config.num_classes to the number of classes); there should be one integer\n label per graph\n - binary multi-task classification (by setting config.num_classes to the number of labels); there should be a list\n of integer labels for each graph.\n \"\"\"\n\n def __init__(self, config: GraphormerConfig):\n super().__init__(config)\n self.encoder = GraphormerModel(config)\n self.embedding_dim = config.embedding_dim\n self.num_classes = config.num_classes\n self.classifier = GraphormerDecoderHead(self.embedding_dim, self.num_classes)\n self.is_encoder_decoder = True\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def forward(\n self,\n input_nodes: torch.LongTensor,\n input_edges: torch.LongTensor,\n attn_bias: torch.Tensor,\n in_degree: torch.LongTensor,\n 
out_degree: torch.LongTensor,\n spatial_pos: torch.LongTensor,\n attn_edge_type: torch.LongTensor,\n labels: Optional[torch.LongTensor] = None,\n return_dict: Optional[bool] = None,\n **unused,\n ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n encoder_outputs = self.encoder(\n input_nodes,\n input_edges,\n attn_bias,\n in_degree,\n out_degree,\n spatial_pos,\n attn_edge_type,\n return_dict=True,\n )\n outputs, hidden_states = encoder_outputs[\"last_hidden_state\"], encoder_outputs[\"hidden_states\"]\n\n head_outputs = self.classifier(outputs)\n logits = head_outputs[:, 0, :].contiguous()\n\n loss = None\n if labels is not None:\n mask = ~torch.isnan(labels)\n\n if self.num_classes == 1: # regression\n loss_fct = MSELoss()\n loss = loss_fct(logits[mask].squeeze(), labels[mask].squeeze().float())\n elif self.num_classes > 1 and len(labels.shape) == 1: # One task classification\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits[mask].view(-1, self.num_classes), labels[mask].view(-1))\n else: # Binary multi-task classification\n loss_fct = BCEWithLogitsLoss(reduction=\"sum\")\n loss = loss_fct(logits[mask], labels[mask])\n\n if not return_dict:\n return tuple(x for x in [loss, logits, hidden_states] if x is not None)\n return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None)\n", "output": ["quant_noise", "GraphormerGraphEncoder", "GraphormerGraphAttnBias", "GraphormerGraphNodeFeature", "GraphormerDecoderHead", "LayerDropModuleList", "GraphormerModel", "GraphormerForGraphClassification", "GraphormerPreTrainedModel", "GraphormerGraphEncoderLayer", "GraphormerMultiheadAttention"], "metadata": {"file_path": "transformers-main/src/transformers/models/graphormer/modeling_graphormer.py", "file_length": 11889, "symbol_dict": [{"symbol": "quant_noise", "type": "mannual_defined_function", "byte_location": 1481, "location": 486}, {"symbol": "GraphormerModel", "type": "mannual_defined_class", "byte_location": 31304, "location": 10108}, {"symbol": "GraphormerForGraphClassification", "type": "mannual_defined_class", "byte_location": 34268, "location": 11020}, {"symbol": "GraphormerMultiheadAttention", "type": "mannual_defined_class", "byte_location": 11830, "location": 3938}, {"symbol": "GraphormerGraphEncoderLayer", "type": "mannual_defined_class", "byte_location": 20332, "location": 6621}, {"symbol": "GraphormerGraphEncoder", "type": "mannual_defined_class", "byte_location": 23622, "location": 7700}, {"symbol": "GraphormerDecoderHead", "type": "mannual_defined_class", "byte_location": 27817, "location": 9029}, {"symbol": "GraphormerGraphNodeFeature", "type": "mannual_defined_class", "byte_location": 6705, "location": 2017}, {"symbol": "GraphormerGraphAttnBias", "type": "mannual_defined_class", "byte_location": 8140, "location": 2518}, {"symbol": "GraphormerPreTrainedModel", "type": "mannual_defined_class", "byte_location": 28447, "location": 9228}, {"symbol": "LayerDropModuleList", "type": "mannual_defined_class", "byte_location": 5332, "location": 1573}]}} {"input": "# Copyright 2021 AlQuraishi Laboratory\n# Copyright 2021 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
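As described in the class docstring and implemented in the loss branches above, `GraphormerForGraphClassification` expects differently shaped labels per task type (batch size and class count below are arbitrary).

import torch

batch, num_classes = 4, 3
logits = torch.randn(batch, num_classes)
labels_regression = torch.randn(batch)                                  # num_classes == 1 -> MSELoss
labels_single_task = torch.tensor([0, 2, 1, 2])                         # 1D int labels    -> CrossEntropyLoss
labels_multi_task = torch.randint(0, 2, (batch, num_classes)).float()   # 0/1 per class    -> BCEWithLogitsLoss (float targets)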
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom functools import lru_cache\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple\n\nimport numpy as np\nimport torch\n\n\ndef rot_matmul(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Performs matrix multiplication of two rotation matrix tensors. Written out by hand to avoid AMP downcasting.\n\n Args:\n a: [*, 3, 3] left multiplicand\n b: [*, 3, 3] right multiplicand\n Returns:\n The product ab\n \"\"\"\n\n def row_mul(i: int) -> torch.Tensor:\n return torch.stack(\n [\n a[..., i, 0] * b[..., 0, 0] + a[..., i, 1] * b[..., 1, 0] + a[..., i, 2] * b[..., 2, 0],\n a[..., i, 0] * b[..., 0, 1] + a[..., i, 1] * b[..., 1, 1] + a[..., i, 2] * b[..., 2, 1],\n a[..., i, 0] * b[..., 0, 2] + a[..., i, 1] * b[..., 1, 2] + a[..., i, 2] * b[..., 2, 2],\n ],\n dim=-1,\n )\n\n return torch.stack(\n [\n row_mul(0),\n row_mul(1),\n row_mul(2),\n ],\n dim=-2,\n )\n\n\ndef rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Applies a rotation to a vector. Written out by hand to avoid transfer to avoid AMP downcasting.\n\n Args:\n r: [*, 3, 3] rotation matrices\n t: [*, 3] coordinate tensors\n Returns:\n [*, 3] rotated coordinates\n \"\"\"\n x, y, z = torch.unbind(t, dim=-1)\n return torch.stack(\n [\n r[..., 0, 0] * x + r[..., 0, 1] * y + r[..., 0, 2] * z,\n r[..., 1, 0] * x + r[..., 1, 1] * y + r[..., 1, 2] * z,\n r[..., 2, 0] * x + r[..., 2, 1] * y + r[..., 2, 2] * z,\n ],\n dim=-1,\n )\n\n\n@lru_cache(maxsize=None)\ndef identity_rot_mats(\n batch_dims: Tuple[int, ...],\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n) -> torch.Tensor:\n rots = torch.eye(3, dtype=dtype, device=device, requires_grad=requires_grad)\n rots = rots.view(*((1,) * len(batch_dims)), 3, 3)\n rots = rots.expand(*batch_dims, -1, -1)\n rots = rots.contiguous()\n\n return rots\n\n\n@lru_cache(maxsize=None)\ndef identity_trans(\n batch_dims: Tuple[int, ...],\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n) -> torch.Tensor:\n trans = torch.zeros((*batch_dims, 3), dtype=dtype, device=device, requires_grad=requires_grad)\n return trans\n\n\n@lru_cache(maxsize=None)\ndef identity_quats(\n batch_dims: Tuple[int, ...],\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n) -> torch.Tensor:\n quat = torch.zeros((*batch_dims, 4), dtype=dtype, device=device, requires_grad=requires_grad)\n\n with torch.no_grad():\n quat[..., 0] = 1\n\n return quat\n\n\n_quat_elements: List[str] = [\"a\", \"b\", \"c\", \"d\"]\n_qtr_keys: List[str] = [l1 + l2 for l1 in _quat_elements for l2 in _quat_elements]\n_qtr_ind_dict: Dict[str, int] = {key: ind for ind, key in enumerate(_qtr_keys)}\n\n\ndef _to_mat(pairs: List[Tuple[str, int]]) -> np.ndarray:\n mat = np.zeros((4, 4))\n for key, value in pairs:\n ind = _qtr_ind_dict[key]\n mat[ind // 4][ind % 4] = value\n\n return mat\n\n\n_QTR_MAT = np.zeros((4, 4, 3, 3))\n_QTR_MAT[..., 0, 0] = _to_mat([(\"aa\", 1), (\"bb\", 1), (\"cc\", -1), (\"dd\", -1)])\n_QTR_MAT[..., 0, 1] = _to_mat([(\"bc\", 2), (\"ad\", -2)])\n_QTR_MAT[..., 0, 2] = _to_mat([(\"bd\", 2), (\"ac\", 2)])\n_QTR_MAT[..., 
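A quick sanity check for the hand-written helpers above: `rot_matmul` and `rot_vec_mul` match `torch.matmul`; they are only unrolled so that autocast/AMP cannot downcast the rotation math.

import torch

a, b = torch.randn(3, 3), torch.randn(3, 3)
t = torch.randn(3)
assert torch.allclose(rot_matmul(a, b), a @ b, atol=1e-6)
assert torch.allclose(rot_vec_mul(a, t), a @ t, atol=1e-6)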
1, 0] = _to_mat([(\"bc\", 2), (\"ad\", 2)])\n_QTR_MAT[..., 1, 1] = _to_mat([(\"aa\", 1), (\"bb\", -1), (\"cc\", 1), (\"dd\", -1)])\n_QTR_MAT[..., 1, 2] = _to_mat([(\"cd\", 2), (\"ab\", -2)])\n_QTR_MAT[..., 2, 0] = _to_mat([(\"bd\", 2), (\"ac\", -2)])\n_QTR_MAT[..., 2, 1] = _to_mat([(\"cd\", 2), (\"ab\", 2)])\n_QTR_MAT[..., 2, 2] = _to_mat([(\"aa\", 1), (\"bb\", -1), (\"cc\", -1), (\"dd\", 1)])\n\n\ndef quat_to_rot(quat: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts a quaternion to a rotation matrix.\n\n Args:\n quat: [*, 4] quaternions\n Returns:\n [*, 3, 3] rotation matrices\n \"\"\"\n # [*, 4, 4]\n quat = quat[..., None] * quat[..., None, :]\n\n # [4, 4, 3, 3]\n mat = _get_quat(\"_QTR_MAT\", dtype=quat.dtype, device=quat.device)\n\n # [*, 4, 4, 3, 3]\n shaped_qtr_mat = mat.view((1,) * len(quat.shape[:-2]) + mat.shape)\n quat = quat[..., None, None] * shaped_qtr_mat\n\n # [*, 3, 3]\n return torch.sum(quat, dim=(-3, -4))\n\n\ndef rot_to_quat(rot: torch.Tensor) -> torch.Tensor:\n if rot.shape[-2:] != (3, 3):\n raise ValueError(\"Input rotation is incorrectly shaped\")\n\n [[xx, xy, xz], [yx, yy, yz], [zx, zy, zz]] = [[rot[..., i, j] for j in range(3)] for i in range(3)]\n\n k = [\n [\n xx + yy + zz,\n zy - yz,\n xz - zx,\n yx - xy,\n ],\n [\n zy - yz,\n xx - yy - zz,\n xy + yx,\n xz + zx,\n ],\n [\n xz - zx,\n xy + yx,\n yy - xx - zz,\n yz + zy,\n ],\n [\n yx - xy,\n xz + zx,\n yz + zy,\n zz - xx - yy,\n ],\n ]\n\n _, vectors = torch.linalg.eigh((1.0 / 3.0) * torch.stack([torch.stack(t, dim=-1) for t in k], dim=-2))\n return vectors[..., -1]\n\n\n_QUAT_MULTIPLY = np.zeros((4, 4, 4))\n_QUAT_MULTIPLY[:, :, 0] = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]\n\n_QUAT_MULTIPLY[:, :, 1] = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]\n\n_QUAT_MULTIPLY[:, :, 2] = [[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, 1, 0, 0]]\n\n_QUAT_MULTIPLY[:, :, 3] = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0]]\n\n_QUAT_MULTIPLY_BY_VEC = _QUAT_MULTIPLY[:, 1:, :]\n\n_CACHED_QUATS: Dict[str, np.ndarray] = {\n \"_QTR_MAT\": _QTR_MAT,\n \"_QUAT_MULTIPLY\": _QUAT_MULTIPLY,\n \"_QUAT_MULTIPLY_BY_VEC\": _QUAT_MULTIPLY_BY_VEC,\n}\n\n\n@lru_cache(maxsize=None)\ndef _get_quat(quat_key: str, dtype: torch.dtype, device: torch.device) -> torch.Tensor:\n return torch.tensor(_CACHED_QUATS[quat_key], dtype=dtype, device=device)\n\n\ndef quat_multiply(quat1: torch.Tensor, quat2: torch.Tensor) -> torch.Tensor:\n \"\"\"Multiply a quaternion by another quaternion.\"\"\"\n mat = _get_quat(\"_QUAT_MULTIPLY\", dtype=quat1.dtype, device=quat1.device)\n reshaped_mat = mat.view((1,) * len(quat1.shape[:-1]) + mat.shape)\n return torch.sum(reshaped_mat * quat1[..., :, None, None] * quat2[..., None, :, None], dim=(-3, -2))\n\n\ndef quat_multiply_by_vec(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:\n \"\"\"Multiply a quaternion by a pure-vector quaternion.\"\"\"\n mat = _get_quat(\"_QUAT_MULTIPLY_BY_VEC\", dtype=quat.dtype, device=quat.device)\n reshaped_mat = mat.view((1,) * len(quat.shape[:-1]) + mat.shape)\n return torch.sum(reshaped_mat * quat[..., :, None, None] * vec[..., None, :, None], dim=(-3, -2))\n\n\ndef invert_rot_mat(rot_mat: torch.Tensor) -> torch.Tensor:\n return rot_mat.transpose(-1, -2)\n\n\ndef invert_quat(quat: torch.Tensor) -> torch.Tensor:\n quat_prime = quat.clone()\n quat_prime[..., 1:] *= -1\n inv = quat_prime / torch.sum(quat**2, dim=-1, keepdim=True)\n return inv\n\n\nclass Rotation:\n \"\"\"\n A 3D rotation. 
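Two small checks of the quaternion helpers above (assuming they are in scope): the identity quaternion maps to the identity rotation matrix, and a quaternion composed with its inverse gives the identity quaternion back.

import torch

identity = torch.tensor([1.0, 0.0, 0.0, 0.0])             # (w, x, y, z)
assert torch.allclose(quat_to_rot(identity), torch.eye(3))

q = torch.tensor([0.0, 1.0, 0.0, 0.0])                    # 180 degree rotation about x
assert torch.allclose(quat_multiply(q, invert_quat(q)), identity)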
Depending on how the object is initialized, the rotation is represented by either a rotation matrix\n or a quaternion, though both formats are made available by helper functions. To simplify gradient computation, the\n underlying format of the rotation cannot be changed in-place. Like Rigid, the class is designed to mimic the\n behavior of a torch Tensor, almost as if each Rotation object were a tensor of rotations, in one format or another.\n \"\"\"\n\n def __init__(\n self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If normalize_quats is not True, must be a unit\n quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if (rot_mats is None and quats is None) or (rot_mats is not None and quats is not None):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if (rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or (quats is not None and quats.shape[-1] != 4):\n raise ValueError(\"Incorrectly shaped rotation matrix or quaternion\")\n\n # Force full-precision\n if quats is not None:\n quats = quats.to(dtype=torch.float32)\n if rot_mats is not None:\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if quats is not None and normalize_quats:\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". Determines the underlying format of the new object's rotation\n Returns:\n A new identity rotation\n \"\"\"\n if fmt == \"rot_mat\":\n rot_mats = identity_rot_mats(\n shape,\n dtype,\n device,\n requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif fmt == \"quat\":\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation object. See documentation for the shape\n property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if self._rot_mats is not None:\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif self._quats is not None:\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self, right: torch.Tensor) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be used to e.g. 
mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not (isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if self._rot_mats is not None:\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif self._quats is not None:\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self, left: torch.Tensor) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is defined as the batch dimensions of the\n underlying rotation matrix or quaternion. If the Rotation was initialized with a [10, 3, 3] rotation matrix\n tensor, for example, the resulting shape would be [10].\n\n Returns:\n The virtual shape of the rotation object\n \"\"\"\n if self._rot_mats is not None:\n return self._rot_mats.shape[:-2]\n elif self._quats is not None:\n return self._quats.shape[:-1]\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if self._rot_mats is not None:\n return self._rot_mats.dtype\n elif self._quats is not None:\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if self._rot_mats is not None:\n return self._rot_mats.device\n elif self._quats is not None:\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if self._rot_mats is not None:\n return self._rot_mats.requires_grad\n elif self._quats is not None:\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n if self._rot_mats is not None:\n return self._rot_mats\n elif self._quats is not None:\n return quat_to_rot(self._quats)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n if self._rot_mats is not None:\n return rot_to_quat(self._rot_mats)\n elif self._quats is not None:\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if self._rot_mats is not None:\n return self._rot_mats\n elif self._quats is not None:\n return self._quats\n else:\n raise 
ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion\n update, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the\n desired (not necessarily unit) quaternion update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None,\n quats=new_quats,\n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those of another.\n\n Depending on whether either Rotation was initialized with quaternions, this function may call\n torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats)\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats)\n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if self._rot_mats is not None:\n return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)\n elif self._quats is not None:\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, dim: int) -> Rotation:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the shape of the Rotation object.\n\n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if self._rot_mats is not None:\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif self._quats is not None:\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(rs: Sequence[Rotation], dim: int) -> Rotation:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous to torch.cat().\n\n Note that the output of this operation is always a rotation matrix, regardless of the format of input\n rotations.\n\n Args:\n rs:\n A list of rotation objects\n dim:\n The dimension along which the rotations should be concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = torch.cat(\n [r.get_rot_mats() for r in rs],\n dim=dim if dim >= 0 else dim - 2,\n )\n\n return Rotation(rot_mats=rot_mats, quats=None)\n\n def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors, mapping over the rotation dimension(s). Can\n be used e.g. to sum out a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation\n Returns:\n The transformed Rotation object\n \"\"\"\n if self._rot_mats is not None:\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1)\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif self._quats is not None:\n quats = torch.stack(list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if self._rot_mats is not None:\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif self._quats is not None:\n return Rotation(rot_mats=None, quats=self._quats.cuda(), normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, device: Optional[torch.device], dtype: Optional[torch.dtype]) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if self._rot_mats is not None:\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype),\n quats=None,\n )\n elif self._quats is not None:\n return Rotation(\n rot_mats=None,\n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached from its torch graph\n \"\"\"\n if self._rot_mats is not None:\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif 
self._quats is not None:\n return Rotation(\n rot_mats=None,\n quats=self._quats.detach(),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n\nclass Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper around two objects: a Rotation object and a\n [*, 3] translation Designed to behave approximately like a single torch tensor with the shape of the shared batch\n dimensions of its component parts.\n \"\"\"\n\n def __init__(self, rots: Optional[Rotation], trans: Optional[torch.Tensor]):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if trans is not None:\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif rots is not None:\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if rots is None:\n rots = Rotation.identity(\n batch_dims,\n dtype,\n device,\n requires_grad,\n )\n elif trans is None:\n trans = identity_trans(\n batch_dims,\n dtype,\n device,\n requires_grad,\n )\n\n assert rots is not None\n assert trans is not None\n\n if (rots.shape != trans.shape[:-1]) or (rots.device != trans.device):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int, ...],\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape:\n The desired shape\n dtype:\n The dtype of both internal tensors\n device:\n The device of both internal tensors\n requires_grad:\n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, index: Any) -> Rigid:\n \"\"\"\n Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of\n both the rotation and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None) t = Rigid(r, torch.rand(10, 10, 3)) indexed =\n t[3, 4:6] assert(indexed.shape == (2,)) assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self, right: torch.Tensor) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor. Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not (isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self, left: torch.Tensor) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and the translation.\n\n Returns:\n The shape of the transformation\n \"\"\"\n return self._trans.shape[:-1]\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, q_update_vec: torch.Tensor) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of shape [*, 6], where the final 6 columns\n represent the x, y, and z values of a quaternion of form (1, x, y, z) followed by a 3D translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self, r: Rigid) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts)\n return rotated + self._trans\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts)\n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert()\n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the\n translation/rotation dimensions respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\"\n new_rots = self._rots.map_tensor_fn(fn)\n new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), 
dim=-1)\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(t: torch.Tensor) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if t.shape[-2:] != (4, 4):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n\n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the\n translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(t: torch.Tensor, normalize_quats: bool = False) -> Rigid:\n if t.shape[-1] != 7:\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(rot_mats=None, quats=quats, normalize_quats=normalize_quats)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, origin: torch.Tensor, p_xy_plane: torch.Tensor, eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis_unbound = torch.unbind(p_neg_x_axis, dim=-1)\n origin_unbound = torch.unbind(origin, dim=-1)\n p_xy_plane_unbound = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin_unbound, p_neg_x_axis_unbound)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane_unbound, origin_unbound)]\n\n denom = torch.sqrt(sum(c * c for c in e0) + eps * torch.ones_like(e0[0]))\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps * torch.ones_like(e1[0]))\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin_unbound, dim=-1))\n\n def unsqueeze(self, dim: int) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
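A brief usage sketch for the tensor conversions above (again assuming the rigid_utils import path from this record's metadata): an identity Rigid survives a to_tensor_4x4 / from_tensor_4x4 round trip and leaves points unchanged.

import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rigid

t = Rigid.identity((2,), fmt="rot_mat", requires_grad=False)   # two identity transforms
m = t.to_tensor_4x4()                                          # [2, 4, 4] homogeneous matrices
t2 = Rigid.from_tensor_4x4(m)                                  # round trip
pts = torch.randn(2, 3)
print(torch.allclose(t2.apply(pts), pts))                      # identity transform: points unchanged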
The dimension is relative to the shared dimensions of the rotation/translation.\n\n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(ts: Sequence[Rigid], dim: int) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts:\n A list of T objects\n dim:\n The dimension along which the transformations should be concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim)\n trans = torch.cat([t._trans for t in ts], dim=dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[[Rotation], Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn:\n A function of type Tensor -> Tensor to be applied to the translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n return self.apply_trans_fn(lambda t: t * trans_scale_factor)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n return self.apply_rot_fn(lambda r: r.detach())\n\n @staticmethod\n def make_transform_from_reference(\n n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float = 1e-20\n ) -> Rigid:\n \"\"\"\n Returns a transformation object from reference coordinates.\n\n Note that this method does not take care of symmetries. If you provide the atom positions in the non-standard\n way, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You\n need to take care of such cases in your code.\n\n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and rotation to the reference backbone, the\n coordinates will approximately equal to the input coordinates.\n \"\"\"\n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x**2 + c_y**2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x**2 + c_y**2 + c_z**2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x**2 + c_y**2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y**2 + n_z**2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n\n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())\n", "output": ["rot_vec_mul", "invert_quat", "invert_rot_mat", "rot_matmul", "quat_multiply_by_vec", "quat_to_rot", "rot_to_quat", "quat_multiply", "_to_mat", "Rotation", "Rigid"], "metadata": {"file_path": "transformers-main/src/transformers/models/esm/openfold_utils/rigid_utils.py", "file_length": 13503, "symbol_dict": [{"symbol": "quat_multiply_by_vec", "type": "mannual_defined_function", "byte_location": 7102, "location": 3153}, {"symbol": "rot_matmul", "type": "mannual_defined_function", "byte_location": 809, "location": 239}, {"symbol": "quat_multiply", "type": "mannual_defined_function", "byte_location": 6715, "location": 2987}, {"symbol": "_to_mat", "type": "mannual_defined_function", "byte_location": 3715, "location": 1437}, {"symbol": "invert_rot_mat", "type": "mannual_defined_function", "byte_location": 7500, "location": 3322}, {"symbol": "quat_to_rot", "type": "mannual_defined_function", "byte_location": 4515, "location": 1890}, {"symbol": "invert_quat", "type": "mannual_defined_function", "byte_location": 7598, "location": 3364}, {"symbol": "rot_vec_mul", "type": "mannual_defined_function", "byte_location": 1716, "location": 614}, {"symbol": "rot_to_quat", "type": "mannual_defined_function", "byte_location": 5083, "location": 2154}, {"symbol": "Rigid", "type": "mannual_defined_class", "byte_location": 24319, "location": 8144}, {"symbol": "Rotation", "type": "mannual_defined_class", "byte_location": 7792, "location": 3448}]}} {"input": "# coding=utf-8\n# Copyright 2018 Salesforce and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. 
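To make the backbone-frame construction from the preceding rigid_utils record concrete, here is a hedged sketch (same assumed import path; the coordinates are made up): the inverse of the returned frame maps CA to the local origin, and for this simple geometry C lands on the local +x axis and N in the local xy-plane.

import torch
from transformers.models.esm.openfold_utils.rigid_utils import Rigid

n = torch.tensor([[1.0, 1.0, 0.0]])
ca = torch.tensor([[1.0, 0.0, 0.0]])
c = torch.tensor([[2.0, 0.0, 0.0]])
frame = Rigid.make_transform_from_reference(n_xyz=n, ca_xyz=ca, c_xyz=c)
print(frame.invert_apply(ca))   # ~[[0., 0., 0.]]  CA at the local origin
print(frame.invert_apply(c))    # ~[[1., 0., 0.]]  C on the local +x axis
print(frame.invert_apply(n))    # ~[[0., 1., 0.]]  N in the local xy-plane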
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 CTRL model.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast, TFSequenceClassifierOutput\nfrom ...modeling_tf_utils import (\n TFCausalLanguageModelingLoss,\n TFModelInputType,\n TFPreTrainedModel,\n TFSequenceClassificationLoss,\n get_initializer,\n keras,\n keras_serializable,\n unpack_inputs,\n)\nfrom ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax\nfrom ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_ctrl import CTRLConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"Salesforce/ctrl\"\n_CONFIG_FOR_DOC = \"CTRLConfig\"\n\nTF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"Salesforce/ctrl\"\n # See all CTRL models at https://huggingface.co/models?filter=ctrl\n]\n\n\ndef angle_defn(pos, i, d_model_size):\n angle_rates = 1 / np.power(10000, (2 * (i // 2)) / d_model_size)\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model_size):\n # create the sinusoidal pattern for the positional encoding\n angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)\n\n sines = np.sin(angle_rads[:, 0::2])\n cosines = np.cos(angle_rads[:, 1::2])\n pos_encoding = tf.convert_to_tensor(np.concatenate([sines, cosines], axis=-1))\n\n return pos_encoding\n\n\ndef scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):\n # calculate attention\n matmul_qk = tf.matmul(q, k, transpose_b=True)\n\n dk = tf.cast(shape_list(k)[-1], dtype=matmul_qk.dtype)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n if mask is not None:\n scaled_attention_logits += tf.cast(mask * -1e4, dtype=scaled_attention_logits.dtype)\n\n if attention_mask is not None:\n # Apply the attention mask\n attention_mask = tf.cast(attention_mask, dtype=scaled_attention_logits.dtype)\n scaled_attention_logits = scaled_attention_logits + attention_mask\n\n attention_weights = stable_softmax(scaled_attention_logits, axis=-1)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_weights = attention_weights * head_mask\n\n output = tf.matmul(attention_weights, v)\n\n return output, attention_weights\n\n\nclass TFMultiHeadAttention(keras.layers.Layer):\n def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):\n super().__init__(**kwargs)\n self.num_heads = num_heads\n self.d_model_size = d_model_size\n self.output_attentions = output_attentions\n\n self.depth = int(d_model_size / self.num_heads)\n\n self.Wq = keras.layers.Dense(d_model_size, name=\"Wq\")\n self.Wk = keras.layers.Dense(d_model_size, name=\"Wk\")\n self.Wv = keras.layers.Dense(d_model_size, name=\"Wv\")\n\n self.dense = keras.layers.Dense(d_model_size, 
name=\"dense\")\n\n def split_into_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, v, k, q, mask, layer_past, attention_mask, head_mask, use_cache, output_attentions, training=False):\n batch_size = shape_list(q)[0]\n\n q = self.Wq(q)\n k = self.Wk(k)\n v = self.Wv(v)\n\n q = self.split_into_heads(q, batch_size)\n k = self.split_into_heads(k, batch_size)\n v = self.split_into_heads(v, batch_size)\n\n if layer_past is not None:\n past_key, past_value = tf.unstack(layer_past, axis=0)\n k = tf.concat((past_key, k), axis=-2)\n v = tf.concat((past_value, v), axis=-2)\n\n if use_cache:\n present = tf.stack((k, v), axis=0)\n else:\n present = (None,)\n\n output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)\n scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])\n attn = output[1]\n original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))\n output = self.dense(original_size_attention)\n outputs = (output, present)\n\n if output_attentions:\n outputs = outputs + (attn,)\n\n return outputs\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"Wq\", None) is not None:\n with tf.name_scope(self.Wq.name):\n self.Wq.build([None, None, self.d_model_size])\n if getattr(self, \"Wk\", None) is not None:\n with tf.name_scope(self.Wk.name):\n self.Wk.build([None, None, self.d_model_size])\n if getattr(self, \"Wv\", None) is not None:\n with tf.name_scope(self.Wv.name):\n self.Wv.build([None, None, self.d_model_size])\n if getattr(self, \"dense\", None) is not None:\n with tf.name_scope(self.dense.name):\n self.dense.build([None, None, self.d_model_size])\n\n\nclass TFPointWiseFeedForwardLayer(keras.layers.Layer):\n def __init__(self, d_model_size, dff, **kwargs):\n super().__init__(**kwargs)\n\n self.dense_0 = keras.layers.Dense(dff, activation=\"relu\", name=\"0\")\n self.dense_2 = keras.layers.Dense(d_model_size, name=\"2\")\n self.d_model_size = d_model_size\n self.dff = dff\n\n def call(self, inputs, trainable=False):\n dense_0_output = self.dense_0(inputs)\n dense_2_output = self.dense_2(dense_0_output)\n\n return dense_2_output\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"dense_0\", None) is not None:\n with tf.name_scope(self.dense_0.name):\n self.dense_0.build([None, None, self.d_model_size])\n if getattr(self, \"dense_2\", None) is not None:\n with tf.name_scope(self.dense_2.name):\n self.dense_2.build([None, None, self.dff])\n\n\nclass TFEncoderLayer(keras.layers.Layer):\n def __init__(\n self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs\n ):\n super().__init__(**kwargs)\n\n self.output_attentions = output_attentions\n\n self.multi_head_attention = TFMultiHeadAttention(\n d_model_size, num_heads, output_attentions=self.output_attentions, name=\"multi_head_attention\"\n )\n self.ffn = TFPointWiseFeedForwardLayer(d_model_size, dff, name=\"ffn\")\n\n self.layernorm1 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layernorm1\")\n self.layernorm2 = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name=\"layernorm2\")\n\n self.dropout1 = keras.layers.Dropout(rate)\n self.dropout2 = keras.layers.Dropout(rate)\n self.d_model_size = d_model_size\n\n def call(self, x, mask, layer_past, attention_mask, head_mask, use_cache, 
output_attentions, training=False):\n normed = self.layernorm1(x)\n attn_outputs = self.multi_head_attention(\n normed,\n normed,\n normed,\n mask,\n layer_past,\n attention_mask,\n head_mask,\n use_cache,\n output_attentions,\n training=training,\n )\n attn_output = attn_outputs[0]\n attn_output = self.dropout1(attn_output, training=training)\n out1 = x + attn_output\n\n out2 = self.layernorm2(out1)\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = out1 + ffn_output\n\n outputs = (out2,) + attn_outputs[1:]\n return outputs\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"multi_head_attention\", None) is not None:\n with tf.name_scope(self.multi_head_attention.name):\n self.multi_head_attention.build(None)\n if getattr(self, \"ffn\", None) is not None:\n with tf.name_scope(self.ffn.name):\n self.ffn.build(None)\n if getattr(self, \"layernorm1\", None) is not None:\n with tf.name_scope(self.layernorm1.name):\n self.layernorm1.build([None, None, self.d_model_size])\n if getattr(self, \"layernorm2\", None) is not None:\n with tf.name_scope(self.layernorm2.name):\n self.layernorm2.build([None, None, self.d_model_size])\n\n\n@keras_serializable\nclass TFCTRLMainLayer(keras.layers.Layer):\n config_class = CTRLConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.output_hidden_states = config.output_hidden_states\n self.output_attentions = config.output_attentions\n self.use_cache = config.use_cache\n self.return_dict = config.use_return_dict\n\n self.d_model_size = config.n_embd\n self.num_layers = config.n_layer\n\n self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)\n\n self.w = keras.layers.Embedding(\n input_dim=config.vocab_size,\n output_dim=config.n_embd,\n embeddings_initializer=get_initializer(config.initializer_range),\n name=\"w\",\n )\n\n self.dropout = keras.layers.Dropout(config.embd_pdrop)\n self.h = [\n TFEncoderLayer(\n config.n_embd,\n config.n_head,\n config.dff,\n config.resid_pdrop,\n config.layer_norm_epsilon,\n self.output_attentions,\n name=f\"h_._{i}\",\n )\n for i in range(config.n_layer)\n ]\n self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"layernorm\")\n\n def get_input_embeddings(self):\n return self.w\n\n def set_input_embeddings(self, new_embeddings):\n self.w = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
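A small, hedged sketch of the attention primitive defined earlier in this record (assuming the module is importable as transformers.models.ctrl.modeling_tf_ctrl, per the record's metadata, and that TensorFlow is installed): a band_part-based causal mask, built the same way as in TFCTRLMainLayer below, zeroes out attention to future positions.

import tensorflow as tf
from transformers.models.ctrl.modeling_tf_ctrl import scaled_dot_product_attention

q = tf.random.normal((1, 2, 4, 8))      # [batch, heads, seq_len, depth]
k = tf.random.normal((1, 2, 4, 8))
v = tf.random.normal((1, 2, 4, 8))
seq_len = 4
mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)   # 1 above the diagonal
out, weights = scaled_dot_product_attention(q, k, v, mask)
print(out.shape, weights.shape)          # (1, 2, 4, 8) (1, 2, 4, 4)
print(float(weights[0, 0, 0, 1]))        # ~0.0: token 0 cannot attend to token 1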
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n raise NotImplementedError\n\n @unpack_inputs\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFBaseModelOutputWithPast]:\n # If using past key value states, only the last tokens\n # should be given as an input\n if past_key_values is not None:\n if input_ids is not None:\n input_ids = input_ids[:, -1:]\n if inputs_embeds is not None:\n inputs_embeds = inputs_embeds[:, -1:]\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1:]\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if past_key_values is None:\n past_length = 0\n past_key_values = [None] * len(self.h)\n else:\n past_length = shape_list(past_key_values[0][0])[-2]\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32), axis=0)\n position_ids = tf.tile(position_ids, [input_shape[0], 1])\n\n # Attention mask.\n if attention_mask is not None:\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1] + past_length))\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n\n one_cst = tf.constant(1.0)\n ten_thousand_cst = tf.constant(-10000.0)\n attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)\n attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), ten_thousand_cst)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.num_layers\n\n if token_type_ids is not None:\n token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])\n token_type_embeds = self.w(token_type_ids)\n token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, 
dtype=token_type_embeds.dtype))\n else:\n token_type_embeds = tf.constant(0.0)\n position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])\n\n if inputs_embeds is None:\n check_embeddings_within_bounds(input_ids, self.w.input_dim)\n inputs_embeds = self.w(input_ids)\n seq_len = input_shape[-1]\n mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)\n\n inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, inputs_embeds.dtype))\n\n pos_embeds = tf.gather(self.pos_encoding, position_ids)\n pos_embeds = tf.cast(pos_embeds, dtype=token_type_embeds.dtype)\n hidden_states = inputs_embeds + pos_embeds + token_type_embeds\n\n hidden_states = self.dropout(hidden_states, training=training)\n\n output_shape = input_shape + [shape_list(hidden_states)[-1]]\n presents = () if use_cache else None\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for i, (h, layer_past) in enumerate(zip(self.h, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)\n outputs = h(\n hidden_states,\n mask,\n layer_past,\n attention_mask,\n head_mask[i],\n use_cache,\n output_attentions,\n training=training,\n )\n hidden_states, present = outputs[:2]\n\n if use_cache:\n presents = presents + (present,)\n\n if output_attentions:\n all_attentions = all_attentions + (outputs[2],)\n\n hidden_states = self.layernorm(hidden_states)\n hidden_states = tf.reshape(hidden_states, output_shape)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if output_attentions:\n # let the number of heads free (-1) so we can extract attention even after head pruning\n attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]\n all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)\n\n return TFBaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"w\", None) is not None:\n with tf.name_scope(self.w.name):\n self.w.build(None)\n if getattr(self, \"layernorm\", None) is not None:\n with tf.name_scope(self.layernorm.name):\n self.layernorm.build([None, None, self.config.n_embd])\n if getattr(self, \"h\", None) is not None:\n for layer in self.h:\n with tf.name_scope(layer.name):\n layer.build(None)\n\n\nclass TFCTRLPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = CTRLConfig\n base_model_prefix = \"transformer\"\n\n\nCTRL_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
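The (1 - mask) * -10000 padding-mask transform used above is easy to check in isolation; this is only an illustrative snippet, not code from the file.

import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 0.0]])          # 1 = attend, 0 = padding
additive = (1.0 - attention_mask) * -10000.0             # same effect as tf.multiply(tf.subtract(1, mask), -10000)
print(additive.numpy())                                  # [[     0.      0. -10000.]]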
Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should \"just work\" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`CTRLConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nCTRL_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of\n input past key value states).\n\n Indices of input sequence tokens in the vocabulary.\n\n If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past` output below). Can be used to speed up sequential decoding. The token ids which have their past\n given to this model should not be passed as input ids as they have already been computed.\n attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past` key value states are returned and can be used to speed up decoding (see `past`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLModel(TFCTRLPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFBaseModelOutputWithPast]:\n outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n return outputs\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n\n\nclass TFCTRLBiasLayer(keras.layers.Layer):\n \"\"\"\n Bias as a layer. 
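A usage sketch for TFCTRLModel and the three input formats described in the docstrings above. This is hedged: the CTRLConfig values are arbitrary, chosen only to keep the randomly initialized model tiny, and TensorFlow plus the transformers public API are assumed to be available.

import tensorflow as tf
from transformers import CTRLConfig, TFCTRLModel

config = CTRLConfig(vocab_size=1000, n_positions=64, n_embd=64, n_layer=2, n_head=2, dff=128)
model = TFCTRLModel(config)                       # randomly initialized, no checkpoint download
input_ids = tf.constant([[1, 2, 3, 4]])

out_tensor = model(input_ids)                     # single tensor
out_list = model([input_ids])                     # list of input tensors, in docstring order
out_dict = model({"input_ids": input_ids})        # dict keyed by input names
print(out_tensor.last_hidden_state.shape)         # (1, 4, 64)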
It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,\n so all weights have to be registered in a layer.\n \"\"\"\n\n def __init__(self, shape, initializer, trainable, name, **kwargs):\n super().__init__(name=name, **kwargs)\n self.shape = shape\n self.initializer = initializer\n self.trainable = trainable\n\n def build(self, input_shape):\n self.bias = self.add_weight(\n name=\"bias\", shape=self.shape, initializer=self.initializer, trainable=self.trainable\n )\n super().build(input_shape)\n\n def call(self, x):\n return x + self.bias\n\n\n@add_start_docstrings(\n \"\"\"\n The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLLMHeadModel(TFCTRLPreTrainedModel, TFCausalLanguageModelingLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n self.bias_layer = TFCTRLBiasLayer(\n name=\"lm_head\", shape=[1, config.vocab_size], initializer=\"zeros\", trainable=True\n )\n\n def get_output_embeddings(self):\n return self.get_input_embeddings()\n\n def set_output_embeddings(self, value):\n self.set_input_embeddings(value)\n\n def get_bias(self):\n return {\"lm_head.bias\": self.bias_layer.bias}\n\n def set_bias(self, value):\n # Replaces the existing layers containing bias for correct (de)serialization.\n vocab_size = value[\"lm_head.bias\"].shape[-1]\n self.bias_layer = TFCTRLBiasLayer(\n name=\"final_logits_bias\", shape=[1, vocab_size], initializer=\"zeros\", trainable=True\n )\n self.bias_layer.build(None)\n self.bias_layer.bias.assign(value[\"lm_head.bias\"])\n\n # Copied from transformers.models.gpt2.modeling_tf_gpt2.TFGPT2LMHeadModel.prepare_inputs_for_generation\n def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past_key_values:\n inputs = tf.expand_dims(inputs[:, -1], -1)\n if token_type_ids is not None:\n token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)\n\n position_ids = kwargs.get(\"position_ids\", None)\n attention_mask = kwargs.get(\"attention_mask\", None)\n\n if attention_mask is not None and position_ids is None:\n position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)\n if past_key_values:\n position_ids = tf.expand_dims(position_ids[:, -1], -1)\n\n return {\n \"input_ids\": inputs,\n \"attention_mask\": attention_mask,\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": use_cache,\n \"token_type_ids\": token_type_ids,\n }\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFCausalLMOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = 
None,\n return_dict: Optional[bool] = None,\n labels: np.ndarray | tf.Tensor | None = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFCausalLMOutputWithPast]:\n r\"\"\"\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n hidden_states = transformer_outputs[0]\n logits = tf.matmul(hidden_states, self.transformer.w.weights, transpose_b=True)\n logits = self.bias_layer(logits)\n\n loss = None\n if labels is not None:\n # shift labels to the left and cut last logit token\n shifted_logits = logits[:, :-1]\n labels = labels[:, 1:]\n loss = self.hf_compute_loss(labels, shifted_logits)\n\n if not return_dict:\n output = (logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFCausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n if getattr(self, \"bias_layer\", None) is not None:\n with tf.name_scope(self.bias_layer.name):\n self.bias_layer.build(None)\n\n\n@add_start_docstrings(\n \"\"\"\n The CTRL Model transformer with a sequence classification head on top (linear layer).\n\n [`TFCTRLForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1, GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n \"\"\",\n CTRL_START_DOCSTRING,\n)\nclass TFCTRLForSequenceClassification(TFCTRLPreTrainedModel, TFSequenceClassificationLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n self.classifier = keras.layers.Dense(\n config.num_labels,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"classifier\",\n use_bias=False,\n )\n self.transformer = TFCTRLMainLayer(config, name=\"transformer\")\n self.config = config\n\n def get_output_embeddings(self):\n # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.\n logger.warning(\n \"Sequence classification models do not have output embeddings. 
`.get_output_embeddings` will be removed \"\n \"in transformers v4.32.\"\n )\n return self.transformer.w\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(CTRL_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: np.ndarray | tf.Tensor | None = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFSequenceClassifierOutput]:\n r\"\"\"\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n hidden_states = transformer_outputs[0]\n logits = self.classifier(hidden_states)\n in_logits = None\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n sequence_lengths = (\n tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)\n - 1\n )\n sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)\n in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
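# A minimal sketch (not from the upstream modeling file) of the "last non-padding token"
# selection used by the sequence-classification head above; `pad_token_id`, shapes and
# values are made up for illustration.
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 6, 7, 0, 0],
                         [8, 9, 4, 3, 2]])               # (batch=2, seq_len=5)
logits = tf.random.normal((2, 5, 3))                     # (batch, seq_len, num_labels)

# Index of the first padding token minus one == index of the last real token;
# rows without padding fall back to the final position.
sequence_lengths = tf.argmax(
    tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype), axis=-1
) - 1
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
pooled_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
print(pooled_logits.shape)                               # (2, 3)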
Results may be \"\n \"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n loss = None\n\n if labels is not None:\n if input_ids is not None:\n batch_size, sequence_length = shape_list(input_ids)[:2]\n else:\n batch_size, sequence_length = shape_list(inputs_embeds)[:2]\n if self.config.pad_token_id is None and batch_size != 1:\n raise ValueError(\"Cannot handle batch sizes > 1 if no padding token is defined.\")\n\n if not tf.is_tensor(sequence_lengths):\n in_logits = logits[0:batch_size, sequence_lengths]\n\n loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))\n\n pooled_logits = in_logits if in_logits is not None else logits\n\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutput(\n loss=loss,\n logits=pooled_logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"classifier\", None) is not None:\n with tf.name_scope(self.classifier.name):\n self.classifier.build([None, None, self.config.n_embd])\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n", "output": ["positional_encoding", "scaled_dot_product_attention", "angle_defn", "TFPointWiseFeedForwardLayer", "TFCTRLModel", "TFCTRLBiasLayer", "TFEncoderLayer", "TFCTRLLMHeadModel", "TFCTRLMainLayer", "TFMultiHeadAttention", "TFCTRLForSequenceClassification", "TFCTRLPreTrainedModel"], "metadata": {"file_path": "transformers-main/src/transformers/models/ctrl/modeling_tf_ctrl.py", "file_length": 12282, "symbol_dict": [{"symbol": "scaled_dot_product_attention", "type": "mannual_defined_function", "byte_location": 2228, "location": 774}, {"symbol": "positional_encoding", "type": "mannual_defined_function", "byte_location": 1804, "location": 614}, {"symbol": "angle_defn", "type": "mannual_defined_function", "byte_location": 1666, "location": 551}, {"symbol": "TFPointWiseFeedForwardLayer", "type": "mannual_defined_class", "byte_location": 5835, "location": 2066}, {"symbol": "TFCTRLModel", "type": "mannual_defined_class", "byte_location": 25343, "location": 7853}, {"symbol": "TFCTRLPreTrainedModel", "type": "mannual_defined_class", "byte_location": 18248, "location": 5992}, {"symbol": "TFCTRLMainLayer", "type": "mannual_defined_class", "byte_location": 9332, "location": 3246}, {"symbol": "TFCTRLForSequenceClassification", "type": "mannual_defined_class", "byte_location": 34474, "location": 10700}, {"symbol": "TFEncoderLayer", "type": "mannual_defined_class", "byte_location": 6811, "location": 2412}, {"symbol": "TFMultiHeadAttention", "type": "mannual_defined_class", "byte_location": 3140, "location": 1098}, {"symbol": "TFCTRLBiasLayer", "type": "mannual_defined_class", "byte_location": 27377, "location": 8492}, {"symbol": "TFCTRLLMHeadModel", "type": "mannual_defined_class", "byte_location": 28292, "location": 8770}]}} {"input": "# coding=utf-8\n# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch ErnieM model.\"\"\"\n\n\nimport math\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn, tensor\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer\nfrom ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_ernie_m import ErnieMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"susnato/ernie-m-base_pytorch\"\n_CONFIG_FOR_DOC = \"ErnieMConfig\"\n_TOKENIZER_FOR_DOC = \"ErnieMTokenizer\"\n\nERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"susnato/ernie-m-base_pytorch\",\n \"susnato/ernie-m-large_pytorch\",\n # See all ErnieM models at https://huggingface.co/models?filter=ernie_m\n]\n\n\n# Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings\nclass ErnieMEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word and position embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id\n )\n self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(p=config.hidden_dropout_prob)\n self.padding_idx = config.pad_token_id\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n inputs_embeds: Optional[torch.LongTensor] = None,\n past_key_values_length: int = 0,\n ) -> torch.Tensor:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n if position_ids is None:\n input_shape = inputs_embeds.size()[:-1]\n ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)\n seq_length = torch.cumsum(ones, dim=1)\n position_ids = seq_length - ones\n\n if past_key_values_length > 0:\n position_ids = position_ids + past_key_values_length\n # to mimic paddlenlp implementation\n position_ids += 2\n position_embeddings = self.position_embeddings(position_ids)\n embeddings = inputs_embeds + position_embeddings\n embeddings = self.layer_norm(embeddings)\n embeddings = self.dropout(embeddings)\n\n return embeddings\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with 
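# A minimal sketch (not from the upstream modeling file) of the position-id arithmetic in
# `ErnieMEmbeddings.forward` above: a 0-based running count, shifted by the cached length
# and then offset by 2 to mimic the paddlenlp implementation. Shapes are made up.
import torch

batch_size, seq_length, past_key_values_length = 2, 4, 3
ones = torch.ones((batch_size, seq_length), dtype=torch.int64)
position_ids = torch.cumsum(ones, dim=1) - ones          # 0, 1, 2, ...
position_ids = position_ids + past_key_values_length     # continue after the cached prefix
position_ids = position_ids + 2                          # paddlenlp-style offset
print(position_ids[0])                                   # tensor([5, 6, 7, 8])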
Bert->ErnieM,self.value->self.v_proj,self.key->self.k_proj,self.query->self.q_proj\nclass ErnieMSelfAttention(nn.Module):\n def __init__(self, config, position_embedding_type=None):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)\n self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)\n self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = position_embedding_type or getattr(\n config, \"position_embedding_type\", \"absolute\"\n )\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.Tensor]:\n mixed_query_layer = self.q_proj(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.k_proj(hidden_states))\n value_layer = self.transpose_for_scores(self.v_proj(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.k_proj(hidden_states))\n value_layer = self.transpose_for_scores(self.v_proj(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n use_cache = past_key_value is not None\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" 
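# A minimal sketch (not from the upstream modeling file) of what `transpose_for_scores`
# above does: split the hidden dimension into (num_heads, head_size) and move the head
# axis in front of the sequence axis so each head attends independently. Shapes are made up.
import torch

num_attention_heads, attention_head_size = 4, 8
hidden = torch.randn(2, 5, num_attention_heads * attention_head_size)   # (batch, seq, hidden)

new_shape = hidden.size()[:-1] + (num_attention_heads, attention_head_size)
per_head = hidden.view(new_shape).permute(0, 2, 1, 3)
print(per_head.shape)                                    # torch.Size([2, 4, 5, 8])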
case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n query_length, key_length = query_layer.shape[2], key_layer.shape[2]\n if use_cache:\n position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(\n -1, 1\n )\n else:\n position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in ErnieMModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\nclass ErnieMAttention(nn.Module):\n def __init__(self, config, position_embedding_type=None):\n super().__init__()\n self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)\n self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self_attn.num_attention_heads, 
self.self_attn.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)\n self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)\n self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)\n self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)\n self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.Tensor]:\n self_outputs = self.self_attn(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.out_proj(self_outputs[0])\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass ErnieMEncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n # to mimic paddlenlp implementation\n dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob\n act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout\n\n self.self_attn = ErnieMAttention(config)\n self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dropout = nn.Dropout(act_dropout)\n self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)\n self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n if isinstance(config.hidden_act, str):\n self.activation = ACT2FN[config.hidden_act]\n else:\n self.activation = config.hidden_act\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = True,\n ):\n residual = hidden_states\n if output_attentions:\n hidden_states, attention_opt_weights = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n head_mask=head_mask,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n )\n\n else:\n hidden_states = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n head_mask=head_mask,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n )\n hidden_states = residual + self.dropout1(hidden_states)\n hidden_states = self.norm1(hidden_states)\n residual = hidden_states\n\n hidden_states = self.linear1(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.linear2(hidden_states)\n hidden_states = residual + self.dropout2(hidden_states)\n hidden_states = self.norm2(hidden_states)\n\n if output_attentions:\n return hidden_states, 
attention_opt_weights\n else:\n return hidden_states\n\n\nclass ErnieMEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n input_embeds: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n output_hidden_states: Optional[bool] = False,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:\n hidden_states = () if output_hidden_states else None\n attentions = () if output_attentions else None\n\n output = input_embeds\n if output_hidden_states:\n hidden_states = hidden_states + (output,)\n for i, layer in enumerate(self.layers):\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n output, opt_attn_weights = layer(\n hidden_states=output,\n attention_mask=attention_mask,\n head_mask=layer_head_mask,\n past_key_value=past_key_value,\n )\n\n if output_hidden_states:\n hidden_states = hidden_states + (output,)\n if output_attentions:\n attentions = attentions + (opt_attn_weights,)\n\n last_hidden_state = output\n if not return_dict:\n return tuple(v for v in [last_hidden_state, hidden_states, attentions] if v is not None)\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ErnieM\nclass ErnieMPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass ErnieMPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = ErnieMConfig\n base_model_prefix = \"ernie_m\"\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nERNIE_M_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`PreTrainedModel`]. 
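# A minimal sketch (not from the upstream modeling file) of the first-token pooling
# performed by `ErnieMPooler` above: the hidden state at position 0 goes through a dense
# layer followed by a tanh. Shapes are made up.
import torch
from torch import nn

hidden_size = 16
dense = nn.Linear(hidden_size, hidden_size)
hidden_states = torch.randn(2, 5, hidden_size)           # (batch, seq, hidden)

pooled_output = torch.tanh(dense(hidden_states[:, 0]))   # (batch, hidden)
print(pooled_output.shape)                               # torch.Size([2, 16])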
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nERNIE_M_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.\",\n ERNIE_M_START_DOCSTRING,\n)\nclass ErnieMModel(ErnieMPreTrainedModel):\n def __init__(self, config, add_pooling_layer=True):\n super(ErnieMModel, self).__init__(config)\n self.initializer_range = config.initializer_range\n self.embeddings = ErnieMEmbeddings(config)\n self.encoder = ErnieMEncoder(config)\n self.pooler = ErnieMPooler(config) if add_pooling_layer else None\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layers[layer].self_attn.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[tensor] = None,\n position_ids: Optional[tensor] = None,\n attention_mask: Optional[tensor] = None,\n head_mask: Optional[tensor] = None,\n inputs_embeds: Optional[tensor] = None,\n past_key_values: Optional[Tuple[Tuple[tensor]]] = None,\n use_cache: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time.\")\n\n # init the default bool value\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.return_dict\n\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n past_key_values_length = 0\n if past_key_values is not None:\n past_key_values_length = past_key_values[0][0].shape[2]\n\n # Adapted from paddlenlp.transformers.ernie_m.ErnieMModel\n if attention_mask is None:\n attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)\n attention_mask *= torch.finfo(attention_mask.dtype).min\n if past_key_values is not None:\n batch_size = past_key_values[0][0].shape[0]\n past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)\n attention_mask = torch.concat([past_mask, attention_mask], dim=-1)\n # For 2D attention_mask from tokenizer\n elif attention_mask.ndim == 2:\n attention_mask = attention_mask.to(torch.float32)\n attention_mask = 1.0 - attention_mask\n attention_mask *= torch.finfo(attention_mask.dtype).min\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n past_key_values=past_key_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n sequence_output = encoder_outputs[0]\n pooler_output = self.pooler(sequence_output) if self.pooler is not None else None\n return (sequence_output, pooler_output) + encoder_outputs[1:]\n\n sequence_output = encoder_outputs[\"last_hidden_state\"]\n pooler_output = self.pooler(sequence_output) if self.pooler is not None else None\n hidden_states = None if not output_hidden_states else encoder_outputs[\"hidden_states\"]\n attentions = None if not output_attentions else encoder_outputs[\"attentions\"]\n\n return 
BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooler_output,\n hidden_states=hidden_states,\n attentions=attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.\"\"\",\n ERNIE_M_START_DOCSTRING,\n)\nclass ErnieMForSequenceClassification(ErnieMPreTrainedModel):\n # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->ErnieM,bert->ernie_m\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.ernie_m = ErnieMModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.Tensor]] = None,\n use_cache: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n return_dict: Optional[bool] = True,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
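# A minimal sketch (not from the upstream modeling file) of the additive attention-mask
# construction in `ErnieMModel.forward` above for a 2D tokenizer mask: real tokens get 0,
# padding gets the most negative float, and the result is broadcast to
# (batch, 1, 1, seq_len) so it can be added to the raw attention scores.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])         # 1 = real token, 0 = padding
mask = attention_mask.to(torch.float32)
mask = (1.0 - mask) * torch.finfo(mask.dtype).min
extended_mask = mask.unsqueeze(1).unsqueeze(1)           # (batch, 1, 1, seq_len)
print(extended_mask.shape)                               # torch.Size([1, 1, 1, 5])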
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.ernie_m(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"ErnieM Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks.\"\"\",\n ERNIE_M_START_DOCSTRING,\n)\nclass ErnieMForMultipleChoice(ErnieMPreTrainedModel):\n # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->ErnieM,bert->ernie_m\n def __init__(self, config):\n super().__init__(config)\n\n self.ernie_m = ErnieMModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the multiple choice classification loss. 
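# A minimal sketch (not from the upstream modeling file) of the `problem_type` dispatch
# used by the sequence-classification loss above: one label column -> regression (MSE),
# integer labels with several classes -> single-label classification (cross entropy),
# otherwise multi-label classification (BCE with logits). The helper name and the sample
# tensors below are made up.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(logits, labels, num_labels):
    if num_labels == 1:                                   # regression
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):           # single-label classification
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels.float())    # multi-label classification

logits = torch.randn(4, 3)
labels = torch.tensor([0, 2, 1, 2])
print(classification_loss(logits, labels, num_labels=3))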
Indices should be in `[0, ...,\n num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See\n `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.ernie_m(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"ErnieM Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.\"\"\",\n ERNIE_M_START_DOCSTRING,\n)\nclass ErnieMForTokenClassification(ErnieMPreTrainedModel):\n # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->ErnieM,bert->ernie_m\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.ernie_m = ErnieMModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.Tensor]] = None,\n output_hidden_states: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n return_dict: Optional[bool] = True,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. 
Indices should be in `[0, ..., config.num_labels - 1]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.ernie_m(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\"\"\",\n ERNIE_M_START_DOCSTRING,\n)\nclass ErnieMForQuestionAnswering(ErnieMPreTrainedModel):\n # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->ErnieM,bert->ernie_m\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.ernie_m = ErnieMModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n start_positions: Optional[torch.Tensor] = None,\n end_positions: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.ernie_m(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"ErnieMForInformationExtraction is a Ernie-M Model with two linear layer on top of the hidden-states output to\n compute `start_prob` and `end_prob`, designed for Universal Information Extraction.\"\"\",\n ERNIE_M_START_DOCSTRING,\n)\n# Copied from paddlenlp.transformers.ernie_m.modeling.UIEM\nclass ErnieMForInformationExtraction(ErnieMPreTrainedModel):\n def __init__(self, config):\n super(ErnieMForInformationExtraction, self).__init__(config)\n self.ernie_m = ErnieMModel(config)\n self.linear_start = nn.Linear(config.hidden_size, 1)\n self.linear_end = nn.Linear(config.hidden_size, 1)\n self.sigmoid = nn.Sigmoid()\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n start_positions: Optional[torch.Tensor] = None,\n end_positions: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for position (index) for computing the start_positions loss. Position outside of the sequence are\n not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) for computing the end_positions loss. 
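# A minimal sketch (not from the upstream modeling file) of the span-extraction loss
# pattern used by the question-answering head above: a 2-unit linear layer yields start
# and end logits, target positions are clamped into range, and out-of-range targets are
# ignored via `ignore_index`. Shapes and positions are made up.
import torch
from torch import nn
from torch.nn import CrossEntropyLoss

batch, seq_len, hidden = 2, 6, 8
qa_outputs = nn.Linear(hidden, 2)
sequence_output = torch.randn(batch, seq_len, hidden)

logits = qa_outputs(sequence_output)                       # (batch, seq_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)                    # (batch, seq_len)
end_logits = end_logits.squeeze(-1)

start_positions = torch.tensor([1, 99]).clamp(0, seq_len)  # 99 is out of range -> clamped to 6
end_positions = torch.tensor([3, 99]).clamp(0, seq_len)
loss_fct = CrossEntropyLoss(ignore_index=seq_len)          # clamped targets contribute no loss
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(float(total_loss))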
Position outside of the sequence are not\n taken into account for computing the loss.\n \"\"\"\n\n result = self.ernie_m(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n if return_dict:\n sequence_output = result.last_hidden_state\n elif not return_dict:\n sequence_output = result[0]\n\n start_logits = self.linear_start(sequence_output)\n start_logits = start_logits.squeeze(-1)\n end_logits = self.linear_end(sequence_output)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = BCEWithLogitsLoss()\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n return tuple(\n i\n for i in [total_loss, start_logits, end_logits, result.hidden_states, result.attentions]\n if i is not None\n )\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=result.hidden_states,\n attentions=result.attentions,\n )\n", "output": ["ErnieMEmbeddings", "ErnieMModel", "ErnieMEncoderLayer", "ErnieMPreTrainedModel", "ErnieMForSequenceClassification", "ErnieMForMultipleChoice", "ErnieMForInformationExtraction", "ErnieMAttention", "ErnieMSelfAttention", "ErnieMEncoder", "ErnieMForTokenClassification", "ErnieMPooler", "ErnieMForQuestionAnswering"], "metadata": {"file_path": "transformers-main/src/transformers/models/ernie_m/modeling_ernie_m.py", "file_length": 14642, "symbol_dict": [{"symbol": "ErnieMEncoder", "type": "mannual_defined_class", "byte_location": 16002, "location": 5103}, {"symbol": "ErnieMModel", "type": "mannual_defined_class", "byte_location": 22937, "location": 7086}, {"symbol": "ErnieMForQuestionAnswering", "type": "mannual_defined_class", "byte_location": 39648, "location": 12160}, {"symbol": "ErnieMForInformationExtraction", "type": "mannual_defined_class", "byte_location": 44264, "location": 13530}, {"symbol": "ErnieMForTokenClassification", "type": "mannual_defined_class", "byte_location": 36326, "location": 11156}, {"symbol": "ErnieMAttention", "type": "mannual_defined_class", "byte_location": 11237, "location": 3579}, {"symbol": "ErnieMEncoderLayer", "type": "mannual_defined_class", "byte_location": 13398, "location": 4313}, {"symbol": "ErnieMSelfAttention", "type": "mannual_defined_class", "byte_location": 3870, "location": 1292}, {"symbol": "ErnieMPreTrainedModel", "type": "mannual_defined_class", "byte_location": 18575, "location": 5880}, {"symbol": "ErnieMForSequenceClassification", "type": "mannual_defined_class", "byte_location": 28081, "location": 8667}, {"symbol": "ErnieMEmbeddings", "type": "mannual_defined_class", "byte_location": 1951, "location": 667}, {"symbol": "ErnieMPooler", "type": "mannual_defined_class", "byte_location": 18011, "location": 5700}, {"symbol": 
"ErnieMForMultipleChoice", "type": "mannual_defined_class", "byte_location": 32542, "location": 10000}]}} {"input": "# coding=utf-8\n# Copyright 2023 Bo Peng and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RWKV model.\"\"\"\n\nimport math\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n is_bitsandbytes_available,\n is_ninja_available,\n is_torch_cuda_available,\n logging,\n)\nfrom .configuration_rwkv import RwkvConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"RWKV/rwkv-4-169m-pile\"\n_CONFIG_FOR_DOC = \"RwkvConfig\"\n\nRWKV_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"RWKV/rwkv-4-169m-pile\",\n \"RWKV/rwkv-4-430m-pile\",\n \"RWKV/rwkv-4-1b5-pile\",\n \"RWKV/rwkv-4-3b-pile\",\n \"RWKV/rwkv-4-7b-pile\",\n \"RWKV/rwkv-4-14b-pile\",\n \"RWKV/rwkv-raven-1b5\",\n \"RWKV/rwkv-raven-3b\",\n \"RWKV/rwkv-raven-7b\",\n \"RWKV/rwkv-raven-14b\",\n # See all RWKV models at https://huggingface.co/models?filter=rwkv\n]\n\n\nrwkv_cuda_kernel = None\n\n\ndef load_wkv_cuda_kernel(context_length):\n from torch.utils.cpp_extension import load as load_kernel\n\n global rwkv_cuda_kernel\n\n kernel_folder = Path(__file__).resolve().parent.parent.parent / \"kernels\" / \"rwkv\"\n cuda_kernel_files = [kernel_folder / f for f in [\"wkv_op.cpp\", \"wkv_cuda.cu\", \"wkv_cuda_bf16.cu\"]]\n\n # Only load the kernel if it's not been loaded yet or if we changed the context length\n if rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == context_length:\n return\n\n logger.info(f\"Loading CUDA kernel for RWKV at context length of {context_length}.\")\n\n flags = [\n \"-res-usage\",\n \"--maxrregcount 60\",\n \"--use_fast_math\",\n \"-O3\",\n \"-Xptxas -O3\",\n \"--extra-device-vectorization\",\n f\"-DTmax={context_length}\",\n ]\n rwkv_cuda_kernel = load_kernel(\n name=f\"wkv_{context_length}\",\n sources=cuda_kernel_files,\n verbose=(logging.get_verbosity() == logging.DEBUG),\n extra_cuda_cflags=flags,\n )\n rwkv_cuda_kernel.max_seq_length = context_length\n\n\nclass RwkvLinearAttention(torch.autograd.Function):\n @staticmethod\n def forward(ctx, time_decay, time_first, key, value, state=None, return_state=False):\n batch_size, seq_len, hidden_size = key.size()\n if seq_len > rwkv_cuda_kernel.max_seq_length:\n raise ValueError(\n f\"Cannot process a batch with {seq_len} tokens at the same time, use a maximum of \"\n f\"{rwkv_cuda_kernel.max_seq_length} with this model.\"\n )\n if batch_size * hidden_size % min(hidden_size, 32) != 0:\n raise ValueError(\n f\"The product of batch size ({batch_size}) and hidden size ({hidden_size}) needs 
to be a round \"\n f\"multiple of {min(hidden_size, 32)}.\"\n )\n\n ctx.input_dtype = key.dtype\n\n if (\n time_decay.device.type != \"cuda\"\n or time_first.device.type != \"cuda\"\n or key.device.type != \"cuda\"\n or value.device.type != \"cuda\"\n ):\n raise ValueError(\"Calling the CUDA kernel for wkv attention requires all tensors to be on CUDA devices.\")\n\n time_decay = -torch.exp(time_decay.float().contiguous())\n if key.dtype == torch.float16:\n time_first = time_first.float()\n key = key.float()\n value = value.float()\n time_first = time_first.contiguous()\n key = key.contiguous()\n value = value.contiguous()\n # The CUDA kernel will fill this tensor.\n output = torch.empty_like(key, memory_format=torch.contiguous_format)\n if return_state or state is not None:\n if state is None:\n state = torch.zeros(\n batch_size,\n hidden_size,\n 3,\n dtype=torch.float32,\n device=key.device,\n memory_format=torch.contiguous_format,\n )\n state[:, :, 2] -= 1e38\n else:\n state = torch.cat([s.unsqueeze(2) for s in state], dim=2).contiguous()\n if key.dtype == torch.bfloat16:\n forward_func = rwkv_cuda_kernel.forward_with_state_bf16\n else:\n forward_func = rwkv_cuda_kernel.forward_with_state\n forward_func(time_decay, time_first, key, value, output, state)\n else:\n forward_func = rwkv_cuda_kernel.forward_bf16 if key.dtype == torch.bfloat16 else rwkv_cuda_kernel.forward\n forward_func(time_decay, time_first, key, value, output)\n\n ctx.save_for_backward(time_decay, time_first, key, value, output)\n\n if state is not None:\n state = [s.squeeze(2) for s in torch.chunk(state, 3, dim=2)]\n\n return output.to(ctx.input_dtype), state\n\n @staticmethod\n # g stands for grad\n def backward(ctx, g_output, g_state=None):\n input_dtype = ctx.input_dtype\n\n time_decay, time_first, key, value, output = ctx.saved_tensors\n # The CUDA kernel will fill those tensors.\n g_time_decay = torch.empty_like(\n time_decay,\n memory_format=torch.contiguous_format,\n dtype=torch.bfloat16 if input_dtype == torch.bfloat16 else torch.float32,\n )\n g_time_first = torch.empty_like(time_first, memory_format=torch.contiguous_format)\n g_key = torch.empty_like(key, memory_format=torch.contiguous_format)\n g_value = torch.empty_like(value, memory_format=torch.contiguous_format)\n\n if input_dtype == torch.float16:\n g_output = g_output.float()\n backward_func = rwkv_cuda_kernel.backward_bf16 if input_dtype == torch.bfloat16 else rwkv_cuda_kernel.backward\n backward_func(\n time_decay,\n time_first,\n key,\n value,\n output,\n g_output.contiguous(),\n g_time_decay,\n g_time_first,\n g_key,\n g_value,\n )\n\n return (\n g_time_decay.to(input_dtype),\n g_time_first.to(input_dtype),\n g_key.to(input_dtype),\n g_value.to(input_dtype),\n None,\n None,\n )\n\n\ndef rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=None, return_state=False):\n # For CPU fallback. 
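# A minimal sketch (not from the upstream modeling file) of the `torch.autograd.Function`
# pattern used by `RwkvLinearAttention` above: `forward` stashes tensors with
# `ctx.save_for_backward`, and `backward` returns one gradient per `forward` input. The
# elementwise square below is a made-up stand-in for the WKV kernel.
import torch

class Square(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_output                        # d(x^2)/dx = 2x

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
Square.apply(x).sum().backward()
print(x.grad)                                             # tensor([2., 4., 6.])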
Will be slower and probably take more memory than the custom CUDA kernel if not executed\n # within a torch.no_grad.\n _, seq_length, _ = key.size()\n output = torch.zeros_like(key)\n\n if state is None:\n num_state = torch.zeros_like(key[:, 0], dtype=torch.float32)\n den_state = torch.zeros_like(key[:, 0], dtype=torch.float32)\n max_state = torch.zeros_like(key[:, 0], dtype=torch.float32) - 1e38\n else:\n num_state, den_state, max_state = state\n # For numerical stability\n # real_numerator_state = num_state * torch.exp(max_state)\n # real_denominator_state = den_state * torch.exp(max_state)\n\n time_decay = -torch.exp(time_decay)\n\n for current_index in range(seq_length):\n current_key = key[:, current_index].float()\n current_value = value[:, current_index]\n\n # wkv computation at time t\n max_for_output = torch.maximum(max_state, current_key + time_first)\n e1 = torch.exp(max_state - max_for_output)\n e2 = torch.exp(current_key + time_first - max_for_output)\n numerator = e1 * num_state + e2 * current_value\n denominator = e1 * den_state + e2\n output[:, current_index] = (numerator / denominator).to(output.dtype)\n\n # Update state for next iteration\n max_for_state = torch.maximum(max_state + time_decay, current_key)\n e1 = torch.exp(max_state + time_decay - max_for_state)\n e2 = torch.exp(current_key - max_for_state)\n num_state = e1 * num_state + e2 * current_value\n den_state = e1 * den_state + e2\n max_state = max_for_state\n\n if return_state or state is not None:\n state = [num_state, den_state, max_state]\n\n return output, state\n\n\ndef rwkv_linear_attention(time_decay, time_first, key, value, state=None, return_state=False):\n no_cuda = any(t.device.type != \"cuda\" for t in [time_decay, time_first, key, value])\n # Launching the CUDA kernel for just one token will actually be slower (there is no for loop in the CPU version\n # in this case).\n one_token = key.size(1) == 1\n if rwkv_cuda_kernel is None or no_cuda or one_token:\n return rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=state, return_state=return_state)\n else:\n return RwkvLinearAttention.apply(time_decay, time_first, key, value, state, return_state)\n\n\nclass RwkvSelfAttention(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.config = config\n kernel_loaded = rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == config.context_length\n if is_ninja_available() and is_torch_cuda_available() and not kernel_loaded:\n try:\n load_wkv_cuda_kernel(config.context_length)\n except Exception:\n logger.info(\"Could not load the custom CUDA kernel for RWKV attention.\")\n self.layer_id = layer_id\n hidden_size = config.hidden_size\n attention_hidden_size = (\n config.attention_hidden_size if config.attention_hidden_size is not None else hidden_size\n )\n self.attention_hidden_size = attention_hidden_size\n\n self.time_decay = nn.Parameter(torch.empty(attention_hidden_size))\n self.time_first = nn.Parameter(torch.empty(attention_hidden_size))\n\n self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))\n self.time_mix_value = nn.Parameter(torch.empty(1, 1, hidden_size))\n self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))\n\n self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))\n self.key = nn.Linear(hidden_size, attention_hidden_size, bias=False)\n self.value = nn.Linear(hidden_size, attention_hidden_size, bias=False)\n self.receptance = nn.Linear(hidden_size, attention_hidden_size, bias=False)\n self.output = 
nn.Linear(attention_hidden_size, hidden_size, bias=False)\n\n # TODO: maybe jit, otherwise move inside forward\n def extract_key_value(self, hidden, state=None):\n # Mix hidden with the previous timestep to produce key, value, receptance\n if hidden.size(1) == 1 and state is not None:\n shifted = state[1][:, :, self.layer_id]\n else:\n shifted = self.time_shift(hidden)\n if state is not None:\n shifted[:, 0] = state[1][:, :, self.layer_id]\n key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)\n value = hidden * self.time_mix_value + shifted * (1 - self.time_mix_value)\n receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)\n\n key = self.key(key)\n value = self.value(value)\n receptance = torch.sigmoid(self.receptance(receptance))\n if state is not None:\n state[1][:, :, self.layer_id] = hidden[:, -1]\n return receptance, key, value, state\n\n def forward(self, hidden, state=None, use_cache=False):\n receptance, key, value, state = self.extract_key_value(hidden, state=state)\n layer_state = tuple(s[:, :, self.layer_id] for s in state[2:]) if state is not None else None\n rwkv, layer_state = rwkv_linear_attention(\n self.time_decay,\n self.time_first,\n key,\n value,\n state=layer_state,\n return_state=use_cache,\n )\n\n if layer_state is not None:\n state[2][:, :, self.layer_id] = layer_state[0]\n state[3][:, :, self.layer_id] = layer_state[1]\n state[4][:, :, self.layer_id] = layer_state[2]\n\n return self.output(receptance * rwkv), state\n\n\nclass RwkvFeedForward(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.config = config\n self.layer_id = layer_id\n hidden_size = config.hidden_size\n intermediate_size = (\n config.intermediate_size if config.intermediate_size is not None else 4 * config.hidden_size\n )\n\n self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))\n self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))\n self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))\n\n self.key = nn.Linear(hidden_size, intermediate_size, bias=False)\n self.receptance = nn.Linear(hidden_size, hidden_size, bias=False)\n self.value = nn.Linear(intermediate_size, hidden_size, bias=False)\n\n def forward(self, hidden, state=None):\n if hidden.size(1) == 1 and state is not None:\n shifted = state[0][:, :, self.layer_id]\n else:\n shifted = self.time_shift(hidden)\n if state is not None:\n shifted[:, 0] = state[0][:, :, self.layer_id]\n key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)\n receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)\n\n key = torch.square(torch.relu(self.key(key)))\n value = self.value(key)\n receptance = torch.sigmoid(self.receptance(receptance))\n\n if state is not None:\n state[0][:, :, self.layer_id] = hidden[:, -1]\n\n return receptance * value, state\n\n\nclass RwkvBlock(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n self.config = config\n self.layer_id = layer_id\n\n if layer_id == 0:\n self.pre_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)\n\n self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)\n self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)\n\n self.attention = RwkvSelfAttention(config, layer_id)\n self.feed_forward = RwkvFeedForward(config, layer_id)\n\n def forward(self, hidden, state=None, use_cache=False, output_attentions=False):\n if self.layer_id == 0:\n hidden = self.pre_ln(hidden)\n\n attention, 
state = self.attention(self.ln1(hidden), state=state, use_cache=use_cache)\n hidden = hidden + attention\n\n feed_forward, state = self.feed_forward(self.ln2(hidden), state=state)\n hidden = hidden + feed_forward\n\n outputs = (hidden, state)\n if output_attentions:\n outputs += (attention,)\n else:\n outputs += (None,)\n\n return outputs\n\n\nclass RwkvPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RwkvConfig\n base_model_prefix = \"rwkv\"\n _no_split_modules = [\"RwkvBlock\"]\n _keep_in_fp32_modules = [\"time_decay\", \"time_first\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, RwkvSelfAttention):\n layer_id = module.layer_id\n num_hidden_layers = module.config.num_hidden_layers\n hidden_size = module.config.hidden_size\n attention_hidden_size = module.attention_hidden_size\n\n ratio_0_to_1 = layer_id / (num_hidden_layers - 1) # 0 to 1\n ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0\n\n time_weight = torch.tensor(\n [i / hidden_size for i in range(hidden_size)],\n dtype=module.time_mix_key.dtype,\n device=module.time_mix_key.device,\n )\n time_weight = time_weight[None, None, :]\n\n decay_speed = [\n -5 + 8 * (h / (attention_hidden_size - 1)) ** (0.7 + 1.3 * ratio_0_to_1)\n for h in range(attention_hidden_size)\n ]\n decay_speed = torch.tensor(decay_speed, dtype=module.time_decay.dtype, device=module.time_decay.device)\n zigzag = (\n torch.tensor(\n [(i + 1) % 3 - 1 for i in range(attention_hidden_size)],\n dtype=module.time_first.dtype,\n device=module.time_first.device,\n )\n * 0.5\n )\n\n with torch.no_grad():\n module.time_decay.data = decay_speed\n module.time_first.data = torch.ones_like(module.time_first * math.log(0.3) + zigzag)\n\n module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)\n module.time_mix_value.data = torch.pow(time_weight, ratio_1_to_almost0) + 0.3 * ratio_0_to_1\n module.time_mix_receptance.data = torch.pow(time_weight, 0.5 * ratio_1_to_almost0)\n elif isinstance(module, RwkvFeedForward):\n layer_id = module.layer_id\n num_hidden_layers = module.config.num_hidden_layers\n hidden_size = module.config.hidden_size\n\n ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0\n\n time_weight = torch.tensor(\n [i / hidden_size for i in range(hidden_size)],\n dtype=module.time_mix_key.dtype,\n device=module.time_mix_key.device,\n )\n time_weight = time_weight[None, None, :]\n\n with torch.no_grad():\n module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)\n module.time_mix_receptance.data = torch.pow(time_weight, ratio_1_to_almost0)\n\n\n@dataclass\nclass RwkvOutput(ModelOutput):\n \"\"\"\n Class for the RWKV model outputs.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):\n The state of the model at the last time step. 
Can be used in a forward method with the next `input_ids` to\n avoid providing the old `input_ids`.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n state: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n\n\n@dataclass\nclass RwkvCausalLMOutput(ModelOutput):\n \"\"\"\n Base class for causal language model (or autoregressive) outputs.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):\n The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to\n avoid providing the old `input_ids`.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n state: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n\n\nRWKV_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`RwkvConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nRWKV_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n This is currently not used by `RwkvModel`, but will be supported in the future.\n\n [What are attention masks?](../glossary#attention-mask)\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*):\n If passed along, the model uses the previous state in all the blocks (which will give the output for the\n `input_ids` provided as if the model add `state_input_ids + input_ids` as context).\n use_cache (`bool`, *optional*):\n If set to `True`, the last state is returned and can be used to quickly generate the next logits.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RWKV Model transformer outputting raw hidden-states without any specific head on top.\",\n RWKV_START_DOCSTRING,\n)\nclass RwkvModel(RwkvPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n self.blocks = nn.ModuleList([RwkvBlock(config, layer_id=idx) for idx in range(config.num_hidden_layers)])\n self.ln_out = nn.LayerNorm(config.hidden_size)\n\n self.layers_are_rescaled = False\n\n self.gradient_checkpointing = False\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings = new_embeddings\n\n @add_start_docstrings_to_model_forward(RWKV_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=RwkvOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None, # noqa\n inputs_embeds: Optional[torch.FloatTensor] = None,\n state: Optional[List[torch.FloatTensor]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, RwkvOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.training == self.layers_are_rescaled:\n self._rescale_layers()\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is None and inputs_embeds is None:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n if use_cache and state is None:\n shape = (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers)\n state = [\n torch.zeros(\n *shape, dtype=inputs_embeds.dtype if i <= 1 else torch.float32, device=inputs_embeds.device\n )\n for i in range(5)\n ]\n state[4] -= 1e30\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n hidden_states = inputs_embeds\n\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for idx, block in enumerate(self.blocks):\n if self.gradient_checkpointing and self.training:\n hidden_states, state, attentions = self._gradient_checkpointing_func(\n block.__call__, hidden_states, state, use_cache, output_attentions\n )\n else:\n hidden_states, state, attentions = block(\n hidden_states, state=state, use_cache=use_cache, output_attentions=output_attentions\n )\n\n if (\n self.layers_are_rescaled\n and self.config.rescale_every > 0\n and (idx + 1) % self.config.rescale_every == 0\n ):\n hidden_states = hidden_states / 2\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (attentions,)\n\n hidden_states = self.ln_out(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(x for x in [hidden_states, state, all_hidden_states, all_self_attentions] if x is not None)\n\n return RwkvOutput(\n last_hidden_state=hidden_states,\n state=state,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n def _rescale_layers(self):\n # Layers should be rescaled for inference only.\n if self.layers_are_rescaled == (not self.training):\n return\n if self.config.rescale_every > 0:\n with torch.no_grad():\n for block_id, block in enumerate(self.blocks):\n if self.training:\n block.attention.output.weight.mul_(2 ** int(block_id // self.config.rescale_every))\n block.feed_forward.value.weight.mul_(2 ** int(block_id // self.config.rescale_every))\n else:\n # Deal with quantization statistics\n if hasattr(block.attention.output.weight, \"SCB\"):\n block.attention.output.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))\n block.feed_forward.value.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))\n elif hasattr(block.attention.output.weight, \"quant_state\"):\n self._bnb_4bit_dequantize_and_rescale(block.attention.output, block_id)\n self._bnb_4bit_dequantize_and_rescale(block.feed_forward.value, block_id)\n else:\n block.attention.output.weight.div_(2 ** int(block_id // self.config.rescale_every))\n block.feed_forward.value.weight.div_(2 ** int(block_id // self.config.rescale_every))\n\n self.layers_are_rescaled = not self.training\n\n def _bnb_4bit_dequantize_and_rescale(self, target_layer, block_id):\n r\"\"\"\n Perform the dequantization and rescaling of the weights of a given layer. 
After that operation the layer will\n be quantized again.\n \"\"\"\n if not is_bitsandbytes_available():\n raise ImportError(\"Please install bitsandbytes to use this method.\")\n import bitsandbytes as bnb\n\n dequant_weights = bnb.functional.dequantize_4bit(target_layer.weight.data, target_layer.weight.quant_state)\n\n dequant_weights.div_(2 ** int(block_id // self.config.rescale_every))\n\n # re-quantize the model:\n # we need to put it first on CPU then back to the device\n # this will create an overhead :/\n # We set requires_grad=False as we cannot compute gradients on top of 4bit parameters anyway and to avoid\n # bugs with bnb\n quant_weight = bnb.nn.Params4bit(dequant_weights.to(\"cpu\"), requires_grad=False).to(dequant_weights.device)\n setattr(target_layer, \"weight\", quant_weight)\n\n\n@add_start_docstrings(\n \"\"\"\n The RWKV Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n RWKV_START_DOCSTRING,\n)\nclass RwkvForCausalLM(RwkvPreTrainedModel):\n _tied_weights_keys = [\"head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.rwkv = RwkvModel(config)\n self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.head\n\n def set_output_embeddings(self, new_embeddings):\n self.head = new_embeddings\n\n def generate(self, *args, **kwargs):\n # Thin wrapper to raise exceptions when trying to generate with methods that manipulate `past_key_values`.\n # RWKV is one of the few models that don't have it (it has `state` instead, which has different properties and\n # usage).\n try:\n gen_output = super().generate(*args, **kwargs)\n except AttributeError as exc:\n # Expected exception: \"AttributeError: '(object name)' object has no attribute 'past_key_values'\"\n if \"past_key_values\" in str(exc):\n raise AttributeError(\n \"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`. RWKV \"\n \"doesn't have that attribute, try another generation strategy instead. 
For the available \"\n \"generation strategies, check this doc: https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies\"\n )\n else:\n raise exc\n return gen_output\n\n def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, **kwargs):\n # only last token for inputs_ids if the state is passed along.\n if state is not None:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and state is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs[\"state\"] = state\n return model_inputs\n\n @add_start_docstrings_to_model_forward(RWKV_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=RwkvCausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None, # noqa\n inputs_embeds: Optional[torch.FloatTensor] = None,\n state: Optional[List[torch.FloatTensor]] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, RwkvCausalLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n rwkv_outputs = self.rwkv(\n input_ids,\n inputs_embeds=inputs_embeds,\n state=state,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = rwkv_outputs[0]\n\n logits = self.head(hidden_states)\n\n loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(logits.device)\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (logits,) + rwkv_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return RwkvCausalLMOutput(\n loss=loss,\n logits=logits,\n state=rwkv_outputs.state,\n hidden_states=rwkv_outputs.hidden_states,\n attentions=rwkv_outputs.attentions,\n )\n", "output": ["rwkv_linear_attention", "rwkv_linear_attention_cpu", "load_wkv_cuda_kernel", "RwkvSelfAttention", "RwkvLinearAttention", "RwkvFeedForward", "RwkvPreTrainedModel", "RwkvCausalLMOutput", "RwkvForCausalLM", "RwkvModel", "RwkvBlock", "RwkvOutput"], "metadata": {"file_path": "transformers-main/src/transformers/models/rwkv/modeling_rwkv.py", "file_length": 11828, "symbol_dict": [{"symbol": "rwkv_linear_attention_cpu", "type": "mannual_defined_function", "byte_location": 7051, "location": 2417}, {"symbol": "load_wkv_cuda_kernel", "type": "mannual_defined_function", "byte_location": 1789, "location": 710}, 
{"symbol": "rwkv_linear_attention", "type": "mannual_defined_function", "byte_location": 8895, "location": 3055}, {"symbol": "RwkvForCausalLM", "type": "mannual_defined_class", "byte_location": 33324, "location": 10404}, {"symbol": "RwkvSelfAttention", "type": "mannual_defined_class", "byte_location": 9533, "location": 3275}, {"symbol": "RwkvFeedForward", "type": "mannual_defined_class", "byte_location": 12806, "location": 4341}, {"symbol": "RwkvModel", "type": "mannual_defined_class", "byte_location": 25857, "location": 8213}, {"symbol": "RwkvPreTrainedModel", "type": "mannual_defined_class", "byte_location": 15502, "location": 5238}, {"symbol": "RwkvCausalLMOutput", "type": "mannual_defined_class", "byte_location": 20248, "location": 6694}, {"symbol": "RwkvOutput", "type": "mannual_defined_class", "byte_location": 18521, "location": 6211}, {"symbol": "RwkvLinearAttention", "type": "mannual_defined_class", "byte_location": 2876, "location": 1093}, {"symbol": "RwkvBlock", "type": "mannual_defined_class", "byte_location": 14355, "location": 4867}]}} {"input": "# coding=utf-8\n# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch TimeSformer model.\"\"\"\n\n\nimport collections\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import BaseModelOutput, ImageClassifierOutput\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings\nfrom .configuration_timesformer import TimesformerConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"TimesformerConfig\"\n_CHECKPOINT_FOR_DOC = \"facebook/timesformer\"\n\nTIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/timesformer-base-finetuned-k400\",\n # See all TimeSformer models at https://huggingface.co/models?filter=timesformer\n]\n\n\n# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L155\nclass TimesformerPatchEmbeddings(nn.Module):\n \"\"\"Image to Patch Embedding\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n image_size = config.image_size\n patch_size = config.patch_size\n\n image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)\n patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)\n\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n\n self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)\n\n def forward(self, pixel_values):\n 
batch_size, num_frames, num_channels, height, width = pixel_values.shape\n pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width)\n\n embeddings = self.projection(pixel_values)\n patch_width = embeddings.size(-1)\n embeddings = embeddings.flatten(2).transpose(1, 2)\n return embeddings, num_frames, patch_width\n\n\nclass TimesformerEmbeddings(nn.Module):\n \"\"\"\n Construct the patch and position embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n embed_dim = config.hidden_size\n num_frames = config.num_frames\n drop_rate = config.hidden_dropout_prob\n attention_type = config.attention_type\n\n self.attention_type = attention_type\n self.patch_embeddings = TimesformerPatchEmbeddings(config)\n self.num_patches = self.patch_embeddings.num_patches\n\n # Positional Embeddings\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n if attention_type != \"space_only\":\n self.time_embeddings = nn.Parameter(torch.zeros(1, num_frames, embed_dim))\n self.time_drop = nn.Dropout(p=drop_rate)\n\n def forward(self, pixel_values):\n batch_size = pixel_values.shape[0]\n\n # create patch embeddings\n embeddings, num_frames, patch_width = self.patch_embeddings(pixel_values)\n\n cls_tokens = self.cls_token.expand(embeddings.size(0), -1, -1)\n embeddings = torch.cat((cls_tokens, embeddings), dim=1)\n\n # resizing the positional embeddings in case they don't match the input at inference\n if embeddings.size(1) != self.position_embeddings.size(1):\n position_embeddings = self.position_embeddings\n cls_pos_embed = position_embeddings[0, 0, :].unsqueeze(0).unsqueeze(1)\n other_pos_embed = position_embeddings[0, 1:, :].unsqueeze(0).transpose(1, 2)\n patch_num = int(other_pos_embed.size(2) ** 0.5)\n patch_height = embeddings.size(1) // patch_width\n other_pos_embed = other_pos_embed.reshape(1, embeddings.size(2), patch_num, patch_num)\n new_pos_embed = nn.functional.interpolate(\n other_pos_embed, size=(patch_height, patch_width), mode=\"nearest\"\n )\n new_pos_embed = new_pos_embed.flatten(2)\n new_pos_embed = new_pos_embed.transpose(1, 2)\n new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)\n embeddings = embeddings + new_pos_embed\n else:\n embeddings = embeddings + self.position_embeddings\n embeddings = self.pos_drop(embeddings)\n\n # Time Embeddings\n if self.attention_type != \"space_only\":\n cls_tokens = embeddings[:batch_size, 0, :].unsqueeze(1)\n embeddings = embeddings[:, 1:]\n _, patch_height, patch_width = embeddings.shape\n embeddings = (\n embeddings.reshape(batch_size, num_frames, patch_height, patch_width)\n .permute(0, 2, 1, 3)\n .reshape(batch_size * patch_height, num_frames, patch_width)\n )\n # Resizing time embeddings in case they don't match\n if num_frames != self.time_embeddings.size(1):\n time_embeddings = self.time_embeddings.transpose(1, 2)\n new_time_embeddings = nn.functional.interpolate(time_embeddings, size=(num_frames), mode=\"nearest\")\n new_time_embeddings = new_time_embeddings.transpose(1, 2)\n embeddings = embeddings + new_time_embeddings\n else:\n embeddings = embeddings + self.time_embeddings\n embeddings = self.time_drop(embeddings)\n embeddings = embeddings.view(batch_size, patch_height, num_frames, patch_width).reshape(\n batch_size, patch_height * num_frames, patch_width\n )\n embeddings = torch.cat((cls_tokens, embeddings), dim=1)\n\n return embeddings\n\n\n# 
Copied from transformers.models.beit.modeling_beit.drop_path\ndef drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:\n \"\"\"\n Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n\n Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,\n however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the\n layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the\n argument.\n \"\"\"\n if drop_prob == 0.0 or not training:\n return input\n keep_prob = 1 - drop_prob\n shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\n random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)\n random_tensor.floor_() # binarize\n output = input.div(keep_prob) * random_tensor\n return output\n\n\n# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->TimeSformer\nclass TimeSformerDropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\"\"\"\n\n def __init__(self, drop_prob: Optional[float] = None) -> None:\n super().__init__()\n self.drop_prob = drop_prob\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n return drop_path(hidden_states, self.drop_prob, self.training)\n\n def extra_repr(self) -> str:\n return \"p={}\".format(self.drop_prob)\n\n\n# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L57\nclass TimesformerSelfAttention(nn.Module):\n def __init__(self, config: TimesformerConfig):\n super().__init__()\n\n num_heads = config.num_attention_heads\n qkv_bias = config.qkv_bias\n attention_dropout_prob = config.attention_probs_dropout_prob\n\n self.num_heads = num_heads\n head_dim = config.hidden_size // num_heads\n self.scale = head_dim**-0.5\n self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attention_dropout_prob)\n\n def forward(self, hidden_states, output_attentions: bool = False):\n batch_size, hidden_size, num_channels = hidden_states.shape\n qkv = (\n self.qkv(hidden_states)\n .reshape(batch_size, hidden_size, 3, self.num_heads, num_channels // self.num_heads)\n .permute(2, 0, 3, 1, 4)\n )\n query, key, value = qkv[0], qkv[1], qkv[2]\n\n attention_probs = (query @ key.transpose(-2, -1)) * self.scale\n attention_probs = attention_probs.softmax(dim=-1)\n attention_probs = self.attn_drop(attention_probs)\n\n context_layer = (attention_probs @ value).transpose(1, 2).reshape(batch_size, hidden_size, num_channels)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n return outputs\n\n\nclass TimesformerSelfOutput(nn.Module):\n \"\"\"\n The residual connection is defined in TimesformerLayer instead of here (as is the case with other models), due to\n the layernorm applied before each block.\n \"\"\"\n\n def __init__(self, config: TimesformerConfig) -> None:\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = 
self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n return hidden_states\n\n\nclass TimeSformerAttention(nn.Module):\n def __init__(self, config: TimesformerConfig) -> None:\n super().__init__()\n self.attention = TimesformerSelfAttention(config)\n self.output = TimesformerSelfOutput(config)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n output_attentions: bool = False,\n ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:\n self_outputs = self.attention(hidden_states, output_attentions)\n\n attention_output = self.output(self_outputs[0])\n\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L39\nclass TimesformerIntermediate(nn.Module):\n def __init__(self, config: TimesformerConfig) -> None:\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n return hidden_states\n\n\nclass TimesformerOutput(nn.Module):\n def __init__(self, config: TimesformerConfig) -> None:\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n return hidden_states\n\n\n# Adapted from https://github.com/facebookresearch/TimeSformer/blob/a5ef29a7b7264baff199a30b3306ac27de901133/timesformer/models/vit.py#L89\nclass TimesformerLayer(nn.Module):\n def __init__(self, config: TimesformerConfig, layer_index: int) -> None:\n super().__init__()\n\n attention_type = config.attention_type\n\n drop_path_rates = [\n x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)\n ] # stochastic depth decay rule\n drop_path_rate = drop_path_rates[layer_index]\n\n self.drop_path = TimeSformerDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()\n self.attention = TimeSformerAttention(config)\n self.intermediate = TimesformerIntermediate(config)\n self.output = TimesformerOutput(config)\n self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.config = config\n self.attention_type = attention_type\n if attention_type not in [\"divided_space_time\", \"space_only\", \"joint_space_time\"]:\n raise ValueError(\"Unknown attention type: {}\".format(attention_type))\n\n # Temporal Attention Parameters\n if self.attention_type == \"divided_space_time\":\n self.temporal_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.temporal_attention = TimeSformerAttention(config)\n self.temporal_dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False):\n num_frames = self.config.num_frames\n num_patch_width = self.config.image_size // 
self.config.patch_size\n batch_size = hidden_states.shape[0]\n num_spatial_tokens = (hidden_states.size(1) - 1) // num_frames\n num_patch_height = num_spatial_tokens // num_patch_width\n\n if self.attention_type in [\"space_only\", \"joint_space_time\"]:\n self_attention_outputs = self.attention(\n self.layernorm_before(hidden_states), output_attentions=output_attentions\n )\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n hidden_states = hidden_states + self.drop_path(attention_output)\n\n layer_output = self.layernorm_after(hidden_states)\n layer_output = self.intermediate(layer_output)\n layer_output = self.output(layer_output)\n layer_output = hidden_states + self.drop_path(layer_output)\n\n outputs = (layer_output,) + outputs\n\n return outputs\n\n elif self.attention_type == \"divided_space_time\":\n # Temporal\n temporal_embedding = hidden_states[:, 1:, :]\n temporal_embedding = temporal_embedding.reshape(\n batch_size, num_patch_height, num_patch_width, num_frames, temporal_embedding.shape[2]\n ).reshape(batch_size * num_patch_height * num_patch_width, num_frames, temporal_embedding.shape[2])\n\n temporal_attention_outputs = self.temporal_attention(\n self.temporal_layernorm(temporal_embedding),\n )\n attention_output = temporal_attention_outputs[0]\n\n residual_temporal = self.drop_path(attention_output)\n\n residual_temporal = residual_temporal.reshape(\n batch_size, num_patch_height, num_patch_width, num_frames, residual_temporal.shape[2]\n ).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_temporal.shape[2])\n residual_temporal = self.temporal_dense(residual_temporal)\n temporal_embedding = hidden_states[:, 1:, :] + residual_temporal\n\n # Spatial\n init_cls_token = hidden_states[:, 0, :].unsqueeze(1)\n cls_token = init_cls_token.repeat(1, num_frames, 1)\n cls_token = cls_token.reshape(batch_size * num_frames, 1, cls_token.shape[2])\n spatial_embedding = temporal_embedding\n spatial_embedding = (\n spatial_embedding.reshape(\n batch_size, num_patch_height, num_patch_width, num_frames, spatial_embedding.shape[2]\n )\n .permute(0, 3, 1, 2, 4)\n .reshape(batch_size * num_frames, num_patch_height * num_patch_width, spatial_embedding.shape[2])\n )\n spatial_embedding = torch.cat((cls_token, spatial_embedding), 1)\n\n spatial_attention_outputs = self.attention(\n self.layernorm_before(spatial_embedding), output_attentions=output_attentions\n )\n attention_output = spatial_attention_outputs[0]\n outputs = spatial_attention_outputs[1:] # add self attentions if we output attention weights\n\n residual_spatial = self.drop_path(attention_output)\n\n # Taking care of CLS token\n cls_token = residual_spatial[:, 0, :]\n cls_token = cls_token.reshape(batch_size, num_frames, cls_token.shape[1])\n cls_token = torch.mean(cls_token, 1, True) # averaging for every frame\n residual_spatial = residual_spatial[:, 1:, :]\n residual_spatial = (\n residual_spatial.reshape(\n batch_size, num_frames, num_patch_height, num_patch_width, residual_spatial.shape[2]\n )\n .permute(0, 2, 3, 1, 4)\n .reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_spatial.shape[2])\n )\n residual = residual_spatial\n hidden_states = temporal_embedding\n\n # Mlp\n hidden_states = torch.cat((init_cls_token, hidden_states), 1) + torch.cat((cls_token, residual), 1)\n layer_output = self.layernorm_after(hidden_states)\n layer_output = self.intermediate(layer_output)\n layer_output = 
self.output(layer_output)\n layer_output = hidden_states + self.drop_path(layer_output)\n\n outputs = (layer_output,) + outputs\n\n return outputs\n\n\nclass TimesformerEncoder(nn.Module):\n def __init__(self, config: TimesformerConfig) -> None:\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([TimesformerLayer(config, ind) for ind in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n output_attentions: bool = False,\n output_hidden_states: bool = False,\n return_dict: bool = True,\n ) -> Union[tuple, BaseModelOutput]:\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(\n layer_module.__call__,\n hidden_states,\n output_attentions,\n )\n else:\n layer_outputs = layer_module(hidden_states, output_attentions)\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass TimesformerPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = TimesformerConfig\n base_model_prefix = \"timesformer\"\n main_input_name = \"pixel_values\"\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.trunc_normal_(module.weight, std=self.config.initializer_range)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n elif isinstance(module, nn.LayerNorm):\n nn.init.constant_(module.bias, 0)\n nn.init.constant_(module.weight, 1.0)\n elif isinstance(module, TimesformerEmbeddings):\n nn.init.trunc_normal_(module.cls_token, std=self.config.initializer_range)\n nn.init.trunc_normal_(module.position_embeddings, std=self.config.initializer_range)\n module.patch_embeddings.apply(self._init_weights)\n\n\nTIMESFORMER_START_DOCSTRING = r\"\"\"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`TimesformerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nTIMESFORMER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\n [`VideoMAEImageProcessor.preprocess`] for details.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare TimeSformer Model transformer outputting raw hidden-states without any specific head on top.\",\n TIMESFORMER_START_DOCSTRING,\n)\nclass TimesformerModel(TimesformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.embeddings = TimesformerEmbeddings(config)\n self.encoder = TimesformerEncoder(config)\n\n self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.patch_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(TIMESFORMER_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> import av\n >>> import numpy as np\n\n >>> from transformers import AutoImageProcessor, TimesformerModel\n >>> from huggingface_hub import hf_hub_download\n\n >>> np.random.seed(0)\n\n\n >>> def read_video_pyav(container, indices):\n ... '''\n ... Decode the video with PyAV decoder.\n ... Args:\n ... container (`av.container.input.InputContainer`): PyAV container.\n ... indices (`List[int]`): List of frame indices to decode.\n ... Returns:\n ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).\n ... '''\n ... frames = []\n ... container.seek(0)\n ... start_index = indices[0]\n ... end_index = indices[-1]\n ... for i, frame in enumerate(container.decode(video=0)):\n ... if i > end_index:\n ... break\n ... if i >= start_index and i in indices:\n ... frames.append(frame)\n ... return np.stack([x.to_ndarray(format=\"rgb24\") for x in frames])\n\n\n >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):\n ... '''\n ... Sample a given number of frame indices from the video.\n ... Args:\n ... clip_len (`int`): Total number of frames to sample.\n ... frame_sample_rate (`int`): Sample every n-th frame.\n ... seg_len (`int`): Maximum allowed index of sample's last frame.\n ... Returns:\n ... indices (`List[int]`): List of sampled frame indices\n ... '''\n ... converted_len = int(clip_len * frame_sample_rate)\n ... end_idx = np.random.randint(converted_len, seg_len)\n ... start_idx = end_idx - converted_len\n ... indices = np.linspace(start_idx, end_idx, num=clip_len)\n ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)\n ... 
return indices\n\n\n >>> # video clip consists of 300 frames (10 seconds at 30 FPS)\n >>> file_path = hf_hub_download(\n ... repo_id=\"nielsr/video-demo\", filename=\"eating_spaghetti.mp4\", repo_type=\"dataset\"\n ... )\n >>> container = av.open(file_path)\n\n >>> # sample 8 frames\n >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=4, seg_len=container.streams.video[0].frames)\n >>> video = read_video_pyav(container, indices)\n\n >>> image_processor = AutoImageProcessor.from_pretrained(\"MCG-NJU/videomae-base\")\n >>> model = TimesformerModel.from_pretrained(\"facebook/timesformer-base-finetuned-k400\")\n\n >>> # prepare video for the model\n >>> inputs = image_processor(list(video), return_tensors=\"pt\")\n\n >>> # forward pass\n >>> outputs = model(**inputs)\n >>> last_hidden_states = outputs.last_hidden_state\n >>> list(last_hidden_states.shape)\n [1, 1569, 768]\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n embedding_output = self.embeddings(pixel_values)\n\n encoder_outputs = self.encoder(\n embedding_output,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n if self.layernorm is not None:\n sequence_output = self.layernorm(sequence_output)\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[1:]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"TimeSformer Model transformer with a video classification head on top (a linear layer on top of the final hidden state\nof the [CLS] token) e.g. for ImageNet.\"\"\",\n TIMESFORMER_START_DOCSTRING,\n)\nclass TimesformerForVideoClassification(TimesformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.num_labels = config.num_labels\n self.timesformer = TimesformerModel(config)\n\n # Classifier head\n self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(TIMESFORMER_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n pixel_values: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, ImageClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the image classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n\n Examples:\n\n ```python\n >>> import av\n >>> import torch\n >>> import numpy as np\n\n >>> from transformers import AutoImageProcessor, TimesformerForVideoClassification\n >>> from huggingface_hub import hf_hub_download\n\n >>> np.random.seed(0)\n\n\n >>> def read_video_pyav(container, indices):\n ... '''\n ... Decode the video with PyAV decoder.\n ... Args:\n ... container (`av.container.input.InputContainer`): PyAV container.\n ... indices (`List[int]`): List of frame indices to decode.\n ... Returns:\n ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).\n ... '''\n ... frames = []\n ... container.seek(0)\n ... start_index = indices[0]\n ... end_index = indices[-1]\n ... for i, frame in enumerate(container.decode(video=0)):\n ... if i > end_index:\n ... break\n ... if i >= start_index and i in indices:\n ... frames.append(frame)\n ... return np.stack([x.to_ndarray(format=\"rgb24\") for x in frames])\n\n\n >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):\n ... '''\n ... Sample a given number of frame indices from the video.\n ... Args:\n ... clip_len (`int`): Total number of frames to sample.\n ... frame_sample_rate (`int`): Sample every n-th frame.\n ... seg_len (`int`): Maximum allowed index of sample's last frame.\n ... Returns:\n ... indices (`List[int]`): List of sampled frame indices\n ... '''\n ... converted_len = int(clip_len * frame_sample_rate)\n ... end_idx = np.random.randint(converted_len, seg_len)\n ... start_idx = end_idx - converted_len\n ... indices = np.linspace(start_idx, end_idx, num=clip_len)\n ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)\n ... return indices\n\n\n >>> # video clip consists of 300 frames (10 seconds at 30 FPS)\n >>> file_path = hf_hub_download(\n ... repo_id=\"nielsr/video-demo\", filename=\"eating_spaghetti.mp4\", repo_type=\"dataset\"\n ... )\n >>> container = av.open(file_path)\n\n >>> # sample 8 frames\n >>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)\n >>> video = read_video_pyav(container, indices)\n\n >>> image_processor = AutoImageProcessor.from_pretrained(\"MCG-NJU/videomae-base-finetuned-kinetics\")\n >>> model = TimesformerForVideoClassification.from_pretrained(\"facebook/timesformer-base-finetuned-k400\")\n\n >>> inputs = image_processor(list(video), return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... outputs = model(**inputs)\n ... 
logits = outputs.logits\n\n >>> # model predicts one of the 400 Kinetics-400 classes\n >>> predicted_label = logits.argmax(-1).item()\n >>> print(model.config.id2label[predicted_label])\n eating spaghetti\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.timesformer(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0][:, 0]\n\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return ImageClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n", "output": ["drop_path", "TimesformerModel", "TimesformerSelfOutput", "TimesformerEmbeddings", "TimeSformerDropPath", "TimesformerForVideoClassification", "TimesformerPatchEmbeddings", "TimeSformerAttention", "TimesformerPreTrainedModel", "TimesformerOutput", "TimesformerLayer", "TimesformerEncoder", "TimesformerSelfAttention", "TimesformerIntermediate"], "metadata": {"file_path": "transformers-main/src/transformers/models/timesformer/modeling_timesformer.py", "file_length": 10850, "symbol_dict": [{"symbol": "drop_path", "type": "mannual_defined_function", "byte_location": 6666, "location": 2176}, {"symbol": "TimesformerModel", "type": "mannual_defined_class", "byte_location": 23161, "location": 7280}, {"symbol": "TimesformerSelfAttention", "type": "mannual_defined_class", "byte_location": 8474, "location": 2776}, {"symbol": "TimesformerPatchEmbeddings", "type": "mannual_defined_class", "byte_location": 1664, "location": 544}, {"symbol": "TimesformerPreTrainedModel", "type": "mannual_defined_class", "byte_location": 20461, "location": 6515}, {"symbol": "TimesformerOutput", "type": "mannual_defined_class", "byte_location": 11991, "location": 3936}, {"symbol": "TimesformerIntermediate", "type": "mannual_defined_class", "byte_location": 11273, "location": 3712}, {"symbol": "TimeSformerAttention", "type": "mannual_defined_class", "byte_location": 10469, "location": 3435}, {"symbol": "TimesformerSelfOutput", "type": "mannual_defined_class", "byte_location": 9827, "location": 3245}, {"symbol": "TimesformerForVideoClassification", "type": "mannual_defined_class", "byte_location": 28848, "location": 8967}, {"symbol": "TimesformerEmbeddings", "type": "mannual_defined_class", "byte_location": 2860, "location": 934}, {"symbol": "TimeSformerDropPath", "type": "mannual_defined_class", "byte_location": 7847, "location": 2546}, {"symbol": 
"TimesformerEncoder", "type": "mannual_defined_class", "byte_location": 18689, "location": 6017}, {"symbol": "TimesformerLayer", "type": "mannual_defined_class", "byte_location": 12594, "location": 4153}]}} {"input": "# coding=utf-8\n# Copyright 2021 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch ViT model.\"\"\"\n\n\nimport collections.abc\nimport math\nfrom typing import Dict, List, Optional, Set, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n ImageClassifierOutput,\n MaskedImageModelingOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer\nfrom ...utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_vit import ViTConfig\n\n\nlogger = logging.get_logger(__name__)\n\n# General docstring\n_CONFIG_FOR_DOC = \"ViTConfig\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"google/vit-base-patch16-224-in21k\"\n_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]\n\n# Image classification docstring\n_IMAGE_CLASS_CHECKPOINT = \"google/vit-base-patch16-224\"\n_IMAGE_CLASS_EXPECTED_OUTPUT = \"Egyptian cat\"\n\n\nVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/vit-base-patch16-224\",\n # See all ViT models at https://huggingface.co/models?filter=vit\n]\n\n\nclass ViTEmbeddings(nn.Module):\n \"\"\"\n Construct the CLS token, position and patch embeddings. 
Optionally, also the mask token.\n \"\"\"\n\n def __init__(self, config: ViTConfig, use_mask_token: bool = False) -> None:\n super().__init__()\n\n self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))\n self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None\n self.patch_embeddings = ViTPatchEmbeddings(config)\n num_patches = self.patch_embeddings.num_patches\n self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.config = config\n\n def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:\n \"\"\"\n This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher\n resolution images.\n\n Source:\n https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174\n \"\"\"\n\n num_patches = embeddings.shape[1] - 1\n num_positions = self.position_embeddings.shape[1] - 1\n if num_patches == num_positions and height == width:\n return self.position_embeddings\n class_pos_embed = self.position_embeddings[:, 0]\n patch_pos_embed = self.position_embeddings[:, 1:]\n dim = embeddings.shape[-1]\n h0 = height // self.config.patch_size\n w0 = width // self.config.patch_size\n # we add a small number to avoid floating point error in the interpolation\n # see discussion at https://github.com/facebookresearch/dino/issues/8\n h0, w0 = h0 + 0.1, w0 + 0.1\n patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)\n patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)\n patch_pos_embed = nn.functional.interpolate(\n patch_pos_embed,\n scale_factor=(h0 / math.sqrt(num_positions), w0 / math.sqrt(num_positions)),\n mode=\"bicubic\",\n align_corners=False,\n )\n assert int(h0) == patch_pos_embed.shape[-2] and int(w0) == patch_pos_embed.shape[-1]\n patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)\n return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)\n\n def forward(\n self,\n pixel_values: torch.Tensor,\n bool_masked_pos: Optional[torch.BoolTensor] = None,\n interpolate_pos_encoding: bool = False,\n ) -> torch.Tensor:\n batch_size, num_channels, height, width = pixel_values.shape\n embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)\n\n if bool_masked_pos is not None:\n seq_length = embeddings.shape[1]\n mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)\n # replace the masked visual tokens by mask_tokens\n mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)\n embeddings = embeddings * (1.0 - mask) + mask_tokens * mask\n\n # add the [CLS] token to the embedded patch tokens\n cls_tokens = self.cls_token.expand(batch_size, -1, -1)\n embeddings = torch.cat((cls_tokens, embeddings), dim=1)\n\n # add positional encoding to each token\n if interpolate_pos_encoding:\n embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)\n else:\n embeddings = embeddings + self.position_embeddings\n\n embeddings = self.dropout(embeddings)\n\n return embeddings\n\n\nclass ViTPatchEmbeddings(nn.Module):\n \"\"\"\n This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial\n `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a\n Transformer.\n \"\"\"\n\n def 
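# Hedged sketch of the position-embedding interpolation idea used by ViTEmbeddings above:
# the learned (1, 1 + N, D) table is split into the [CLS] slot and an N = g*g patch grid,
# the grid is resized with bicubic interpolation for the new resolution, then re-flattened.
# The shapes below (grid 14 -> 16, D = 32) are illustrative only, and `size=` is used here
# instead of the scale-factor trick in the record.
import math
import torch
import torch.nn.functional as F

def resize_position_embeddings(pos_embed: torch.Tensor, new_grid: int) -> torch.Tensor:
    cls_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]       # (1, 1, D) and (1, N, D)
    dim = pos_embed.shape[-1]
    old_grid = int(math.sqrt(patch_pos.shape[1]))
    patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    patch_pos = F.interpolate(patch_pos, size=(new_grid, new_grid),
                              mode="bicubic", align_corners=False)
    patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)
    return torch.cat((cls_pos, patch_pos), dim=1)

pos = torch.randn(1, 1 + 14 * 14, 32)
print(resize_position_embeddings(pos, 16).shape)   # torch.Size([1, 257, 32])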
__init__(self, config):\n super().__init__()\n image_size, patch_size = config.image_size, config.patch_size\n num_channels, hidden_size = config.num_channels, config.hidden_size\n\n image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)\n patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.num_patches = num_patches\n\n self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)\n\n def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:\n batch_size, num_channels, height, width = pixel_values.shape\n if num_channels != self.num_channels:\n raise ValueError(\n \"Make sure that the channel dimension of the pixel values match with the one set in the configuration.\"\n f\" Expected {self.num_channels} but got {num_channels}.\"\n )\n if not interpolate_pos_encoding:\n if height != self.image_size[0] or width != self.image_size[1]:\n raise ValueError(\n f\"Input image size ({height}*{width}) doesn't match model\"\n f\" ({self.image_size[0]}*{self.image_size[1]}).\"\n )\n embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)\n return embeddings\n\n\nclass ViTSelfAttention(nn.Module):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size {config.hidden_size,} is not a multiple of the number of attention \"\n f\"heads {config.num_attention_heads}.\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False\n ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:\n mixed_query_layer = self.query(hidden_states)\n\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = 
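# Hedged sketch of the patch-embedding trick in ViTPatchEmbeddings above: a Conv2d whose
# kernel size and stride both equal the patch size maps (B, C, H, W) pixels to one
# hidden_size vector per non-overlapping patch. The numbers are illustrative defaults.
import torch
from torch import nn

patch_size, hidden_size = 16, 768
proj = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)

pixels = torch.randn(2, 3, 224, 224)
patches = proj(pixels)                        # (2, 768, 14, 14)
tokens = patches.flatten(2).transpose(1, 2)   # (2, 196, 768) -- one token per patch
print(tokens.shape)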
self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n return outputs\n\n\nclass ViTSelfOutput(nn.Module):\n \"\"\"\n The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the\n layernorm applied before each block.\n \"\"\"\n\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n return hidden_states\n\n\nclass ViTAttention(nn.Module):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n self.attention = ViTSelfAttention(config)\n self.output = ViTSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads: Set[int]) -> None:\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.attention.query = prune_linear_layer(self.attention.query, index)\n self.attention.key = prune_linear_layer(self.attention.key, index)\n self.attention.value = prune_linear_layer(self.attention.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)\n self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:\n self_outputs = self.attention(hidden_states, head_mask, output_attentions)\n\n attention_output = self.output(self_outputs[0], hidden_states)\n\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass ViTIntermediate(nn.Module):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n\n return hidden_states\n\n\nclass ViTOutput(nn.Module):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states 
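# Hedged sketch of the attention math carried out by ViTSelfAttention above:
# scores = Q @ K^T / sqrt(head_dim), softmax over the key axis, an optional 0/1 head mask
# multiplied into the probabilities, then a weighted sum of V. Toy shapes only.
import math
import torch

B, H, T, D = 2, 4, 5, 8                               # batch, heads, tokens, head_dim
q, k, v = (torch.randn(B, H, T, D) for _ in range(3))

scores = q @ k.transpose(-1, -2) / math.sqrt(D)       # (B, H, T, T)
probs = scores.softmax(dim=-1)
head_mask = torch.tensor([1.0, 1.0, 0.0, 1.0]).view(1, H, 1, 1)
probs = probs * head_mask                             # zeroes out the third head entirely
context = probs @ v                                   # (B, H, T, D)
print(context.shape)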
= self.dropout(hidden_states)\n\n hidden_states = hidden_states + input_tensor\n\n return hidden_states\n\n\nclass ViTLayer(nn.Module):\n \"\"\"This corresponds to the Block class in the timm implementation.\"\"\"\n\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = ViTAttention(config)\n self.intermediate = ViTIntermediate(config)\n self.output = ViTOutput(config)\n self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:\n self_attention_outputs = self.attention(\n self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n # first residual connection\n hidden_states = attention_output + hidden_states\n\n # in ViT, layernorm is also applied after self-attention\n layer_output = self.layernorm_after(hidden_states)\n layer_output = self.intermediate(layer_output)\n\n # second residual connection is done here\n layer_output = self.output(layer_output, hidden_states)\n\n outputs = (layer_output,) + outputs\n\n return outputs\n\n\nclass ViTEncoder(nn.Module):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n output_hidden_states: bool = False,\n return_dict: bool = True,\n ) -> Union[tuple, BaseModelOutput]:\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(\n layer_module.__call__,\n hidden_states,\n layer_head_mask,\n output_attentions,\n )\n else:\n layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass ViTPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = ViTConfig\n base_model_prefix = \"vit\"\n main_input_name = \"pixel_values\"\n supports_gradient_checkpointing = True\n _no_split_modules = [\"ViTEmbeddings\", 
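# Hedged sketch of the pre-norm layout implemented by ViTLayer above: LayerNorm is applied
# *before* attention and before the MLP, and each sub-block is added back onto its input.
# The attention and MLP modules here are generic stand-ins, not the classes from the record.
import torch
from torch import nn

class PreNormBlock(nn.Module):
    def __init__(self, dim: int = 64, mlp_dim: int = 128, heads: int = 4):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(nn.Linear(dim, mlp_dim), nn.GELU(), nn.Linear(mlp_dim, dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.norm1(x)
        x = x + self.attn(h, h, h, need_weights=False)[0]   # first residual connection
        x = x + self.mlp(self.norm2(x))                     # second residual connection
        return x

print(PreNormBlock()(torch.randn(2, 10, 64)).shape)   # torch.Size([2, 10, 64])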
\"ViTLayer\"]\n\n def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid\n # `trunc_normal_cpu` not implemented in `half` issues\n module.weight.data = nn.init.trunc_normal_(\n module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range\n ).to(module.weight.dtype)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, ViTEmbeddings):\n module.position_embeddings.data = nn.init.trunc_normal_(\n module.position_embeddings.data.to(torch.float32),\n mean=0.0,\n std=self.config.initializer_range,\n ).to(module.position_embeddings.dtype)\n\n module.cls_token.data = nn.init.trunc_normal_(\n module.cls_token.data.to(torch.float32),\n mean=0.0,\n std=self.config.initializer_range,\n ).to(module.cls_token.dtype)\n\n\nVIT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nVIT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]\n for details.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n interpolate_pos_encoding (`bool`, *optional*):\n Whether to interpolate the pre-trained position encodings.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ViT Model transformer outputting raw hidden-states without any specific head on top.\",\n VIT_START_DOCSTRING,\n)\nclass ViTModel(ViTPreTrainedModel):\n def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False):\n super().__init__(config)\n self.config = config\n\n self.embeddings = ViTEmbeddings(config, use_mask_token=use_mask_token)\n self.encoder = ViTEncoder(config)\n\n self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.pooler = ViTPooler(config) if add_pooling_layer else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self) -> ViTPatchEmbeddings:\n return self.embeddings.patch_embeddings\n\n def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPooling,\n config_class=_CONFIG_FOR_DOC,\n modality=\"vision\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n pixel_values: Optional[torch.Tensor] = None,\n bool_masked_pos: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n interpolate_pos_encoding: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPooling]:\n r\"\"\"\n bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):\n Boolean masked positions. 
Indicates which patches are masked (1) and which aren't (0).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if pixel_values is None:\n raise ValueError(\"You have to specify pixel_values\")\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?)\n expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype\n if pixel_values.dtype != expected_dtype:\n pixel_values = pixel_values.to(expected_dtype)\n\n embedding_output = self.embeddings(\n pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n sequence_output = self.layernorm(sequence_output)\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)\n return head_outputs + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\nclass ViTPooler(nn.Module):\n def __init__(self, config: ViTConfig):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\n@add_start_docstrings(\n \"\"\"ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).\n\n \n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n \n \"\"\",\n VIT_START_DOCSTRING,\n)\nclass ViTForMaskedImageModeling(ViTPreTrainedModel):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__(config)\n\n self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True)\n\n self.decoder = nn.Sequential(\n nn.Conv2d(\n in_channels=config.hidden_size,\n out_channels=config.encoder_stride**2 * config.num_channels,\n kernel_size=1,\n ),\n nn.PixelShuffle(config.encoder_stride),\n )\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)\n def 
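# Hedged sketch of the decoder wiring in ViTForMaskedImageModeling above: a 1x1 convolution
# expands each patch token to encoder_stride**2 * num_channels values, and PixelShuffle
# rearranges them into a (stride x stride) pixel block, recovering the full image size.
# Shapes assume the usual 16x16 patches on a 224x224 image.
import torch
from torch import nn

hidden_size, stride, channels, grid = 768, 16, 3, 14
decoder = nn.Sequential(
    nn.Conv2d(hidden_size, stride**2 * channels, kernel_size=1),
    nn.PixelShuffle(stride),
)
tokens_as_grid = torch.randn(1, hidden_size, grid, grid)   # patch tokens reshaped to a map
print(decoder(tokens_as_grid).shape)                       # torch.Size([1, 3, 224, 224])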
forward(\n self,\n pixel_values: Optional[torch.Tensor] = None,\n bool_masked_pos: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n interpolate_pos_encoding: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[tuple, MaskedImageModelingOutput]:\n r\"\"\"\n bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):\n Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).\n\n Returns:\n\n Examples:\n ```python\n >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling\n >>> import torch\n >>> from PIL import Image\n >>> import requests\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> image_processor = AutoImageProcessor.from_pretrained(\"google/vit-base-patch16-224-in21k\")\n >>> model = ViTForMaskedImageModeling.from_pretrained(\"google/vit-base-patch16-224-in21k\")\n\n >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2\n >>> pixel_values = image_processor(images=image, return_tensors=\"pt\").pixel_values\n >>> # create random boolean mask of shape (batch_size, num_patches)\n >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()\n\n >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)\n >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction\n >>> list(reconstructed_pixel_values.shape)\n [1, 3, 224, 224]\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride):\n raise ValueError(\n \"When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that \"\n \"the reconstructed image has the same dimensions as the input. 
\"\n f\"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}.\"\n )\n\n outputs = self.vit(\n pixel_values,\n bool_masked_pos=bool_masked_pos,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n interpolate_pos_encoding=interpolate_pos_encoding,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n # Reshape to (batch_size, num_channels, height, width)\n sequence_output = sequence_output[:, 1:]\n batch_size, sequence_length, num_channels = sequence_output.shape\n height = width = math.floor(sequence_length**0.5)\n sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)\n\n # Reconstruct pixel values\n reconstructed_pixel_values = self.decoder(sequence_output)\n\n masked_im_loss = None\n if bool_masked_pos is not None:\n size = self.config.image_size // self.config.patch_size\n bool_masked_pos = bool_masked_pos.reshape(-1, size, size)\n mask = (\n bool_masked_pos.repeat_interleave(self.config.patch_size, 1)\n .repeat_interleave(self.config.patch_size, 2)\n .unsqueeze(1)\n .contiguous()\n )\n reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction=\"none\")\n masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels\n\n if not return_dict:\n output = (reconstructed_pixel_values,) + outputs[1:]\n return ((masked_im_loss,) + output) if masked_im_loss is not None else output\n\n return MaskedImageModelingOutput(\n loss=masked_im_loss,\n reconstruction=reconstructed_pixel_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. for ImageNet.\n\n \n\n Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n \n \"\"\",\n VIT_START_DOCSTRING,\n)\nclass ViTForImageClassification(ViTPreTrainedModel):\n def __init__(self, config: ViTConfig) -> None:\n super().__init__(config)\n\n self.num_labels = config.num_labels\n self.vit = ViTModel(config, add_pooling_layer=False)\n\n # Classifier head\n self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_IMAGE_CLASS_CHECKPOINT,\n output_type=ImageClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,\n )\n def forward(\n self,\n pixel_values: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n interpolate_pos_encoding: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[tuple, ImageClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the image classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.vit(\n pixel_values,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n interpolate_pos_encoding=interpolate_pos_encoding,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.classifier(sequence_output[:, 0, :])\n\n loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(logits.device)\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return ImageClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n", "output": ["ViTSelfAttention", "ViTForImageClassification", "ViTPreTrainedModel", "ViTModel", "ViTOutput", "ViTForMaskedImageModeling", "ViTEncoder", "ViTAttention", "ViTPooler", "ViTEmbeddings", "ViTLayer", "ViTIntermediate", "ViTPatchEmbeddings", "ViTSelfOutput"], "metadata": {"file_path": "transformers-main/src/transformers/models/vit/modeling_vit.py", "file_length": 11144, "symbol_dict": [{"symbol": "ViTAttention", "type": "mannual_defined_class", "byte_location": 11106, "location": 3656}, {"symbol": "ViTSelfAttention", "type": "mannual_defined_class", "byte_location": 7617, "location": 2519}, {"symbol": "ViTSelfOutput", "type": "mannual_defined_class", "byte_location": 10460, "location": 3456}, {"symbol": "ViTEncoder", "type": "mannual_defined_class", "byte_location": 15582, "location": 5078}, {"symbol": "ViTPooler", "type": "mannual_defined_class", "byte_location": 25424, "location": 8013}, {"symbol": "ViTForImageClassification", "type": "mannual_defined_class", "byte_location": 31964, "location": 10072}, {"symbol": "ViTLayer", "type": "mannual_defined_class", "byte_location": 13903, "location": 4571}, {"symbol": "ViTPatchEmbeddings", "type": "mannual_defined_class", "byte_location": 5664, "location": 1923}, {"symbol": "ViTPreTrainedModel", "type": "mannual_defined_class", "byte_location": 17506, "location": 5631}, {"symbol": "ViTForMaskedImageModeling", "type": "mannual_defined_class", "byte_location": 26370, "location": 8325}, {"symbol": "ViTEmbeddings", "type": "mannual_defined_class", "byte_location": 1916, "location": 667}, {"symbol": "ViTIntermediate", "type": "mannual_defined_class", "byte_location": 12786, "location": 4215}, {"symbol": "ViTOutput", "type": "mannual_defined_class", "byte_location": 13373, 
"location": 4401}, {"symbol": "ViTModel", "type": "mannual_defined_class", "byte_location": 21190, "location": 6711}]}} {"input": "# coding=utf-8\n# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch GPT-J model.\"\"\"\n\nimport warnings\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.fx\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutputWithPast,\n CausalLMOutputWithPast,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutputWithPast,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n is_torch_fx_proxy,\n logging,\n)\nfrom ...utils.model_parallel_utils import assert_device_map, get_device_map\nfrom .configuration_gptj import GPTJConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"hf-internal-testing/tiny-random-gptj\"\n_REAL_CHECKPOINT_FOR_DOC = \"EleutherAI/gpt-j-6B\"\n_CONFIG_FOR_DOC = \"GPTJConfig\"\n\n\nGPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"EleutherAI/gpt-j-6B\",\n # See all GPT-J models at https://huggingface.co/models?filter=gptj\n]\n\n\ndef create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:\n inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))\n sinusoid_inp = torch.einsum(\"i , j -> i j\", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()\n return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)\n\n\n@torch.fx.wrap\ndef get_embed_positions(embed_positions, position_ids):\n return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)\n\n\ndef rotate_every_two(x: torch.Tensor) -> torch.Tensor:\n x1 = x[:, :, :, ::2]\n x2 = x[:, :, :, 1::2]\n x = torch.stack((-x2, x1), dim=-1)\n return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... 
(d j)')\n\n\ndef apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:\n sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)\n cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)\n return (tensor * cos) + (rotate_every_two(tensor) * sin)\n\n\nclass GPTJAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n max_positions = config.max_position_embeddings\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(\n 1, 1, max_positions, max_positions\n ),\n persistent=False,\n )\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9), persistent=False)\n\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n\n self.embed_dim = config.hidden_size\n self.num_attention_heads = config.num_attention_heads\n self.head_dim = self.embed_dim // self.num_attention_heads\n if self.head_dim * self.num_attention_heads != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and\"\n f\" `num_attention_heads`: {self.num_attention_heads}).\"\n )\n self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())\n\n self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)\n self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)\n self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)\n self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)\n self.rotary_dim = config.rotary_dim\n pos_embd_dim = self.rotary_dim or self.embed_dim\n self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)\n\n def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):\n \"\"\"\n Splits hidden dim into attn_head_size and num_attention_heads\n \"\"\"\n new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)\n tensor = tensor.view(new_shape)\n if rotary:\n return tensor\n if len(tensor.shape) == 5:\n return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)\n elif len(tensor.shape) == 4:\n return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n else:\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}\")\n\n def _merge_heads(self, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden dim\n \"\"\"\n if len(tensor.shape) == 5:\n tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()\n elif len(tensor.shape) == 4:\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n else:\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}\")\n new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)\n return tensor.view(new_shape)\n\n def _attn(\n self,\n query,\n key,\n value,\n attention_mask=None,\n head_mask=None,\n ):\n # compute causal mask from causal mask buffer\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n # Keep the attention weights computation in fp32 to avoid overflow issues\n query = query.to(torch.float32)\n key = key.to(torch.float32)\n\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n\n mask_value = torch.finfo(attn_weights.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected 
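# Hedged sketch of the rotary-embedding helpers defined above for GPT-J: a sin/cos table is
# built per position, every pair of channels is rotated, and in the attention module the
# rotation only touches the first `rotary_dim` channels of each head. Toy shapes only.
import torch

def sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
    angles = torch.einsum("i,j->ij", torch.arange(num_pos).float(), inv_freq)
    return torch.cat((torch.sin(angles), torch.cos(angles)), dim=1)   # (num_pos, dim)

def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x[..., ::2], x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)

def apply_rotary(x: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
    sin = torch.repeat_interleave(sin[:, :, None, :], 2, dim=3)
    cos = torch.repeat_interleave(cos[:, :, None, :], 2, dim=3)
    return x * cos + rotate_every_two(x) * sin

batch, seq, heads, rotary_dim = 1, 6, 2, 8
q = torch.randn(batch, seq, heads, rotary_dim)             # layout used before the permute
table = sinusoidal_positions(16, rotary_dim)[None, :seq]   # (1, seq, rotary_dim)
sin, cos = table.split(rotary_dim // 2, dim=-1)
print(apply_rotary(q, sin, cos).shape)                     # torch.Size([1, 6, 2, 8])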
scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)\n attn_weights = torch.where(causal_mask, attn_weights, mask_value)\n\n attn_weights = attn_weights / self.scale_attn\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_weights = attn_weights + attention_mask\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n attn_weights = attn_weights.to(value.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n\n return attn_output, attn_weights\n\n def _get_embed_positions(self, position_ids):\n embed_positions = self.embed_positions\n if embed_positions.device != position_ids.device:\n embed_positions = embed_positions.to(position_ids.device)\n self.embed_positions = embed_positions\n return embed_positions.repeat(position_ids.shape[0], 1, 1)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ) -> Union[\n Tuple[torch.Tensor, Tuple[torch.Tensor]],\n Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],\n ]:\n query = self.q_proj(hidden_states)\n key = self.k_proj(hidden_states)\n value = self.v_proj(hidden_states)\n\n query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)\n key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)\n value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)\n\n if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():\n # The logic to conditionally copy to GPU could not be traced, so we do this\n # every time in the torch.fx case\n embed_positions = get_embed_positions(self.embed_positions, position_ids)\n else:\n embed_positions = self._get_embed_positions(position_ids)\n\n repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])\n sincos = torch.gather(embed_positions, 1, repeated_position_ids)\n sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)\n\n if self.rotary_dim is not None:\n k_rot = key[:, :, :, : self.rotary_dim]\n k_pass = key[:, :, :, self.rotary_dim :]\n\n q_rot = query[:, :, :, : self.rotary_dim]\n q_pass = query[:, :, :, self.rotary_dim :]\n\n k_rot = apply_rotary_pos_emb(k_rot, sin, cos)\n q_rot = apply_rotary_pos_emb(q_rot, sin, cos)\n\n key = torch.cat([k_rot, k_pass], dim=-1)\n query = torch.cat([q_rot, q_pass], dim=-1)\n else:\n key = apply_rotary_pos_emb(key, sin, cos)\n query = apply_rotary_pos_emb(query, sin, cos)\n\n key = key.permute(0, 2, 1, 3)\n query = query.permute(0, 2, 1, 3)\n\n if layer_past is not None:\n past_key = layer_past[0]\n past_value = layer_past[1]\n key = torch.cat((past_key, key), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation.\n # Reference: 
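# Hedged sketch of the causal masking done in GPTJAttention._attn above: score positions the
# causal mask forbids are overwritten with the dtype's most negative value before the
# softmax, so they receive (numerically) zero probability. Toy sizes, no padding mask.
import torch

seq = 4
scores = torch.randn(1, 1, seq, seq)
causal = torch.tril(torch.ones(seq, seq, dtype=torch.bool)).view(1, 1, seq, seq)

mask_value = torch.finfo(scores.dtype).min
scores = torch.where(causal, scores, torch.tensor(mask_value, dtype=scores.dtype))
probs = scores.softmax(dim=-1)
print(probs[0, 0])   # upper-triangular entries are ~0; each row still sums to 1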
https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128\n present = (key.to(hidden_states.dtype), value)\n else:\n present = None\n\n # compute self-attention: V x Softmax(QK^T)\n attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)\n\n attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)\n attn_output = self.out_proj(attn_output)\n attn_output = self.resid_dropout(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs # a, present, (attentions)\n\n\nclass GPTJMLP(nn.Module):\n def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim\n super().__init__()\n embed_dim = config.n_embd\n\n self.fc_in = nn.Linear(embed_dim, intermediate_size)\n self.fc_out = nn.Linear(intermediate_size, embed_dim)\n\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:\n hidden_states = self.fc_in(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.fc_out(hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states\n\n\nclass GPTJBlock(nn.Module):\n def __init__(self, config):\n super().__init__()\n inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd\n self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n self.attn = GPTJAttention(config)\n self.mlp = GPTJMLP(inner_dim, config)\n\n def forward(\n self,\n hidden_states: Optional[torch.FloatTensor],\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:\n residual = hidden_states\n hidden_states = self.ln_1(hidden_states)\n attn_outputs = self.attn(\n hidden_states=hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs[0] # output_attn: a, present, (attentions)\n outputs = attn_outputs[1:]\n\n feed_forward_hidden_states = self.mlp(hidden_states)\n hidden_states = attn_output + feed_forward_hidden_states + residual\n\n if use_cache:\n outputs = (hidden_states,) + outputs\n else:\n outputs = (hidden_states,) + outputs[1:]\n\n return outputs # hidden_states, present, (attentions)\n\n\nclass GPTJPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GPTJConfig\n base_model_prefix = \"transformer\"\n is_parallelizable = True\n supports_gradient_checkpointing = True\n _no_split_modules = [\"GPTJBlock\"]\n _skip_keys_device_placement = \"past_key_values\"\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear,)):\n # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization\n # cf 
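# Hedged sketch of the key/value cache handling in GPTJAttention.forward above: cached keys
# and values from earlier steps are concatenated along the sequence axis, so each new token
# only has to be projected once per generation step. Shapes are illustrative.
import torch

batch, heads, head_dim = 1, 2, 4
past_key = torch.randn(batch, heads, 5, head_dim)     # 5 tokens already in the cache
past_value = torch.randn(batch, heads, 5, head_dim)

new_key = torch.randn(batch, heads, 1, head_dim)      # projection of the newest token
new_value = torch.randn(batch, heads, 1, head_dim)

key = torch.cat((past_key, new_key), dim=-2)
value = torch.cat((past_value, new_value), dim=-2)
present = (key, value)                                # returned when use_cache=True
print(key.shape)                                      # torch.Size([1, 2, 6, 4])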
https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nGPTJ_START_DOCSTRING = r\"\"\"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nGPTJ_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\nPARALLELIZE_DOCSTRING = r\"\"\"\n This is an experimental feature and is a subject to change at a moment's notice. 
Uses a device map to distribute\n attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks\n across all devices.\n\n Args:\n device_map (`Dict[int, list]`, optional, defaults to None):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the\n following number of attention modules:\n\n - gpt-j-6B: 28\n\n Example:\n\n ```python\n # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:\n model = GPTJForCausalLM.from_pretrained(\"EleutherAI/gpt-j-6B\")\n device_map = {\n 0: [0, 1, 2, 3, 4, 5, 6],\n 1: [7, 8, 9, 10, 11, 12, 13],\n 2: [14, 15, 16, 17, 18, 19, 20],\n 3: [21, 22, 23, 24, 25, 26, 27],\n }\n model.parallelize(device_map)\n ```\n\"\"\"\n\nDEPARALLELIZE_DOCSTRING = r\"\"\"\n Moves the model to CPU from a model parallel state.\n\n Example:\n\n ```python\n # On a 4 GPU machine with gpt-j-6B:\n model = GPTJForCausalLM.from_pretrained(\"EleutherAI/gpt-j-6B\")\n device_map = {\n 0: [0, 1, 2, 3, 4, 5, 6],\n 1: [7, 8, 9, 10, 11, 12, 13],\n 2: [14, 15, 16, 17, 18, 19, 20],\n 3: [21, 22, 23, 24, 25, 26, 27],\n }\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n ```\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.\",\n GPTJ_START_DOCSTRING,\n)\nclass GPTJModel(GPTJPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embed_dim = config.n_embd\n self.vocab_size = config.vocab_size\n self.wte = nn.Embedding(config.vocab_size, self.embed_dim)\n self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])\n self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n self.gradient_checkpointing = False\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n warnings.warn(\n \"`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your\"\n \" model with `device_map='balanced'` in the call to `from_pretrained`. 
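# Hedged sketch of what an "even" device map for the parallelize() API documented above can
# look like: 28 GPT-J attention blocks spread over 4 devices. even_device_map is a stand-in
# written for illustration, not the library's get_device_map implementation.
def even_device_map(n_blocks: int, n_devices: int) -> dict:
    per_device = -(-n_blocks // n_devices)   # ceiling division
    return {
        d: list(range(d * per_device, min((d + 1) * per_device, n_blocks)))
        for d in range(n_devices)
    }

print(even_device_map(28, 4))   # matches the 4-GPU gpt-j-6B example in the docstring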
You can also provide your own\"\n \" `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,\"\n \" ...}\",\n FutureWarning,\n )\n # Check validity of device_map\n self.device_map = (\n get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map\n )\n assert_device_map(self.device_map, len(self.h))\n self.model_parallel = True\n self.first_device = \"cpu\" if \"cpu\" in self.device_map.keys() else \"cuda:\" + str(min(self.device_map.keys()))\n self.last_device = \"cuda:\" + str(max(self.device_map.keys()))\n self.wte = self.wte.to(self.first_device)\n # Load onto devices\n for k, v in self.device_map.items():\n for block in v:\n cuda_device = \"cuda:\" + str(k)\n self.h[block] = self.h[block].to(cuda_device)\n # ln_f to last\n self.ln_f = self.ln_f.to(self.last_device)\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n warnings.warn(\n \"Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.\",\n FutureWarning,\n )\n self.model_parallel = False\n self.device_map = None\n self.first_device = \"cpu\"\n self.last_device = \"cpu\"\n self.wte = self.wte.to(\"cpu\")\n for index in range(len(self.h)):\n self.h[index] = self.h[index].to(\"cpu\")\n self.ln_f = self.ln_f.to(\"cpu\")\n torch.cuda.empty_cache()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, 
input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n past_length = past_key_values[0][0].size(-2)\n\n if position_ids is None:\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0)\n\n # Attention mask.\n if attention_mask is not None:\n if batch_size <= 0:\n raise ValueError(\"batch_size has to be defined and > 0\")\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and the dtype's smallest value for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x num_attention_heads x N x N\n # head_mask has shape n_layer x batch x num_attention_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n\n hidden_states = inputs_embeds\n\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n hidden_states = hidden_states + token_type_embeds\n\n hidden_states = self.drop(hidden_states)\n\n output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
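# Hedged sketch of the padding-mask conversion in GPTJModel.forward above: a 0/1 mask is
# reshaped to (batch, 1, 1, seq) and turned into an additive bias -- 0 where attention is
# allowed, the dtype minimum where it is not -- so it can simply be added to the scores.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])      # last two tokens are padding
bias = attention_mask[:, None, None, :].to(torch.float32)
bias = (1.0 - bias) * torch.finfo(torch.float32).min
print(bias[0, 0, 0])   # roughly [0, 0, 0, -3.4e38, -3.4e38]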
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(hidden_states.device)\n # Ensure layer_past is on same device as hidden_states (might not be correct)\n if layer_past is not None:\n layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)\n # Ensure that attention_mask is always on the same device as hidden_states\n if attention_mask is not None:\n attention_mask = attention_mask.to(hidden_states.device)\n if isinstance(head_mask, torch.Tensor):\n head_mask = head_mask.to(hidden_states.device)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n outputs = self._gradient_checkpointing_func(\n block.__call__,\n hidden_states,\n None,\n attention_mask,\n position_ids,\n head_mask[i],\n use_cache,\n output_attentions,\n )\n else:\n outputs = block(\n hidden_states=hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask[i],\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n # Model Parallel: If it's the last layer for that device, put things on the next device\n if self.model_parallel:\n for k, v in self.device_map.items():\n if i == v[-1] and \"cuda:\" + str(k) != self.last_device:\n hidden_states = hidden_states.to(\"cuda:\" + str(k + 1))\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT-J Model transformer with a language modeling head on top.\n \"\"\",\n GPTJ_START_DOCSTRING,\n)\nclass GPTJForCausalLM(GPTJPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.transformer = GPTJModel(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size)\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n warnings.warn(\n \"`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load\"\n \" your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own\"\n \" `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':\"\n \" 0, 'transformer.h.1': 1, ...}\",\n FutureWarning,\n )\n self.device_map = (\n get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))\n if device_map is None\n else device_map\n )\n assert_device_map(self.device_map, len(self.transformer.h))\n self.transformer.parallelize(self.device_map)\n self.lm_head = self.lm_head.to(self.transformer.first_device)\n self.model_parallel = True\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n warnings.warn(\n \"Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.\",\n FutureWarning,\n )\n self.transformer.deparallelize()\n self.transformer = self.transformer.to(\"cpu\")\n self.lm_head = self.lm_head.to(\"cpu\")\n self.model_parallel = False\n torch.cuda.empty_cache()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # Omit tokens covered by past_key_values\n if past_key_values:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -input_ids.shape[1] :]\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n )\n\n return model_inputs\n\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: 
Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.transformer.first_device)\n hidden_states = hidden_states.to(self.lm_head.weight.device)\n\n # make sure sampling in fp16 works correctly and\n # compute loss in fp32 to match with mesh-tf version\n # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179\n lm_logits = self.lm_head(hidden_states).to(torch.float32)\n\n loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(lm_logits.device)\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n loss = loss.to(hidden_states.dtype)\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(\n past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n ) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or\n [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past_key_values\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT-J Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT, GPT-2, GPT-Neo) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n \"\"\",\n GPTJ_START_DOCSTRING,\n)\nclass GPTJForSequenceClassification(GPTJPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.transformer = GPTJModel(config)\n self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=\"ydshieh/tiny-random-gptj-for-sequence-classification\",\n output_type=SequenceClassifierOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n logits = self.score(hidden_states)\n\n if input_ids is not None:\n batch_size = input_ids.shape[0]\n else:\n batch_size = inputs_embeds.shape[0]\n\n if self.config.pad_token_id is None and batch_size != 1:\n raise ValueError(\"Cannot handle batch sizes > 1 if no padding token is defined.\")\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility\n sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1\n sequence_lengths = sequence_lengths % input_ids.shape[-1]\n sequence_lengths = sequence_lengths.to(logits.device)\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be \"\n \"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n\n pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]\n\n loss = None\n if labels is not None:\n labels = labels.to(pooled_logits.device)\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(pooled_logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(pooled_logits, labels)\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutputWithPast(\n loss=loss,\n logits=pooled_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n GPTJ_START_DOCSTRING,\n)\nclass GPTJForQuestionAnswering(GPTJPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.transformer = GPTJModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n start_positions: Optional[torch.LongTensor] = None,\n end_positions: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1).to(start_logits.device)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1).to(end_logits.device)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n", "output": ["rotate_every_two", "apply_rotary_pos_emb", "create_sinusoidal_positions", "GPTJAttention", "GPTJForQuestionAnswering", "GPTJForSequenceClassification", "GPTJModel", "GPTJMLP", "GPTJPreTrainedModel", "GPTJForCausalLM", "GPTJBlock"], "metadata": {"file_path": "transformers-main/src/transformers/models/gptj/modeling_gptj.py", "file_length": 15806, "symbol_dict": [{"symbol": "apply_rotary_pos_emb", "type": "mannual_defined_function", "byte_location": 2470, "location": 923}, {"symbol": "create_sinusoidal_positions", "type": "mannual_defined_function", "byte_location": 1724, "location": 590}, {"symbol": "rotate_every_two", "type": "mannual_defined_function", "byte_location": 2238, "location": 813}, {"symbol": "GPTJMLP", "type": "mannual_defined_class", "byte_location": 11141, "location": 3931}, {"symbol": "GPTJModel", "type": "mannual_defined_class", "byte_location": 20182, "location": 6760}, {"symbol": "GPTJForQuestionAnswering", "type": "mannual_defined_class", "byte_location": 46027, "location": 14514}, {"symbol": "GPTJForSequenceClassification", "type": "mannual_defined_class", "byte_location": 40188, "location": 12798}, {"symbol": "GPTJPreTrainedModel", "type": "mannual_defined_class", "byte_location": 13582, "location": 4730}, {"symbol": "GPTJBlock", "type": "mannual_defined_class", "byte_location": 11877, 
"location": 4176}, {"symbol": "GPTJForCausalLM", "type": "mannual_defined_class", "byte_location": 31156, "location": 10019}, {"symbol": "GPTJAttention", "type": "mannual_defined_class", "byte_location": 2755, "location": 1044}]}} {"input": "# coding=utf-8\n# Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 GPT-J model.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n)\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutputWithPast,\n TFCausalLMOutputWithPast,\n TFQuestionAnsweringModelOutput,\n TFSequenceClassifierOutputWithPast,\n)\nfrom ...modeling_tf_utils import (\n TFCausalLanguageModelingLoss,\n TFModelInputType,\n TFPreTrainedModel,\n TFQuestionAnsweringLoss,\n TFSequenceClassificationLoss,\n TFSharedEmbeddings,\n get_initializer,\n keras,\n keras_serializable,\n unpack_inputs,\n)\nfrom ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax\nfrom ...utils import logging\nfrom .configuration_gptj import GPTJConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"EleutherAI/gpt-j-6B\"\n_CONFIG_FOR_DOC = \"GPTJConfig\"\n\nGPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"EleutherAI/gpt-j-6B\",\n # See all GPT-J models at https://huggingface.co/models?filter=gptj\n]\n\n\ndef create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:\n inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)\n sinusoid_inp = tf.cast(tf.einsum(\"i , j -> i j\", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)\n sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)\n out = tf.concat((sin, cos), axis=1)\n return out\n\n\ndef rotate_every_two(x: tf.Tensor) -> tf.Tensor:\n rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)\n new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]\n rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)\n return rotate_half_tensor\n\n\ndef apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:\n sin_pos, cos_pos = sincos\n sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)\n cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)\n return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)\n\n\nclass TFGPTJAttention(keras.layers.Layer):\n def __init__(self, config: GPTJConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.embed_dim = config.hidden_size\n self.num_attention_heads = config.num_attention_heads\n self.head_dim = self.embed_dim // self.num_attention_heads\n if self.head_dim * self.num_attention_heads != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and\"\n 
f\" `num_attention_heads`: {self.num_attention_heads}).\"\n )\n self.scale_attn = self.head_dim**0.5\n self.rotary_dim = config.rotary_dim\n\n self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)\n self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)\n\n self.q_proj = keras.layers.Dense(\n self.embed_dim,\n use_bias=False,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"q_proj\",\n )\n self.k_proj = keras.layers.Dense(\n self.embed_dim,\n use_bias=False,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"k_proj\",\n )\n self.v_proj = keras.layers.Dense(\n self.embed_dim,\n use_bias=False,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"v_proj\",\n )\n self.out_proj = keras.layers.Dense(\n self.embed_dim,\n use_bias=False,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"out_proj\",\n )\n\n self.max_positions = config.max_position_embeddings\n self.lower_triangle_mask = tf.reshape(\n tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),\n (1, 1, self.max_positions, self.max_positions),\n )\n pos_embd_dim = self.rotary_dim or self.embed_dim\n self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)\n\n def get_causal_mask(self, key_length, query_length) -> tf.Tensor:\n return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)\n\n @staticmethod\n def get_masked_bias(dtype: tf.DType) -> tf.Tensor:\n return tf.cast(tf.constant(-1e9), dtype)\n\n def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:\n \"\"\"\n Splits hidden dim into attn_head_size and num_attention_heads\n \"\"\"\n new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]\n hidden_states = tf.reshape(hidden_states, new_shape)\n if rotary:\n return hidden_states\n if len(shape_list(hidden_states)) == 4:\n return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)\n if len(shape_list(hidden_states)) == 5:\n return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}\")\n\n def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden dim\n \"\"\"\n if len(shape_list(hidden_states)) == 4:\n hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))\n elif len(shape_list(hidden_states)) == 5:\n hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))\n else:\n raise ValueError(f\"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}\")\n new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]\n return tf.reshape(hidden_states, new_shape)\n\n def _attn(\n self,\n query: tf.Tensor,\n key: tf.Tensor,\n value: tf.Tensor,\n attention_mask: tf.Tensor | None = None,\n head_mask: tf.Tensor | None = None,\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n # compute causal mask from causal mask buffer\n query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]\n causal_mask = self.get_causal_mask(key_length, query_length)\n\n # Keep the attention weights computation in fp32 to avoid overflow issues\n query = tf.cast(query, tf.float32)\n key = tf.cast(key, tf.float32)\n\n attn_weights = tf.matmul(query, key, transpose_b=True)\n attn_weights = 
tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))\n\n attn_weights = attn_weights / self.scale_attn\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_weights = attn_weights + attention_mask\n\n attn_weights = stable_softmax(attn_weights, axis=-1)\n attn_weights = tf.cast(attn_weights, value.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = tf.matmul(attn_weights, value)\n\n return attn_output, attn_weights\n\n def call(\n self,\n hidden_states: tf.Tensor,\n layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,\n attention_mask: tf.Tensor | None = None,\n position_ids: tf.Tensor | None = None,\n head_mask: tf.Tensor | None = None,\n use_cache: bool = False,\n output_attentions: bool = False,\n ):\n query = self.q_proj(hidden_states)\n key = self.k_proj(hidden_states)\n value = self.v_proj(hidden_states)\n\n query = self._split_heads(query, True)\n key = self._split_heads(key, True)\n value = self._split_heads(value, False)\n\n sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)\n sincos = tf.split(sincos, 2, axis=-1)\n if self.rotary_dim is not None:\n k_rot = key[:, :, :, : self.rotary_dim]\n k_pass = key[:, :, :, self.rotary_dim :]\n\n q_rot = query[:, :, :, : self.rotary_dim]\n q_pass = query[:, :, :, self.rotary_dim :]\n\n k_rot = apply_rotary_pos_emb(k_rot, sincos)\n q_rot = apply_rotary_pos_emb(q_rot, sincos)\n\n key = tf.concat((k_rot, k_pass), axis=-1)\n query = tf.concat((q_rot, q_pass), axis=-1)\n else:\n key = apply_rotary_pos_emb(key, sincos)\n query = apply_rotary_pos_emb(query, sincos)\n\n key = tf.transpose(key, (0, 2, 1, 3))\n query = tf.transpose(query, (0, 2, 1, 3))\n\n if layer_past is not None:\n past_key = layer_past[0]\n past_value = layer_past[1]\n key = tf.concat((past_key, key), axis=-2)\n value = tf.concat((past_value, value), axis=-2)\n\n if use_cache is True:\n present = (key, value)\n else:\n present = None\n\n # compute self-attention: V x Softmax(QK^T)\n attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)\n\n attn_output = self._merge_heads(attn_output)\n attn_output = self.out_proj(attn_output)\n attn_output = self.resid_dropout(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs # a, present, (attentions)\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"q_proj\", None) is not None:\n with tf.name_scope(self.q_proj.name):\n self.q_proj.build([None, None, self.embed_dim])\n if getattr(self, \"k_proj\", None) is not None:\n with tf.name_scope(self.k_proj.name):\n self.k_proj.build([None, None, self.embed_dim])\n if getattr(self, \"v_proj\", None) is not None:\n with tf.name_scope(self.v_proj.name):\n self.v_proj.build([None, None, self.embed_dim])\n if getattr(self, \"out_proj\", None) is not None:\n with tf.name_scope(self.out_proj.name):\n self.out_proj.build([None, None, self.embed_dim])\n\n\nclass TFGPTJMLP(keras.layers.Layer):\n def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):\n super().__init__(**kwargs)\n embed_dim = config.n_embd\n\n self.fc_in = keras.layers.Dense(\n intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name=\"fc_in\"\n )\n self.fc_out = keras.layers.Dense(\n embed_dim, 
kernel_initializer=get_initializer(config.initializer_range), name=\"fc_out\"\n )\n\n self.act = get_tf_activation(config.activation_function)\n self.dropout = keras.layers.Dropout(config.embd_pdrop)\n self.embed_dim = config.n_embd\n self.intermediate_size = intermediate_size\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.fc_in(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.fc_out(hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"fc_in\", None) is not None:\n with tf.name_scope(self.fc_in.name):\n self.fc_in.build([None, None, self.embed_dim])\n if getattr(self, \"fc_out\", None) is not None:\n with tf.name_scope(self.fc_out.name):\n self.fc_out.build([None, None, self.intermediate_size])\n\n\nclass TFGPTJBlock(keras.layers.Layer):\n def __init__(self, config: GPTJConfig, **kwargs):\n super().__init__(**kwargs)\n inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd\n self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"ln_1\")\n self.attn = TFGPTJAttention(config, name=\"attn\")\n self.mlp = TFGPTJMLP(inner_dim, config, name=\"mlp\")\n self.config = config\n\n def call(\n self,\n hidden_states: tf.Tensor,\n layer_past: tf.Tensor | None = None,\n attention_mask: tf.Tensor | None = None,\n position_ids: tf.Tensor | None = None,\n head_mask: tf.Tensor | None = None,\n use_cache: bool = False,\n output_attentions: bool = False,\n ):\n residual = hidden_states\n hidden_states = self.ln_1(hidden_states)\n attn_outputs = self.attn(\n hidden_states=hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n ) # attn_outputs: attn_output, present, (attentions)\n attn_output = attn_outputs[0]\n outputs = attn_outputs[1:]\n\n feed_forward_hidden_states = self.mlp(hidden_states)\n hidden_states = attn_output + feed_forward_hidden_states + residual\n\n if use_cache:\n outputs = (hidden_states,) + outputs\n else:\n outputs = (hidden_states,) + outputs[1:]\n return outputs # hidden_states, present, (attentions)\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"ln_1\", None) is not None:\n with tf.name_scope(self.ln_1.name):\n self.ln_1.build([None, None, self.config.n_embd])\n if getattr(self, \"attn\", None) is not None:\n with tf.name_scope(self.attn.name):\n self.attn.build(None)\n if getattr(self, \"mlp\", None) is not None:\n with tf.name_scope(self.mlp.name):\n self.mlp.build(None)\n\n\n@keras_serializable\nclass TFGPTJMainLayer(keras.layers.Layer):\n config_class = GPTJConfig\n\n def __init__(self, config: GPTJConfig, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n self.config = config\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.use_cache = config.use_cache\n self.return_dict = config.use_return_dict\n\n self.num_hidden_layers = config.n_layer\n self.n_embd = config.n_embd\n self.n_positions = config.n_positions\n self.initializer_range = config.initializer_range\n\n self.wte = TFSharedEmbeddings(\n config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name=\"wte\"\n )\n self.drop = keras.layers.Dropout(config.embd_pdrop)\n self.h = 
[TFGPTJBlock(config, name=f\"h_._{i}\") for i in range(config.n_layer)]\n self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name=\"ln_f\")\n self.embed_dim = config.n_embd\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, value: tf.Tensor):\n self.wte.weight = value\n self.wte.vocab_size = shape_list(value)[0]\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n raise NotImplementedError\n\n @unpack_inputs\n def call(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if past_key_values is None:\n past_length = 0\n past_key_values = [None] * len(self.h)\n else:\n past_length = shape_list(past_key_values[0][0])[-2]\n\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)\n\n if attention_mask is not None:\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask_shape = shape_list(attention_mask)\n attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n one_cst = tf.constant(1.0)\n attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)\n attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.num_hidden_layers\n # head_mask = tf.constant([0] * self.num_hidden_layers)\n\n position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])\n\n if inputs_embeds is None:\n check_embeddings_within_bounds(input_ids, self.wte.vocab_size)\n inputs_embeds = self.wte(input_ids, mode=\"embedding\")\n\n if token_type_ids is not None:\n token_type_ids = tf.reshape(token_type_ids, [-1, 
shape_list(token_type_ids)[-1]])\n token_type_embeds = self.wte(token_type_ids, mode=\"embedding\")\n else:\n token_type_embeds = tf.constant(0.0)\n\n token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)\n hidden_states = inputs_embeds + token_type_embeds\n hidden_states = self.drop(hidden_states, training=training)\n\n output_shape = input_shape + [shape_list(hidden_states)[-1]]\n\n presents = () if use_cache else None\n all_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)\n\n outputs = block(\n hidden_states=hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask[i],\n use_cache=use_cache,\n output_attentions=output_attentions,\n training=training,\n )\n\n hidden_states = outputs[0]\n if use_cache:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_attentions = all_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = tf.reshape(hidden_states, output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if output_attentions:\n # let the number of heads free (-1) so we can extract attention even after head pruning\n attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]\n all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)\n\n return TFBaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"wte\", None) is not None:\n with tf.name_scope(self.wte.name):\n self.wte.build(None)\n if getattr(self, \"ln_f\", None) is not None:\n with tf.name_scope(self.ln_f.name):\n self.ln_f.build([None, None, self.embed_dim])\n if getattr(self, \"h\", None) is not None:\n for layer in self.h:\n with tf.name_scope(layer.name):\n layer.build(None)\n\n\nclass TFGPTJPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GPTJConfig\n base_model_prefix = \"transformer\"\n # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model\n _keys_to_ignore_on_load_unexpected = [r\"h.\\d+.attn.bias\"]\n\n\nGPTJ_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should \"just work\" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Parameters:\n config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nGPTJ_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of\n input past key value states). Indices of input sequence tokens in the vocabulary.\n\n If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past` output below). Can be used to speed up sequential decoding. The token ids which have their past\n given to this model should not be passed as input ids as they have already been computed.\n attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
This argument can be used\n in eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.\",\n GPTJ_START_DOCSTRING,\n)\nclass TFGPTJModel(TFGPTJPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFGPTJMainLayer(config, name=\"transformer\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:\n r\"\"\"\n use_cache (`bool`, *optional*, defaults to `True`):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past`). Set to `False` during training, `True` during generation\n \"\"\"\n\n outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n return outputs\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT-J Model transformer with a language modeling head on top.\n \"\"\",\n GPTJ_START_DOCSTRING,\n)\nclass TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFGPTJMainLayer(config, name=\"transformer\")\n self.lm_head = keras.layers.Dense(\n config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name=\"lm_head\"\n )\n self.config = config\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if past_key_values:\n inputs = tf.expand_dims(inputs[:, -1], -1)\n if token_type_ids is not None:\n token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)\n\n position_ids = 
kwargs.get(\"position_ids\", None)\n attention_mask = kwargs.get(\"attention_mask\", None)\n\n if attention_mask is not None and position_ids is None:\n position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)\n if past_key_values:\n position_ids = tf.expand_dims(position_ids[:, -1], -1)\n\n return {\n \"input_ids\": inputs,\n \"attention_mask\": attention_mask,\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": use_cache,\n \"token_type_ids\": token_type_ids,\n }\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFCausalLMOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n labels: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n \"\"\"\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n hidden_states = transformer_outputs[0]\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # shift labels to the left and cut last logit token\n shifted_logits = lm_logits[:, :-1]\n labels = labels[:, 1:]\n loss = self.hf_compute_loss(labels, shifted_logits)\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFCausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n if getattr(self, \"lm_head\", None) is not None:\n with tf.name_scope(self.lm_head.name):\n self.lm_head.build([None, None, self.config.n_embd])\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT-J Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTJForSequenceClassification`] uses the last token in order to do 
the classification, as other causal models\n (e.g. GPT, GPT-2, GPT-Neo) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n \"\"\",\n GPTJ_START_DOCSTRING,\n)\nclass TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):\n _keys_to_ignore_on_load_missing = [r\"h.\\d+.attn.masked_bias\", r\"h.\\d+.attn.bias\", r\"lm_head.weight\"]\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n self.transformer = TFGPTJMainLayer(config, name=\"transformer\")\n self.score = keras.layers.Dense(\n self.num_labels,\n use_bias=False,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"score\",\n )\n self.config = config\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSequenceClassifierOutputWithPast,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n labels: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n hidden_states = transformer_outputs[0]\n logits = self.score(hidden_states)\n logits_shape = shape_list(logits)\n in_logits = None\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n sequence_lengths = (\n tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)\n - 1\n )\n sequence_lengths = tf.where(\n sequence_lengths >= 0,\n sequence_lengths,\n tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,\n )\n in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)\n else:\n sequence_lengths = -1\n logger.warning(\n f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be \"\n \"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n )\n loss = None\n\n if labels is not None:\n if self.config.pad_token_id is None and logits_shape[0] != 1:\n raise ValueError(\"Cannot handle batch sizes > 1 if no padding token is defined.\")\n\n if not tf.is_tensor(sequence_lengths):\n in_logits = logits[0 : logits_shape[0], sequence_lengths]\n\n loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))\n pooled_logits = in_logits if in_logits is not None else logits\n\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFSequenceClassifierOutputWithPast(\n loss=loss,\n logits=pooled_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n if getattr(self, \"score\", None) is not None:\n with tf.name_scope(self.score.name):\n self.score.build([None, None, self.config.n_embd])\n\n\n@add_start_docstrings(\n \"\"\"\n The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like\n SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n GPTJ_START_DOCSTRING,\n)\nclass TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):\n _keys_to_ignore_on_load_missing = [r\"h.\\d+.attn.masked_bias\", r\"h.\\d+.attn.bias\", r\"lm_head.weight\"]\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.num_labels = config.num_labels\n self.transformer = TFGPTJMainLayer(config, name=\"transformer\")\n self.qa_outputs = keras.layers.Dense(\n self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name=\"qa_outputs\"\n )\n self.config = config\n\n @unpack_inputs\n 
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n token_type_ids: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n start_positions: np.ndarray | tf.Tensor | None = None,\n end_positions: np.ndarray | tf.Tensor | None = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:\n r\"\"\"\n start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n sequence_output = transformer_outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = tf.split(logits, 2, axis=-1)\n start_logits = tf.squeeze(start_logits, axis=-1)\n end_logits = tf.squeeze(end_logits, axis=-1)\n\n loss = None\n if start_positions is not None and end_positions is not None:\n labels = {\"start_position\": start_positions}\n labels[\"end_position\"] = end_positions\n loss = self.hf_compute_loss(labels, (start_logits, end_logits))\n\n if not return_dict:\n output = (start_logits, end_logits) + transformer_outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TFQuestionAnsweringModelOutput(\n loss=loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"transformer\", None) is not None:\n with tf.name_scope(self.transformer.name):\n self.transformer.build(None)\n if getattr(self, \"qa_outputs\", None) is not None:\n with tf.name_scope(self.qa_outputs.name):\n self.qa_outputs.build([None, None, self.config.hidden_size])\n", "output": ["rotate_every_two", "create_sinusoidal_positions", "apply_rotary_pos_emb", "TFGPTJPreTrainedModel", "TFGPTJAttention", "TFGPTJModel", "TFGPTJForSequenceClassification", "TFGPTJMainLayer", 
"TFGPTJForQuestionAnswering", "TFGPTJBlock", "TFGPTJMLP", "TFGPTJForCausalLM"], "metadata": {"file_path": "transformers-main/src/transformers/models/gptj/modeling_tf_gptj.py", "file_length": 15070, "symbol_dict": [{"symbol": "rotate_every_two", "type": "mannual_defined_function", "byte_location": 2214, "location": 798}, {"symbol": "apply_rotary_pos_emb", "type": "mannual_defined_function", "byte_location": 2556, "location": 936}, {"symbol": "create_sinusoidal_positions", "type": "mannual_defined_function", "byte_location": 1835, "location": 624}, {"symbol": "TFGPTJForQuestionAnswering", "type": "mannual_defined_class", "byte_location": 43706, "location": 13700}, {"symbol": "TFGPTJMLP", "type": "mannual_defined_class", "byte_location": 11163, "location": 3914}, {"symbol": "TFGPTJForSequenceClassification", "type": "mannual_defined_class", "byte_location": 38121, "location": 11997}, {"symbol": "TFGPTJPreTrainedModel", "type": "mannual_defined_class", "byte_location": 22711, "location": 7524}, {"symbol": "TFGPTJForCausalLM", "type": "mannual_defined_class", "byte_location": 32293, "location": 10183}, {"symbol": "TFGPTJMainLayer", "type": "mannual_defined_class", "byte_location": 14840, "location": 5099}, {"symbol": "TFGPTJAttention", "type": "mannual_defined_class", "byte_location": 2842, "location": 1063}, {"symbol": "TFGPTJModel", "type": "mannual_defined_class", "byte_location": 29825, "location": 9409}, {"symbol": "TFGPTJBlock", "type": "mannual_defined_class", "byte_location": 12616, "location": 4380}]}} {"input": "# coding=utf-8\n# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.\n#\n# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX\n# and OPT implementations in this library. It has been modified from its\n# original forms to accommodate minor architectural differences compared\n# to GPT-NeoX and OPT used by the Meta AI team that trained the model.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch Persimmon model.\"\"\"\nimport math\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...cache_utils import Cache, DynamicCache\nfrom ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask\nfrom ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings\nfrom .configuration_persimmon import PersimmonConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"PersimmonConfig\"\n\n\n# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Persimmon\nclass PersimmonRotaryEmbedding(nn.Module):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n 
super().__init__()\n\n self.dim = dim\n self.max_position_embeddings = max_position_embeddings\n self.base = base\n inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))\n self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n\n # Build here to make `torch.jit.trace` work.\n self._set_cos_sin_cache(\n seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()\n )\n\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n self.max_seq_len_cached = seq_len\n t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)\n\n freqs = torch.outer(t, self.inv_freq)\n # Different from paper, but it uses a different permutation in order to obtain the same calculation\n emb = torch.cat((freqs, freqs), dim=-1)\n self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n\n def forward(self, x, seq_len=None):\n # x: [bs, num_attention_heads, seq_len, head_size]\n if seq_len > self.max_seq_len_cached:\n self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)\n\n return (\n self.cos_cached[:seq_len].to(dtype=x.dtype),\n self.sin_cached[:seq_len].to(dtype=x.dtype),\n )\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Persimmon\nclass PersimmonLinearScalingRotaryEmbedding(PersimmonRotaryEmbedding):\n \"\"\"PersimmonRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev\"\"\"\n\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n self.scaling_factor = scaling_factor\n super().__init__(dim, max_position_embeddings, base, device)\n\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n self.max_seq_len_cached = seq_len\n t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)\n t = t / self.scaling_factor\n\n freqs = torch.outer(t, self.inv_freq)\n # Different from paper, but it uses a different permutation in order to obtain the same calculation\n emb = torch.cat((freqs, freqs), dim=-1)\n self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Persimmon\nclass PersimmonDynamicNTKScalingRotaryEmbedding(PersimmonRotaryEmbedding):\n \"\"\"PersimmonRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla\"\"\"\n\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n self.scaling_factor = scaling_factor\n super().__init__(dim, max_position_embeddings, base, device)\n\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n self.max_seq_len_cached = seq_len\n\n if seq_len > self.max_position_embeddings:\n base = self.base * (\n (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)\n ) ** (self.dim / (self.dim - 2))\n inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))\n self.register_buffer(\"inv_freq\", inv_freq, persistent=False)\n\n t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)\n\n freqs = torch.outer(t, self.inv_freq)\n # Different from paper, but it uses a different permutation in order to obtain the same calculation\n emb = torch.cat((freqs, freqs), dim=-1)\n self.register_buffer(\"cos_cached\", emb.cos().to(dtype), persistent=False)\n self.register_buffer(\"sin_cached\", emb.sin().to(dtype), persistent=False)\n\n\n# Copied from transformers.models.llama.modeling_llama.rotate_half\ndef rotate_half(x):\n \"\"\"Rotates half the hidden dims of the input.\"\"\"\n x1 = x[..., : x.shape[-1] // 2]\n x2 = x[..., x.shape[-1] // 2 :]\n return torch.cat((-x2, x1), dim=-1)\n\n\n# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):\n \"\"\"Applies Rotary Position Embedding to the query and key tensors.\n\n Args:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`):\n The position indices of the tokens corresponding to the query and key tensors. For example, this can be\n used to pass offsetted position ids when working with a KV-cache.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\n Returns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.\n \"\"\"\n cos = cos[position_ids].unsqueeze(unsqueeze_dim)\n sin = sin[position_ids].unsqueeze(unsqueeze_dim)\n q_embed = (q * cos) + (rotate_half(q) * sin)\n k_embed = (k * cos) + (rotate_half(k) * sin)\n return q_embed, k_embed\n\n\n# Copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXMLP with GPTNeoX->Persimmon\nclass PersimmonMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)\n self.act = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n hidden_states = self.dense_h_to_4h(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.dense_4h_to_h(hidden_states)\n return hidden_states\n\n\nclass PersimmonAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):\n super().__init__()\n self.config = config\n self.layer_idx = layer_idx\n if layer_idx is None:\n logger.warning_once(\n f\"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will \"\n \"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` \"\n \"when creating this class.\"\n )\n\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.max_position_embeddings = config.max_position_embeddings\n self.rope_theta = config.rope_theta\n self.partial_rotary_factor = config.partial_rotary_factor\n self.is_causal = True\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n f\" and `num_heads`: {self.num_heads}).\"\n )\n self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)\n self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)\n self.qk_layernorm = config.qk_layernorm\n\n if self.qk_layernorm:\n self.q_layernorm = nn.LayerNorm(\n config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True\n )\n self.k_layernorm = nn.LayerNorm(\n config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True\n )\n self.attention_dropout = nn.Dropout(config.attention_dropout)\n self._init_rope()\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = PersimmonRotaryEmbedding(\n int(self.partial_rotary_factor * self.head_dim),\n max_position_embeddings=self.max_position_embeddings,\n base=self.rope_theta,\n )\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = PersimmonLinearScalingRotaryEmbedding(\n int(self.partial_rotary_factor * self.head_dim),\n max_position_embeddings=self.max_position_embeddings,\n scaling_factor=scaling_factor,\n base=self.rope_theta,\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = PersimmonDynamicNTKScalingRotaryEmbedding(\n int(self.partial_rotary_factor * self.head_dim),\n 
max_position_embeddings=self.max_position_embeddings,\n scaling_factor=scaling_factor,\n base=self.rope_theta,\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n # Copied from transformers.models.bloom.modeling_bloom.BloomAttention._split_heads\n def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory\n storage as `fused_qkv`\n\n Args:\n fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim]\n\n Returns:\n query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]\n value: [batch_size, seq_length, num_heads, head_dim]\n \"\"\"\n batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)\n return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Cache] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n\n # [batch_size, seq_length, 3 x hidden_size]\n fused_qkv = self.query_key_value(hidden_states)\n\n # 3 x [batch_size, seq_length, num_heads, head_dim]\n (query_states, key_states, value_states) = self._split_heads(fused_qkv)\n\n if self.qk_layernorm:\n query_states = self.q_layernorm(query_states)\n key_states = self.k_layernorm(key_states)\n\n # [batch_size, num_heads, seq_length, head_dim] -> [batch_size, seq_length, num_heads, head_dim]\n query_states = query_states.transpose(1, 2)\n value_states = value_states.transpose(1, 2)\n key_states = key_states.transpose(1, 2)\n\n kv_seq_len = key_states.shape[-2]\n if past_key_value is not None:\n if self.layer_idx is None:\n raise ValueError(\n f\"The cache structure has changed since version v4.36. 
If you are using {self.__class__.__name__} \"\n \"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class \"\n \"with a layer index.\"\n )\n kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)\n cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)\n\n # Partial rotary embedding\n query_rot, query_pass = (\n query_states[..., : self.rotary_emb.dim],\n query_states[..., self.rotary_emb.dim :],\n )\n key_rot, key_pass = (\n key_states[..., : self.rotary_emb.dim],\n key_states[..., self.rotary_emb.dim :],\n )\n # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor]\n query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)\n\n # [batch_size, seq_length, num_heads, head_dim]\n query_states = torch.cat((query_rot, query_pass), dim=-1)\n key_states = torch.cat((key_rot, key_pass), dim=-1)\n\n if past_key_value is not None:\n # Specific to RoPE models with partial rotation\n cache_kwargs = {\"sin\": sin, \"cos\": cos, \"partial_rotation_size\": self.rotary_emb.dim}\n key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)\n\n attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n\n if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights + attention_mask\n\n # upcast attention to fp32\n attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype)\n attn_weights = self.attention_dropout(attn_weights)\n\n attn_output = torch.matmul(attn_weights, value_states)\n\n if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous()\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n attn_output = self.dense(attn_output)\n\n if not output_attentions:\n attn_weights = None\n\n return attn_output, attn_weights, past_key_value\n\n\nclass PersimmonDecoderLayer(nn.Module):\n def __init__(self, config: PersimmonConfig, layer_idx: int):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)\n self.mlp = PersimmonMLP(config)\n self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask 
(`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range\n `[0, config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*):\n cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n \"\"\"\n\n residual = hidden_states\n\n hidden_states = self.input_layernorm(hidden_states)\n\n # Self Attention\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = residual + hidden_states\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = hidden_states + residual\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights,)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs\n\n\nPERSIMMON_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`PersimmonConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Persimmon Model outputting raw hidden-states without any specific head on top.\",\n PERSIMMON_START_DOCSTRING,\n)\nclass PersimmonPreTrainedModel(PreTrainedModel):\n config_class = PersimmonConfig\n base_model_prefix = \"model\"\n supports_gradient_checkpointing = True\n _no_split_modules = [\"PersimmonDecoderLayer\"]\n _skip_keys_device_placement = \"past_key_values\"\n _supports_cache_class = True\n\n def _init_weights(self, module):\n std = self.config.initializer_range\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n\nPERSIMMON_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. 
Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance;\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Persimmon Model outputting raw hidden-states without any specific head on top.\",\n PERSIMMON_START_DOCSTRING,\n)\nclass PersimmonModel(PersimmonPreTrainedModel):\n \"\"\"\n Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]\n\n Args:\n config: PersimmonConfig\n \"\"\"\n\n def __init__(self, config: PersimmonConfig):\n super().__init__(config)\n self.padding_idx = config.pad_token_id\n self.vocab_size = config.vocab_size\n\n self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)\n self.layers = nn.ModuleList(\n [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]\n )\n self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.gradient_checkpointing = False\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif input_ids is not None:\n batch_size, seq_length = input_ids.shape\n elif inputs_embeds is not None:\n batch_size, seq_length, _ = inputs_embeds.shape\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n seq_length_with_past = seq_length\n past_key_values_length = 0\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n if use_cache:\n use_legacy_cache = not isinstance(past_key_values, Cache)\n if use_legacy_cache:\n past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n past_key_values_length = past_key_values.get_usable_length(seq_length)\n seq_length_with_past = seq_length_with_past + past_key_values_length\n\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(\n past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device\n )\n position_ids = position_ids.unsqueeze(0)\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n # embed positions\n if attention_mask is None:\n attention_mask = torch.ones(\n (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device\n )\n attention_mask = _prepare_4d_causal_attention_mask(\n attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length\n )\n\n hidden_states = inputs_embeds\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n next_decoder_cache = None\n\n for decoder_layer in self.layers:\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(\n decoder_layer.__call__,\n hidden_states,\n attention_mask,\n position_ids,\n past_key_values,\n output_attentions,\n )\n else:\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_value=past_key_values,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n hidden_states = self.final_layernorm(hidden_states)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = None\n if use_cache:\n next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache\n\n if not return_dict:\n return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n )\n\n\nclass PersimmonForCausalLM(PersimmonPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->PERSIMMON,Llama->Persimmon\n def __init__(self, config):\n super().__init__(config)\n self.model = PersimmonModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings\n def 
get_output_embeddings(self):\n return self.lm_head\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder\n def set_decoder(self, decoder):\n self.model = decoder\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, PersimmonForCausalLM\n\n >>> model = PersimmonForCausalLM.from_pretrained(\"adept/persimmon-8b-base\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"adept/persimmon-8b-base\")\n\n >>> prompt = \"human: Hey, what should I eat for dinner?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n 'human: Hey, what should I eat for dinner?\\n\\ncat: \ud83d\udc31\\n\\nhuman: \ud83d\ude10\\n\\n'\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, 
shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.prepare_inputs_for_generation\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values is not None:\n if isinstance(past_key_values, Cache):\n cache_length = past_key_values.get_seq_length()\n past_length = past_key_values.seen_tokens\n max_cache_length = past_key_values.get_max_length()\n else:\n cache_length = past_length = past_key_values[0][0].shape[2]\n max_cache_length = None\n\n # Keep only the unprocessed tokens:\n # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where\n # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as\n # input)\n if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:\n input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]\n # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard\n # input_ids based on the past_length.\n elif past_length < input_ids.shape[1]:\n input_ids = input_ids[:, past_length:]\n # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.\n\n # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.\n if (\n max_cache_length is not None\n and attention_mask is not None\n and cache_length + input_ids.shape[1] > max_cache_length\n ):\n attention_mask = attention_mask[:, -max_cache_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n if past_key_value := getattr(self.model.layers[0].self_attn, \"past_key_value\", None):\n # generation with static cache\n seen_tokens = past_key_value.get_seq_length()\n input_ids = input_ids[:, seen_tokens:]\n position_ids = position_ids[:, seen_tokens:]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past\n\n\n@add_start_docstrings(\n \"\"\"\n The Persimmon transformer with a sequence classification head on top (linear layer).\n\n [`PersimmonForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. 
GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n \"\"\",\n PERSIMMON_START_DOCSTRING,\n)\n# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->PERSIMMON,Llama->Persimmon\nclass PersimmonForSequenceClassification(PersimmonPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.model = PersimmonModel(config)\n self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n logits = self.score(hidden_states)\n\n if input_ids is not None:\n batch_size = input_ids.shape[0]\n else:\n batch_size = inputs_embeds.shape[0]\n\n if self.config.pad_token_id is None and batch_size != 1:\n raise ValueError(\"Cannot handle batch sizes > 1 if no padding token is defined.\")\n if self.config.pad_token_id is None:\n sequence_lengths = -1\n else:\n if input_ids is not None:\n # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility\n sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1\n sequence_lengths = sequence_lengths % input_ids.shape[-1]\n sequence_lengths = sequence_lengths.to(logits.device)\n else:\n sequence_lengths = -1\n\n pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]\n\n loss = None\n if labels is not None:\n labels = labels.to(logits.device)\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(pooled_logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(pooled_logits, labels)\n if not return_dict:\n output = (pooled_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutputWithPast(\n loss=loss,\n logits=pooled_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n", "output": ["rotate_half", "apply_rotary_pos_emb", "PersimmonPreTrainedModel", "PersimmonLinearScalingRotaryEmbedding", "PersimmonMLP", "PersimmonForSequenceClassification", "PersimmonDynamicNTKScalingRotaryEmbedding", "PersimmonDecoderLayer", "PersimmonRotaryEmbedding", "PersimmonModel", "PersimmonForCausalLM", "PersimmonAttention"], "metadata": {"file_path": "transformers-main/src/transformers/models/persimmon/modeling_persimmon.py", "file_length": 14538, "symbol_dict": [{"symbol": "rotate_half", "type": "mannual_defined_function", "byte_location": 6160, "location": 2084}, {"symbol": "apply_rotary_pos_emb", "type": "mannual_defined_function", "byte_location": 6427, "location": 2195}, {"symbol": "PersimmonPreTrainedModel", "type": "mannual_defined_class", "byte_location": 21416, "location": 6898}, {"symbol": 
"PersimmonMLP", "type": "mannual_defined_class", "byte_location": 8231, "location": 2765}, {"symbol": "PersimmonAttention", "type": "mannual_defined_class", "byte_location": 8770, "location": 2955}, {"symbol": "PersimmonForCausalLM", "type": "mannual_defined_class", "byte_location": 32768, "location": 10113}, {"symbol": "PersimmonRotaryEmbedding", "type": "mannual_defined_class", "byte_location": 1825, "location": 552}, {"symbol": "PersimmonModel", "type": "mannual_defined_class", "byte_location": 26552, "location": 8310}, {"symbol": "PersimmonDynamicNTKScalingRotaryEmbedding", "type": "mannual_defined_class", "byte_location": 4668, "location": 1562}, {"symbol": "PersimmonDecoderLayer", "type": "mannual_defined_class", "byte_location": 17219, "location": 5703}, {"symbol": "PersimmonLinearScalingRotaryEmbedding", "type": "mannual_defined_class", "byte_location": 3544, "location": 1175}, {"symbol": "PersimmonForSequenceClassification", "type": "mannual_defined_class", "byte_location": 42651, "location": 13120}]}} {"input": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch LiLT model.\"\"\"\n\nimport math\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer\nfrom ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings\nfrom .configuration_lilt import LiltConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"LiltConfig\"\n\nLILT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"SCUT-DLVCLab/lilt-roberta-en-base\",\n # See all LiLT models at https://huggingface.co/models?filter=lilt\n]\n\n\nclass LiltTextEmbeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\n \"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False\n )\n 
self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n\n # End copy\n self.padding_idx = config.pad_token_id\n self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n ):\n if position_ids is None:\n if input_ids is not None:\n # Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(\n input_ids.device\n )\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings, position_ids\n\n def create_position_ids_from_input_ids(self, input_ids, padding_idx):\n \"\"\"\n Args:\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding\n symbols are ignored. This is modified from fairseq's `utils.make_positions`.\n x: torch.Tensor x:\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask\n return incremental_indices.long() + padding_idx\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n Args:\n We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids.:\n inputs_embeds: torch.Tensor\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\nclass LiltLayoutEmbeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n # we divide the hidden_size by 6 here as there are 6 different layout embeddings,\n # namely left_position, upper_position, right_position, lower_position, height, width\n self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)\n self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)\n self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)\n self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)\n\n self.padding_idx = config.pad_token_id\n self.box_position_embeddings = nn.Embedding(\n config.max_position_embeddings,\n config.hidden_size // config.channel_shrink_ratio,\n padding_idx=self.padding_idx,\n )\n self.box_linear_embeddings = nn.Linear(\n in_features=config.hidden_size, out_features=config.hidden_size // config.channel_shrink_ratio\n )\n self.LayerNorm = nn.LayerNorm(config.hidden_size // config.channel_shrink_ratio, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, bbox=None, position_ids=None):\n try:\n left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])\n upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])\n right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])\n lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])\n except IndexError as e:\n raise IndexError(\"The `bbox` coordinate values should be within 0-1000 range.\") from e\n\n h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])\n w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])\n\n spatial_position_embeddings = torch.cat(\n [\n left_position_embeddings,\n upper_position_embeddings,\n right_position_embeddings,\n lower_position_embeddings,\n h_position_embeddings,\n w_position_embeddings,\n ],\n dim=-1,\n )\n spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings)\n box_position_embeddings = self.box_position_embeddings(position_ids)\n\n spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings\n\n spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings)\n spatial_position_embeddings = self.dropout(spatial_position_embeddings)\n\n return spatial_position_embeddings\n\n\nclass LiltSelfAttention(nn.Module):\n def __init__(self, config, position_embedding_type=None):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = 
nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.layout_query = nn.Linear(\n config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio\n )\n self.layout_key = nn.Linear(\n config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio\n )\n self.layout_value = nn.Linear(\n config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio\n )\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = position_embedding_type or getattr(\n config, \"position_embedding_type\", \"absolute\"\n )\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.channel_shrink_ratio = config.channel_shrink_ratio\n\n def transpose_for_scores(self, x, r=1):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size // r)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n layout_inputs,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio)\n layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio)\n layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio)\n\n mixed_query_layer = self.query(hidden_states)\n\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n tmp_layout_attention_scores = layout_attention_scores / math.sqrt(\n 
self.attention_head_size // self.channel_shrink_ratio\n )\n attention_scores = tmp_attention_scores + tmp_layout_attention_scores\n layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n layout_attention_scores = layout_attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n layout_attention_probs = self.dropout(layout_attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n layout_attention_probs = layout_attention_probs * head_mask\n\n layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer)\n\n layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size // self.channel_shrink_ratio,)\n layout_context_layer = layout_context_layer.view(*new_context_layer_shape)\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (\n ((context_layer, layout_context_layer), attention_probs)\n if output_attentions\n else ((context_layer, layout_context_layer),)\n )\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass LiltSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass LiltAttention(nn.Module):\n def __init__(self, config, position_embedding_type=None):\n super().__init__()\n self.self = LiltSelfAttention(config, position_embedding_type=position_embedding_type)\n self.output = LiltSelfOutput(config)\n self.pruned_heads = set()\n\n ori_hidden_size = config.hidden_size\n config.hidden_size = config.hidden_size // config.channel_shrink_ratio\n self.layout_output = LiltSelfOutput(config)\n config.hidden_size = ori_hidden_size\n\n # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = 
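The cross-modal step above is simple once isolated: each stream's raw scores are scaled by the square root of its own head size, and the two scaled tensors are summed into both streams before softmax (as written, the two sums are identical; mask, softmax, dropout and the value matmul are then applied per stream). A minimal sketch with made-up shapes:

```python
# Minimal sketch of the score-sharing step; all shapes and the shrink ratio are
# illustrative assumptions.
import math
import torch

batch, heads, seq, head_dim, shrink = 1, 2, 4, 8, 4
text_scores = torch.randn(batch, heads, seq, seq)
layout_scores = torch.randn(batch, heads, seq, seq)

scaled_text = text_scores / math.sqrt(head_dim)
scaled_layout = layout_scores / math.sqrt(head_dim // shrink)

shared_text = scaled_text + scaled_layout       # used by the text stream
shared_layout = scaled_layout + scaled_text     # used by the layout stream
assert torch.equal(shared_text, shared_layout)  # identical sums, per the code above
```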
find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n layout_inputs: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.Tensor]:\n self_outputs = self.self(\n hidden_states,\n layout_inputs,\n attention_mask,\n head_mask,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0][0], hidden_states)\n layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs)\n outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass LiltIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass LiltOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass LiltLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = LiltAttention(config)\n self.intermediate = LiltIntermediate(config)\n self.output = LiltOutput(config)\n\n ori_hidden_size = config.hidden_size\n ori_intermediate_size = config.intermediate_size\n config.hidden_size = config.hidden_size // config.channel_shrink_ratio\n config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio\n self.layout_intermediate = LiltIntermediate(config)\n self.layout_output = LiltOutput(config)\n config.hidden_size = ori_hidden_size\n config.intermediate_size = ori_intermediate_size\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n layout_inputs: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.Tensor]:\n 
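One detail worth calling out in `LiltAttention` / `LiltLayer`: the layout-stream copies of the BERT-style submodules are built by temporarily overwriting `config.hidden_size` (and `intermediate_size`) with the shrunk width and restoring the original value afterwards. A hedged sketch of that pattern, using an invented `DummyConfig`:

```python
# Sketch of the "temporarily shrink the config" pattern; DummyConfig and the
# sizes are invented for illustration.
from dataclasses import dataclass

import torch.nn as nn

@dataclass
class DummyConfig:
    hidden_size: int = 768
    channel_shrink_ratio: int = 4

def build_text_and_layout_dense(config: DummyConfig):
    text_dense = nn.Linear(config.hidden_size, config.hidden_size)

    ori_hidden_size = config.hidden_size
    config.hidden_size = config.hidden_size // config.channel_shrink_ratio
    layout_dense = nn.Linear(config.hidden_size, config.hidden_size)  # built at the shrunk width
    config.hidden_size = ori_hidden_size  # restore so later modules see the full width

    return text_dense, layout_dense

text_dense, layout_dense = build_text_and_layout_dense(DummyConfig())
print(tuple(text_dense.weight.shape), tuple(layout_dense.weight.shape))  # (768, 768) (192, 192)
```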
self_attention_outputs = self.attention(\n hidden_states,\n layout_inputs,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0][0]\n layout_attention_output = self_attention_outputs[0][1]\n\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n layout_layer_output = apply_chunking_to_forward(\n self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output\n )\n outputs = ((layer_output, layout_layer_output),) + outputs\n\n return outputs\n\n # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n def layout_feed_forward_chunk(self, attention_output):\n intermediate_output = self.layout_intermediate(attention_output)\n layer_output = self.layout_output(intermediate_output, attention_output)\n return layer_output\n\n\nclass LiltEncoder(nn.Module):\n # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Lilt\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([LiltLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n layout_inputs: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = False,\n output_hidden_states: Optional[bool] = False,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(\n layer_module.__call__,\n hidden_states,\n layout_inputs,\n attention_mask,\n layer_head_mask,\n output_attentions,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n layout_inputs,\n attention_mask,\n layer_head_mask,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0][0]\n layout_inputs = layer_outputs[0][1]\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n all_hidden_states,\n all_self_attentions,\n ]\n if v is not None\n )\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass LiltPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n # We \"pool\" the model by simply taking the hidden state 
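`apply_chunking_to_forward` above trades a little speed for memory by running the position-wise feed-forward on slices of the sequence dimension. A plain-torch sketch of the same idea (the real helper also validates shapes and argument counts); the module and chunk size are illustrative assumptions:

```python
# Plain-torch sketch of chunked feed-forward; ffn, chunk_size and tensor sizes
# are illustrative assumptions.
import torch
import torch.nn as nn

ffn = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))

def chunked_forward(x: torch.Tensor, chunk_size: int, seq_dim: int = 1) -> torch.Tensor:
    if chunk_size == 0:  # 0 means "no chunking"
        return ffn(x)
    chunks = x.split(chunk_size, dim=seq_dim)
    return torch.cat([ffn(chunk) for chunk in chunks], dim=seq_dim)

x = torch.randn(2, 8, 16)  # (batch, seq, hidden)
assert torch.allclose(chunked_forward(x, chunk_size=2), ffn(x), atol=1e-6)
```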
corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass LiltPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = LiltConfig\n base_model_prefix = \"lilt\"\n supports_gradient_checkpointing = True\n _no_split_modules = []\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nLILT_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`LiltConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nLILT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n\n bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):\n Bounding boxes of each input sequence tokens. Selected in the range `[0,\n config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)\n format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,\n y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare LiLT Model transformer outputting raw hidden-states without any specific head on top.\",\n LILT_START_DOCSTRING,\n)\nclass LiltModel(LiltPreTrainedModel):\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = LiltTextEmbeddings(config)\n self.layout_embeddings = LiltLayoutEmbeddings(config)\n self.encoder = LiltEncoder(config)\n\n self.pooler = LiltPooler(config) if add_pooling_layer else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n bbox: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n token_type_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:\n r\"\"\"\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModel\n >>> from datasets import load_dataset\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n >>> model = AutoModel.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n\n >>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\")\n >>> example = dataset[0]\n >>> words = example[\"tokens\"]\n >>> boxes = example[\"bboxes\"]\n\n >>> encoding = tokenizer(words, boxes=boxes, return_tensors=\"pt\")\n\n >>> outputs = model(**encoding)\n >>> last_hidden_states = outputs.last_hidden_state\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if bbox is None:\n bbox = torch.zeros(input_shape + (4,), dtype=torch.long, device=device)\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape 
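The call to `get_extended_attention_mask` above turns the `(batch, seq_len)` mask of ones and zeros into a broadcastable additive bias on the attention scores. A rough sketch of what that amounts to (the real helper on `PreTrainedModel` also handles dtype selection and decoder masks):

```python
# Rough sketch of the extended (additive) attention mask; the real helper
# handles more cases.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])                  # (batch, seq)
extended = attention_mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq)
extended = (1.0 - extended) * torch.finfo(torch.float32).min   # 0 where attended, very negative where masked
print(extended)
```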
[num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output, position_ids = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n )\n\n layout_embedding_output = self.layout_embeddings(bbox=bbox, position_ids=position_ids)\n\n encoder_outputs = self.encoder(\n embedding_output,\n layout_embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n LiLT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n \"\"\",\n LILT_START_DOCSTRING,\n)\nclass LiltForSequenceClassification(LiltPreTrainedModel):\n # Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification.__init__ with Roberta->Lilt, roberta->lilt\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.lilt = LiltModel(config, add_pooling_layer=False)\n self.classifier = LiltClassificationHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification\n >>> from datasets import load_dataset\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n >>> model = AutoModelForSequenceClassification.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n\n >>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\")\n >>> example = dataset[0]\n >>> words = example[\"tokens\"]\n >>> boxes = example[\"bboxes\"]\n\n >>> encoding = tokenizer(words, boxes=boxes, return_tensors=\"pt\")\n\n >>> outputs = model(**encoding)\n >>> predicted_class_idx = outputs.logits.argmax(-1).item()\n >>> predicted_class = model.config.id2label[predicted_class_idx]\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.lilt(\n input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(logits.device)\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Lilt Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
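Once `problem_type` is unset, the loss selection above depends only on `num_labels` and the label dtype. A compact sketch of the same dispatch; the function name `pick_loss` and all shapes are illustrative assumptions:

```python
# Sketch of the problem-type loss dispatch; pick_loss and all sizes are
# illustrative assumptions.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(logits: torch.Tensor, labels: torch.Tensor, num_labels: int) -> torch.Tensor:
    if num_labels == 1:                          # regression
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):  # single-label classification
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)   # multi-label classification

logits = torch.randn(3, 5)
print(pick_loss(logits, torch.tensor([0, 2, 4]), num_labels=5))
```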
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n LILT_START_DOCSTRING,\n)\nclass LiltForTokenClassification(LiltPreTrainedModel):\n # Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification.__init__ with Roberta->Lilt, roberta->lilt\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.lilt = LiltModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForTokenClassification\n >>> from datasets import load_dataset\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n >>> model = AutoModelForTokenClassification.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n\n >>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\")\n >>> example = dataset[0]\n >>> words = example[\"tokens\"]\n >>> boxes = example[\"bboxes\"]\n\n >>> encoding = tokenizer(words, boxes=boxes, return_tensors=\"pt\")\n\n >>> outputs = model(**encoding)\n >>> predicted_class_indices = outputs.logits.argmax(-1)\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.lilt(\n input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(logits.device)\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# Copied from 
transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Lilt\nclass LiltClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Lilt Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n LILT_START_DOCSTRING,\n)\nclass LiltForQuestionAnswering(LiltPreTrainedModel):\n # Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering.__init__ with Roberta->Lilt, roberta->lilt\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.lilt = LiltModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n start_positions: Optional[torch.LongTensor] = None,\n end_positions: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import AutoTokenizer, AutoModelForQuestionAnswering\n >>> from datasets import load_dataset\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n >>> model = AutoModelForQuestionAnswering.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n\n >>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\")\n >>> example = dataset[0]\n >>> words = example[\"tokens\"]\n >>> boxes = example[\"bboxes\"]\n\n >>> encoding = tokenizer(words, boxes=boxes, return_tensors=\"pt\")\n\n >>> outputs = model(**encoding)\n\n >>> answer_start_index = outputs.start_logits.argmax()\n >>> answer_end_index = outputs.end_logits.argmax()\n\n >>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]\n >>> predicted_answer = tokenizer.decode(predict_answer_tokens)\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.lilt(\n input_ids,\n bbox=bbox,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n", "output": ["LiltTextEmbeddings", "LiltLayoutEmbeddings", "LiltAttention", "LiltClassificationHead", "LiltIntermediate", "LiltPreTrainedModel", "LiltSelfOutput", "LiltEncoder", "LiltOutput", "LiltForSequenceClassification", "LiltForTokenClassification", "LiltPooler", "LiltForQuestionAnswering", "LiltSelfAttention", "LiltLayer", "LiltModel"], "metadata": {"file_path": "transformers-main/src/transformers/models/lilt/modeling_lilt.py", "file_length": 15895, "symbol_dict": [{"symbol": "LiltOutput", "type": "mannual_defined_class", "byte_location": 19203, "location": 6052}, {"symbol": "LiltForQuestionAnswering", "type": "mannual_defined_class", "byte_location": 47401, "location": 14314}, {"symbol": "LiltIntermediate", "type": "mannual_defined_class", "byte_location": 18571, "location": 5854}, {"symbol": 
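For reference, the span loss above reduces to: split the two logits per token, clamp out-of-range gold positions to an ignored index, and average the start/end cross-entropies. A self-contained sketch with made-up sizes and positions:

```python
# Sketch of the extractive-QA span loss; batch/sequence sizes and positions are
# illustrative assumptions.
import torch
from torch.nn import CrossEntropyLoss

batch, seq = 2, 6
logits = torch.randn(batch, seq, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()

start_positions = torch.tensor([1, 9])  # 9 is deliberately out of range
end_positions = torch.tensor([3, 9])
ignored_index = start_logits.size(1)    # == seq
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)
```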
"LiltClassificationHead", "type": "mannual_defined_class", "byte_location": 46344, "location": 13978}, {"symbol": "LiltEncoder", "type": "mannual_defined_class", "byte_location": 22346, "location": 6994}, {"symbol": "LiltForSequenceClassification", "type": "mannual_defined_class", "byte_location": 37249, "location": 11307}, {"symbol": "LiltModel", "type": "mannual_defined_class", "byte_location": 30922, "location": 9386}, {"symbol": "LiltPreTrainedModel", "type": "mannual_defined_class", "byte_location": 25532, "location": 7897}, {"symbol": "LiltAttention", "type": "mannual_defined_class", "byte_location": 16258, "location": 5109}, {"symbol": "LiltForTokenClassification", "type": "mannual_defined_class", "byte_location": 42349, "location": 12794}, {"symbol": "LiltLayoutEmbeddings", "type": "mannual_defined_class", "byte_location": 5560, "location": 1711}, {"symbol": "LiltSelfAttention", "type": "mannual_defined_class", "byte_location": 8422, "location": 2638}, {"symbol": "LiltTextEmbeddings", "type": "mannual_defined_class", "byte_location": 1596, "location": 510}, {"symbol": "LiltSelfOutput", "type": "mannual_defined_class", "byte_location": 15649, "location": 4909}, {"symbol": "LiltLayer", "type": "mannual_defined_class", "byte_location": 19814, "location": 6252}, {"symbol": "LiltPooler", "type": "mannual_defined_class", "byte_location": 24970, "location": 7718}]}} {"input": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch M-CTC-T model.\"\"\"\n\n\nimport math\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\n\nfrom ....activations import ACT2FN\nfrom ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ....integrations.deepspeed import is_deepspeed_zero3_enabled\nfrom ....modeling_attn_mask_utils import _prepare_4d_attention_mask\nfrom ....modeling_outputs import BaseModelOutput, CausalLMOutput\nfrom ....modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ....utils import logging\nfrom .configuration_mctct import MCTCTConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_HIDDEN_STATES_START_POSITION = 1\n\n_CONFIG_FOR_DOC = \"MCTCTConfig\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"speechbrain/m-ctc-t-large\"\n_EXPECTED_OUTPUT_SHAPE = [1, 195, 1536]\n\n# CTC docstring\n_CTC_EXPECTED_OUTPUT = '\"Mr. 
Quilter is the apostle of the middle classes, and we\\'re glad to welcome his gospel.\"'\n_CTC_EXPECTED_LOSS = 1885.65\n\n\nMCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"speechbrain/m-ctc-t-large\",\n # See all M-CTC-T models at https://huggingface.co/models?filter=mctct\n]\n\n\nclass MCTCTConv1dSubsampler(nn.Module):\n \"\"\"\n Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation\n via gated linear units (https://arxiv.org/abs/1911.08460)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.glu_dim = config.conv_glu_dim\n\n self.dropout = nn.Dropout(config.conv_dropout)\n\n self.num_layers = config.num_conv_layers\n self.in_channels = config.input_feat_per_channel * config.input_channels\n\n if self.num_layers > 1:\n if config.conv_channels is None:\n raise ValueError(\n \"Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution\"\n \" layers.\"\n )\n\n self.mid_channels = config.conv_channels\n else:\n self.mid_channels = None\n\n self.out_channels = config.hidden_size * 2 # considering GLU halving\n self.kernel_size = config.conv_kernel\n self.stride = config.conv_stride\n\n # NOTE: MCTCT by construction only uses one convolution kernel. I've made this flexible to allow for\n # multiple layers of convolutions, but not sure if this model definition should just restrict it\n # to one layer. This becomes especially relevant when considering the padding like line 1 of forward().\n self.conv_layers = nn.ModuleList(\n nn.Conv1d(\n self.in_channels if i == 0 else self.mid_channels[i],\n self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels,\n kernel_size=k,\n stride=self.stride[i],\n padding=\"valid\",\n )\n for i, k in enumerate(self.kernel_size)\n )\n\n def forward(self, input_features):\n # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if\n # there will be just one conv layer.\n padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3)\n\n input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), \"constant\", 0)\n hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time\n for conv in self.conv_layers:\n hidden_states = conv(hidden_states)\n hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim)\n hidden_states = self.dropout(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2).contiguous() # -> Batch x Time x Frame\n return hidden_states\n\n\nclass MCTCTEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n # self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.LayerNorm = MCTCTLayerNorm()\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\n \"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)), 
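The subsampler above pads manually, runs a strided `Conv1d` that emits `2 * hidden_size` channels, and lets the GLU halve that back to `hidden_size` while the stride shortens the time axis. A stand-alone sketch with assumed sizes (`hidden_size=32`, 20 input features, kernel 7, stride 2):

```python
# Minimal sketch of the Conv1d + GLU subsampling step; all sizes are
# illustrative assumptions.
import torch
import torch.nn as nn

hidden_size, in_channels, kernel, stride = 32, 20, 7, 2
conv = nn.Conv1d(in_channels, hidden_size * 2, kernel_size=kernel, stride=stride, padding="valid")

features = torch.randn(1, 50, in_channels)                       # (batch, time, feature)
x = torch.nn.functional.pad(features, (0, 0, kernel // 2, kernel // 2))
x = x.transpose(1, 2)                                            # -> (batch, feature, time)
x = conv(x)                                                      # (batch, 2*hidden, time')
x = nn.functional.glu(x, dim=1)                                  # (batch, hidden, time')
print(x.transpose(1, 2).shape)                                   # back to (batch, time', hidden)
```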
persistent=False\n )\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),\n persistent=False,\n )\n\n def forward(\n self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_features)\n\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass MCTCTSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = config.attention_head_dim\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)\n self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)\n self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def reshape_fortran(self, x, shape):\n if len(x.shape) > 0:\n x = x.permute(*reversed(range(len(x.shape))))\n return x.reshape(*reversed(shape)).permute(*reversed(range(len(shape))))\n\n def relative_position_embedding_rotate(self, scores):\n # NOTE: should re-evaluate whether this re-implementation was truly necessary\n # or the reason why my complete re-haul worked was due to some other part\n # of the code. Adding this and the reshape fortrain code seems very undesirable.\n scores = scores.permute(0, 2, 3, 1) # e.g. [10, 1839, 14, 4]\n\n batch, hidden_state, seq_len, heads = scores.shape\n\n # e.g. [10, 1853, 14, 4]\n scores = torch.cat((scores, torch.zeros((batch, seq_len, seq_len, heads), device=scores.device)), dim=1)\n\n # e.g. 
[10, 25942, 1, 4]\n scores = self.reshape_fortran(scores, [batch, (hidden_state + seq_len) * seq_len, 1, heads])\n\n # e.g. [10, 25928, 1, 4]\n scores = scores[:, : (seq_len + hidden_state - 1) * seq_len]\n\n # e.g. [10, 1852, 14, 4]\n scores = self.reshape_fortran(scores, [batch, hidden_state + seq_len - 1, seq_len, heads])\n\n halfpoint = hidden_state // 2\n scores = scores[:, halfpoint : halfpoint + seq_len].transpose(1, 2) # e.g. [10, 14, 14, 4]\n\n return scores.permute(0, 3, 1, 2)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n mixed_query_layer = mixed_query_layer / math.sqrt(self.attention_head_size)\n\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n # relative key position embeddings\n positional_embedding = self.distance_embedding.weight\n relative_position_scores = torch.einsum(\"lh, bche -> bcle\", positional_embedding, query_layer.transpose(2, 3))\n\n relative_position_scores = self.relative_position_embedding_rotate(relative_position_scores)\n attention_scores = attention_scores + relative_position_scores\n\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in MCTCTModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).flatten(start_dim=-2)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n return outputs\n\n\nclass MCTCTLayerNorm(nn.Module):\n def __init__(self):\n super().__init__()\n self.singleton_weight = nn.Parameter(torch.ones(1))\n self.singleton_bias = nn.Parameter(torch.zeros(1))\n\n def forward(self, hidden_states):\n return (hidden_states * self.singleton_weight) + self.singleton_bias\n\n\nclass MCTCTSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass MCTCTAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = MCTCTSelfAttention(config)\n self.output = MCTCTSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, 
self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n\n return outputs\n\n\nclass MCTCTIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass MCTCTOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass MCTCTLayer(nn.Module):\n def __init__(self, config: MCTCTConfig):\n super().__init__()\n\n self.seq_len_dim = 1\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n\n self.intermediate = MCTCTIntermediate(config)\n self.attention = MCTCTAttention(config)\n self.is_decoder = config.is_decoder\n self.output = MCTCTOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n self_attention_outputs = self.attention(\n hidden_states, attention_mask, head_mask, output_attentions=output_attentions\n )\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n\n outputs = (layer_output,) + outputs\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass MCTCTPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = MCTCTConfig\n base_model_prefix = \"mctct\"\n main_input_name = \"input_features\"\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the 
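`prune_heads` above leans on `find_pruneable_heads_and_indices` / `prune_linear_layer`; conceptually it keeps only the projection rows that belong to the surviving heads (and the matching columns of the output projection). A plain-torch sketch of that idea, with invented sizes:

```python
# Conceptual sketch of pruning one projection for a set of removed heads; sizes
# and the heads_to_prune set are illustrative assumptions.
import torch
import torch.nn as nn

num_heads, head_size, hidden = 4, 8, 32
heads_to_prune = {1, 3}
keep = [h * head_size + i for h in range(num_heads) if h not in heads_to_prune for i in range(head_size)]
index = torch.tensor(keep)

query = nn.Linear(hidden, num_heads * head_size)
pruned = nn.Linear(hidden, len(index))
pruned.weight.data = query.weight.data[index].clone()
pruned.bias.data = query.bias.data[index].clone()
print(pruned.weight.shape)  # torch.Size([16, 32]) after removing 2 of 4 heads
```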
weights\"\"\"\n std = self.config.initializer_range\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, MCTCTLayerNorm):\n module.singleton_weight.data.fill_(1.0)\n module.singleton_bias.data.zero_()\n if isinstance(module, (nn.Linear, nn.Conv1d)):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n\n def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):\n \"\"\"\n Computes the output length of the convolutional layers\n \"\"\"\n dilation = 1\n for _, kernel_sz, stride in zip(\n range(self.config.num_conv_layers), self.config.conv_kernel, self.config.conv_stride\n ):\n padding = kernel_sz // 2\n input_lengths = input_lengths + 2 * padding - dilation * (kernel_sz - 1) - 1\n input_lengths = torch.div(input_lengths, stride, rounding_mode=\"trunc\") + 1\n\n return input_lengths\n\n def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):\n # generate creates 3D attention mask, because of the shape of input_features\n # convert it to 2D if thats the case\n if len(attention_mask.shape) > 2:\n attention_mask = attention_mask[:, :, -1]\n\n # subsampled_lengths = attention_mask.sum(-1)\n subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))\n bsz = attention_mask.size()[0]\n attention_mask = torch.zeros(\n (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n )\n\n # these two operations makes sure that all values\n # before the output lengths indices are attended to\n attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1\n attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()\n return attention_mask\n\n\nMCTCT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MCTCTConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nMCTCT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_features (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`Wav2Vec2CTCTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. 
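`_get_feat_extract_output_lengths` above is the standard `Conv1d` output-length formula applied per layer with `padding = kernel_size // 2`. A small sketch; the lengths, kernel and stride are illustrative:

```python
# Sketch of the convolutional length bookkeeping for a single layer; numbers are
# illustrative assumptions.
import torch

def conv_out_len(in_len: torch.Tensor, kernel: int, stride: int, dilation: int = 1) -> torch.Tensor:
    padding = kernel // 2
    out = in_len + 2 * padding - dilation * (kernel - 1) - 1
    return torch.div(out, stride, rounding_mode="trunc") + 1

lengths = torch.tensor([50, 37])
print(conv_out_len(lengths, kernel=7, stride=2))  # tensor([25, 19])
```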
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\nclass MCTCTEncoder(MCTCTPreTrainedModel):\n def __init__(self, config: MCTCTConfig):\n super().__init__(config)\n self.hidden_dropout_prob = config.hidden_dropout_prob\n\n self.layer_norm = MCTCTLayerNorm()\n self.conv = MCTCTConv1dSubsampler(config)\n self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)])\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n input_features: torch.Tensor,\n attention_mask: torch.Tensor,\n head_mask: torch.Tensor,\n output_attentions: bool = False,\n output_hidden_states: bool = False,\n return_dict: bool = True,\n ) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n input_features = self.layer_norm(input_features)\n\n inputs_embeds = self.conv(input_features)\n\n # subsample attention mask if necessary\n if attention_mask is not None:\n attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)\n\n hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training)\n\n # expand attention_mask\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n # check if head_mask has a correct number of layers specified if desired\n if head_mask is not None:\n if head_mask.size()[0] != len(self.layers):\n raise ValueError(\n f\"The head_mask should be specified for {len(self.layers)} layers, \"\n f\"but it is for {head_mask.size()[0]}.\"\n )\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = torch.rand([])\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(\n encoder_layer.__call__,\n hidden_states,\n 
attention_mask,\n (head_mask[idx] if head_mask is not None else None),\n output_attentions,\n )\n else:\n layer_outputs = encoder_layer(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\n@add_start_docstrings(\n \"The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.\",\n MCTCT_START_DOCSTRING,\n)\nclass MCTCTModel(MCTCTPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.encoder = MCTCTEncoder(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n input_features: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_features is None:\n raise ValueError(\"You have to specify input_features.\")\n\n encoder_outputs = self.encoder(\n input_features,\n attention_mask=attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[1:]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n MCTCT_START_DOCSTRING,\n)\nclass MCTCTForCTC(MCTCTPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mctct = MCTCTModel(config)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language model head. Please \"\n \"instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. 
\"\n \"or define `vocab_size` of your model's configuration.\"\n )\n output_hidden_size = config.hidden_size\n\n self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_CTC_EXPECTED_OUTPUT,\n expected_loss=_CTC_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_features: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n ) -> Union[Tuple, CausalLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.mctct(\n input_features,\n attention_mask=attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n\n logits = self.ctc_head(hidden_states)\n\n loss = None\n if labels is not None:\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask\n if attention_mask is not None\n else torch.ones(input_features.shape[:-1], dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n", "output": ["MCTCTIntermediate", "MCTCTSelfOutput", "MCTCTOutput", "MCTCTPreTrainedModel", "MCTCTForCTC", "MCTCTAttention", "MCTCTModel", "MCTCTConv1dSubsampler", "MCTCTEncoder", "MCTCTEmbeddings", "MCTCTLayer", "MCTCTSelfAttention", "MCTCTLayerNorm"], "metadata": {"file_path": "transformers-main/src/transformers/models/deprecated/mctct/modeling_mctct.py", "file_length": 10161, "symbol_dict": [{"symbol": "MCTCTAttention", "type": "mannual_defined_class", "byte_location": 13003, "location": 
4215}, {"symbol": "MCTCTSelfOutput", "type": "mannual_defined_class", "byte_location": 12396, "location": 4022}, {"symbol": "MCTCTLayer", "type": "mannual_defined_class", "byte_location": 15698, "location": 5081}, {"symbol": "MCTCTPreTrainedModel", "type": "mannual_defined_class", "byte_location": 16992, "location": 5465}, {"symbol": "MCTCTModel", "type": "mannual_defined_class", "byte_location": 26514, "location": 8243}, {"symbol": "MCTCTLayerNorm", "type": "mannual_defined_class", "byte_location": 12075, "location": 3912}, {"symbol": "MCTCTIntermediate", "type": "mannual_defined_class", "byte_location": 14567, "location": 4725}, {"symbol": "MCTCTConv1dSubsampler", "type": "mannual_defined_class", "byte_location": 1883, "location": 668}, {"symbol": "MCTCTForCTC", "type": "mannual_defined_class", "byte_location": 28725, "location": 8899}, {"symbol": "MCTCTOutput", "type": "mannual_defined_class", "byte_location": 15118, "location": 4895}, {"symbol": "MCTCTEncoder", "type": "mannual_defined_class", "byte_location": 22251, "location": 7012}, {"symbol": "MCTCTSelfAttention", "type": "mannual_defined_class", "byte_location": 7327, "location": 2315}, {"symbol": "MCTCTEmbeddings", "type": "mannual_defined_class", "byte_location": 4526, "location": 1451}]}} {"input": "# coding=utf-8\n# Copyright 2021 The Fairseq Authors The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 XGLM model.\"\"\"\n\n\nfrom __future__ import annotations\n\nimport math\nimport random\nfrom typing import Any, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\n\n# Public API\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions\nfrom ...modeling_tf_utils import (\n TFCausalLanguageModelingLoss,\n TFModelInputType,\n TFPreTrainedModel,\n TFSharedEmbeddings,\n get_initializer,\n keras,\n keras_serializable,\n unpack_inputs,\n)\nfrom ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax\nfrom ...utils import logging\nfrom .configuration_xglm import XGLMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"facebook/xglm-564M\"\n_CONFIG_FOR_DOC = \"XGLMConfig\"\n\n\nTF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/xglm-564M\",\n # See all XGLM models at https://huggingface.co/models?filter=xglm\n]\n\n\nLARGE_NEGATIVE = -1e8\n\n\ndef create_sinusoidal_positions(num_positions: int, embedding_dim: int, padding_idx: Optional[int]) -> tf.Tensor:\n half_dim = embedding_dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = tf.exp(tf.range(half_dim, dtype=tf.float32) * -emb)\n emb = tf.expand_dims(tf.range(num_positions, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)\n emb = tf.reshape(tf.concat([tf.sin(emb), tf.cos(emb)], 
axis=1), (num_positions, -1))\n if embedding_dim % 2 == 1:\n # zero pad\n emb = tf.concat([emb, tf.zeros((num_positions, 1))], axis=1)\n if padding_idx is not None:\n _padding_mask = tf.concat(\n [\n tf.ones((padding_idx, shape_list(emb)[1])),\n tf.zeros((1, shape_list(emb)[1])),\n tf.ones((shape_list(emb)[0] - padding_idx - 1, shape_list(emb)[1])),\n ],\n axis=0,\n )\n emb *= _padding_mask\n\n return tf.constant(emb, name=\"embed_positions\")\n\n\ndef _create_position_ids_from_input_ids(\n input_ids: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int]\n) -> tf.Tensor:\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = tf.where(input_ids != padding_idx, 1, 0)\n incremental_indices = (tf.cast(tf.cumsum(mask, axis=1), dtype=mask.dtype) + past_key_values_length) * mask\n return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx\n\n\ndef _create_position_ids_from_inputs_embeds(\n inputs_embeds: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int]\n) -> tf.Tensor:\n \"\"\"\n Args:\n We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n inputs_embeds: tf.Tensor\n Returns: tf.Tensor\n \"\"\"\n input_shape = shape_list(inputs_embeds)[:-1]\n sequence_length = input_shape[1]\n\n position_ids = tf.range(padding_idx + 1, sequence_length + padding_idx + 1, dtype=tf.int64)\n\n return tf.broadcast_to(tf.expand_dims(position_ids, axis=0), input_shape) + past_key_values_length\n\n\n# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask\ndef _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz = input_ids_shape[0]\n tgt_len = input_ids_shape[1]\n mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE\n mask_cond = tf.range(shape_list(mask)[-1])\n\n mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)\n\n if past_key_values_length > 0:\n mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)\n\n return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))\n\n\n# Copied from transformers.models.bart.modeling_tf_bart._expand_mask\ndef _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n src_len = shape_list(mask)[1]\n tgt_len = tgt_len if tgt_len is not None else src_len\n one_cst = tf.constant(1.0)\n mask = tf.cast(mask, dtype=one_cst.dtype)\n expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))\n\n return (one_cst - expanded_mask) * LARGE_NEGATIVE\n\n\n# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->XGLM\nclass TFXGLMAttention(keras.layers.Layer):\n \"\"\"Multi-headed attention from \"Attention Is All You Need\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n\n self.num_heads = num_heads\n self.dropout = keras.layers.Dropout(dropout)\n self.head_dim = embed_dim // num_heads\n if (self.head_dim * num_heads) != self.embed_dim:\n 
raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n self.is_decoder = is_decoder\n\n self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name=\"k_proj\")\n self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name=\"q_proj\")\n self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name=\"v_proj\")\n self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name=\"out_proj\")\n\n def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):\n return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))\n\n def call(\n self,\n hidden_states: tf.Tensor,\n key_value_states: tf.Tensor | None = None,\n past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,\n attention_mask: tf.Tensor | None = None,\n layer_head_mask: tf.Tensor | None = None,\n training: Optional[bool] = False,\n ) -> Tuple[tf.Tensor, tf.Tensor | None]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = shape_list(hidden_states)\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = tf.concat([past_key_value[0], key_states], axis=2)\n value_states = tf.concat([past_key_value[1], value_states], axis=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)\n key_states = tf.reshape(key_states, proj_shape)\n value_states = tf.reshape(value_states, proj_shape)\n\n src_len = shape_list(key_states)[1]\n attn_weights = tf.matmul(query_states, key_states, transpose_b=True)\n\n tf.debugging.assert_equal(\n shape_list(attn_weights),\n [bsz * self.num_heads, tgt_len, src_len],\n message=(\n f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is\"\n f\" {shape_list(attn_weights)}\"\n ),\n )\n\n if attention_mask is not None:\n tf.debugging.assert_equal(\n shape_list(attention_mask),\n [bsz, 1, tgt_len, src_len],\n message=(\n f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is\"\n f\" {shape_list(attention_mask)}\"\n ),\n )\n\n attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)\n attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_weights = stable_softmax(attn_weights, axis=-1)\n\n if layer_head_mask is not None:\n tf.debugging.assert_equal(\n shape_list(layer_head_mask),\n [self.num_heads],\n message=(\n f\"Head mask for a single layer should be of size {(self.num_heads)}, but is\"\n f\" {shape_list(layer_head_mask)}\"\n ),\n )\n\n attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(\n attn_weights, (bsz, self.num_heads, tgt_len, src_len)\n )\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_probs = self.dropout(attn_weights, training=training)\n attn_output = tf.matmul(attn_probs, value_states)\n\n tf.debugging.assert_equal(\n shape_list(attn_output),\n [bsz * self.num_heads, tgt_len, self.head_dim],\n message=(\n f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is\"\n f\" {shape_list(attn_output)}\"\n ),\n )\n\n attn_output = tf.transpose(\n tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)\n )\n attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))\n\n attn_output = self.out_proj(attn_output)\n attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))\n\n return attn_output, attn_weights, past_key_value\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"k_proj\", None) is not None:\n with tf.name_scope(self.k_proj.name):\n self.k_proj.build([None, None, self.embed_dim])\n if getattr(self, \"q_proj\", None) is not None:\n with tf.name_scope(self.q_proj.name):\n self.q_proj.build([None, None, self.embed_dim])\n if getattr(self, \"v_proj\", None) is not None:\n with tf.name_scope(self.v_proj.name):\n self.v_proj.build([None, None, self.embed_dim])\n if getattr(self, \"out_proj\", None) is not None:\n with tf.name_scope(self.out_proj.name):\n self.out_proj.build([None, None, self.embed_dim])\n\n\nclass TFXGLMDecoderLayer(keras.layers.Layer):\n def __init__(self, config: XGLMConfig, **kwargs: Any) -> None:\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = 
TFXGLMAttention(\n embed_dim=self.embed_dim,\n num_heads=config.attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n name=\"self_attn\",\n )\n self.dropout = keras.layers.Dropout(config.dropout)\n self.activation_fn = get_tf_activation(config.activation_function)\n self.activation_dropout = keras.layers.Dropout(config.activation_dropout)\n\n if config.add_cross_attention:\n self.encoder_attn = TFXGLMAttention(\n embed_dim=self.embed_dim,\n num_heads=config.attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n name=\"encoder_attn\",\n )\n self.encoder_attn_layer_norm = keras.layers.LayerNormalization(\n epsilon=1e-5, name=\"encoder_attn_layer_norm\"\n )\n\n self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.fc1 = keras.layers.Dense(config.ffn_dim, name=\"fc1\")\n self.fc2 = keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n self.config = config\n\n # Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer.call\n def call(\n self,\n hidden_states: tf.Tensor,\n attention_mask: tf.Tensor | None = None,\n encoder_hidden_states: tf.Tensor | None = None,\n encoder_attention_mask: tf.Tensor | None = None,\n layer_head_mask: tf.Tensor | None = None,\n cross_attn_layer_head_mask: tf.Tensor | None = None,\n past_key_value: Tuple[tf.Tensor] | None = None,\n training: Optional[bool] = False,\n ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n \"\"\"\n Args:\n hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*\n attention_mask (`tf.Tensor`): attention mask of size\n *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\n encoder_hidden_states (`tf.Tensor`):\n cross attention input to the layer of shape *(batch, seq_len, embed_dim)*\n encoder_attention_mask (`tf.Tensor`): encoder attention mask of size\n *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.\n layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n *(decoder_attention_heads,)*\n cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.\n *(decoder_attention_heads,)*\n past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n 
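The slicing just above follows the per-layer cache layout spelled out in the surrounding comments: each layer's `past_key_value` tuple stores the self-attention key/value states in positions 1-2 and the cross-attention key/value states in positions 3-4, so `past_key_value[:2]` feeds `self_attn`, `past_key_value[-2:]` feeds `encoder_attn`, and `present_key_value` is rebuilt in that same order. A minimal, self-contained sketch of that tuple layout (shapes and tensor names below are illustrative assumptions, not taken from the module):

    import tensorflow as tf

    # Per-layer cache entries of shape (bsz, num_heads, seq_len, head_dim); shapes are made up.
    self_k = tf.zeros((1, 16, 7, 64))
    self_v = tf.zeros((1, 16, 7, 64))
    cross_k = tf.zeros((1, 16, 12, 64))  # encoder side, so a different sequence length
    cross_v = tf.zeros((1, 16, 12, 64))

    past_key_value = (self_k, self_v, cross_k, cross_v)
    self_attn_past_key_value = past_key_value[:2]    # consumed by self_attn
    cross_attn_past_key_value = past_key_value[-2:]  # consumed by encoder_attn
    present_key_value = self_attn_past_key_value + cross_attn_past_key_value  # same 4-tuple layout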
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n layer_head_mask=cross_attn_layer_head_mask,\n past_key_value=cross_attn_past_key_value,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n return (\n hidden_states,\n self_attn_weights,\n cross_attn_weights,\n present_key_value,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"self_attn\", None) is not None:\n with tf.name_scope(self.self_attn.name):\n self.self_attn.build(None)\n if getattr(self, \"self_attn_layer_norm\", None) is not None:\n with tf.name_scope(self.self_attn_layer_norm.name):\n self.self_attn_layer_norm.build([None, None, self.embed_dim])\n if getattr(self, \"fc1\", None) is not None:\n with tf.name_scope(self.fc1.name):\n self.fc1.build([None, None, self.embed_dim])\n if getattr(self, \"fc2\", None) is not None:\n with tf.name_scope(self.fc2.name):\n self.fc2.build([None, None, self.config.ffn_dim])\n if getattr(self, \"final_layer_norm\", None) is not None:\n with tf.name_scope(self.final_layer_norm.name):\n self.final_layer_norm.build([None, None, self.embed_dim])\n if getattr(self, \"encoder_attn\", None) is not None:\n with tf.name_scope(self.encoder_attn.name):\n self.encoder_attn.build(None)\n if getattr(self, \"encoder_attn_layer_norm\", None) is not None:\n with tf.name_scope(self.encoder_attn_layer_norm.name):\n self.encoder_attn_layer_norm.build([None, None, self.embed_dim])\n\n\n@keras_serializable\nclass TFXGLMMainLayer(keras.layers.Layer):\n config_class = XGLMConfig\n\n def __init__(\n self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs, **kwargs: Any\n ) -> None:\n super().__init__(*inputs, **kwargs)\n\n self.config = config\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = TFSharedEmbeddings(\n config.vocab_size, config.d_model, self.padding_idx, name=\"embed_tokens\"\n )\n\n self.offset = 2\n self._embed_positions_weights = create_sinusoidal_positions(\n num_positions=config.max_position_embeddings + self.offset,\n embedding_dim=config.d_model,\n padding_idx=config.pad_token_id,\n )\n\n self.dropout = keras.layers.Dropout(config.dropout)\n self.layers = [TFXGLMDecoderLayer(config, name=f\"layers.{i}\") for i in range(config.num_layers)]\n self.layerdrop = config.layerdrop\n self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n\n def get_input_embeddings(self) -> TFSharedEmbeddings:\n return self.embed_tokens\n\n def set_input_embeddings(self, value: TFSharedEmbeddings) -> None:\n self.embed_tokens = value\n\n def 
_prepare_decoder_attention_mask(\n self,\n attention_mask: tf.Tensor | None,\n input_shape: tf.TensorShape,\n past_key_values_length: int,\n ) -> tf.Tensor:\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length)\n combined_attention_mask = tf.cond(\n input_shape[-1] > 1, lambda: combined_attention_mask, lambda: tf.ones_like(combined_attention_mask)\n )\n if attention_mask is None:\n return combined_attention_mask\n expand_attention_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1])\n return expand_attention_mask + combined_attention_mask\n\n def embed_positions(self, position_ids: np.ndarray | tf.Tensor | None = None) -> tf.Tensor:\n position_ids += self.offset\n positions = tf.gather(self._embed_positions_weights, position_ids, axis=0)\n return positions\n\n @unpack_inputs\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n encoder_hidden_states: np.ndarray | tf.Tensor | None = None,\n encoder_attention_mask: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n **kwargs: Any,\n ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = tf.shape(input_ids)\n input_ids = tf.reshape(input_ids, (-1, input_shape[-1]))\n elif inputs_embeds is not None:\n input_shape = tf.shape(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if position_ids is None:\n position_ids = tf.expand_dims(\n tf.range(past_key_values_length, input_shape[-1] + past_key_values_length), axis=0\n )\n position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])\n\n if inputs_embeds is None:\n check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length)\n\n # expand encoder attention mask\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])\n\n # embed positions\n positions = 
self.embed_positions(position_ids)\n\n hidden_states = tf.cast(inputs_embeds, dtype=tf.float32) + positions\n\n hidden_states = self.dropout(hidden_states, training=training)\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None\n next_decoder_cache = () if use_cache else None\n\n # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired\n for attn_mask_name, attn_mask in [(\"head_mask\", head_mask), (\"cross_attn_head_mask\", cross_attn_head_mask)]:\n if attn_mask is not None:\n tf.debugging.assert_equal(\n shape_list(attn_mask)[0],\n len(self.layers),\n message=(\n f\"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for\"\n f\" {shape_list(attn_mask)[0]}.\"\n ),\n )\n\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n dropout_probability = random.uniform(0, 1)\n if training and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n\n hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),\n past_key_value=past_key_value,\n )\n\n if use_cache:\n next_decoder_cache += (present_key_value,)\n\n if output_attentions:\n all_self_attns += (layer_self_attn,)\n\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_cross_attn,)\n\n hidden_states = self.layer_norm(hidden_states)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]\n if v is not None\n )\n return TFBaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"layer_norm\", None) is not None:\n with tf.name_scope(self.layer_norm.name):\n self.layer_norm.build([None, None, self.config.d_model])\n if getattr(self, \"embed_tokens\", None) is not None:\n with tf.name_scope(self.embed_tokens.name):\n self.embed_tokens.build(None)\n if getattr(self, \"layers\", None) is not None:\n for layer in self.layers:\n with tf.name_scope(layer.name):\n layer.build(None)\n\n\nclass TFXGLMPreTrainedModel(TFPreTrainedModel):\n config_class = XGLMConfig\n base_model_prefix = \"model\"\n\n\nXGLM_START_DOCSTRING = r\"\"\"\n This model inherits from [`TFPreTrainedModel`]. 
Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TensorFlow models and layers in `transformers` accept two formats as input:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional argument.\n\n The reason the second format is supported is that Keras methods prefer this format when passing inputs to models\n and layers. Because of this support, when using methods like `model.fit()` things should \"just work\" for you - just\n pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second\n format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with\n the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first\n positional argument:\n\n - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Note that when creating models and layers with\n [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry\n about any of this, as you can just pass inputs like you would to any other Python function!\n\n \n\n Args:\n config ([`XGLMConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nXGLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`tf.Tensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention of\n the decoder.\n encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`tf.Tensor` of shape `(num_layers, attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`tf.Tensor` of shape `(num_layers, attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.num_layers`)\n contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*, defaults to `True`):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`). Set to `False` during training, `True` during generation\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare XGLM Model transformer outputting raw hidden-states without any specific head on top.\",\n XGLM_START_DOCSTRING,\n)\nclass TFXGLMModel(TFXGLMPreTrainedModel):\n \"\"\"\n Transformer decoder consisting of *config.num_layers* layers. 
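As a usage note for `TFXGLMModel`: the input-format discussion in XGLM_START_DOCSTRING above means the same call can be written with keyword arguments, a positional list in docstring order, or a dict keyed by input names. A hedged sketch of those three equivalent call styles against the checkpoint documented in this file (an illustrative example, not part of the original module):

    from transformers import AutoTokenizer, TFXGLMModel

    tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
    model = TFXGLMModel.from_pretrained("facebook/xglm-564M")
    enc = tokenizer("Hello world!", return_tensors="tf")

    # 1) keyword arguments (like PyTorch models)
    out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
    # 2) a list with inputs in the order given in the docstring
    out_list = model([enc["input_ids"], enc["attention_mask"]])
    # 3) a dict keyed by the documented input names
    out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})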
Each layer is a [`TFXGLMDecoderLayer`]\n\n Args:\n config: XGLMConfig\n embed_tokens: [TFSharedEmbeddings]: output embedding\n \"\"\"\n\n def __init__(\n self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any\n ) -> None:\n super().__init__(config, *inputs, **kwargs)\n\n self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name=\"model\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n encoder_hidden_states: np.ndarray | tf.Tensor | None = None,\n encoder_attention_mask: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n **kwargs: Any,\n ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n return outputs\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"model\", None) is not None:\n with tf.name_scope(self.model.name):\n self.model.build(None)\n\n\n@add_start_docstrings(\n \"\"\"\n The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n XGLM_START_DOCSTRING,\n)\nclass TFXGLMForCausalLM(TFXGLMPreTrainedModel, TFCausalLanguageModelingLoss):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"model.embed_positions.weights\",\n r\"lm_head.weight\",\n ]\n _keys_to_ignore_on_save = [\n r\"model.embed_positions.weights\",\n ]\n\n def __init__(\n self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any\n ) -> None:\n super().__init__(config, *inputs, **kwargs)\n\n self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name=\"model\")\n self.lm_head = keras.layers.Dense(\n config.vocab_size,\n use_bias=False,\n kernel_initializer=get_initializer(config.init_std),\n name=\"lm_head\",\n )\n self.config = config\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):\n # only last token for inputs_ids if past is defined in kwargs\n if past_key_values:\n inputs = tf.expand_dims(inputs[:, -1], -1)\n\n position_ids = kwargs.get(\"position_ids\", None)\n attention_mask 
= kwargs.get(\"attention_mask\", None)\n\n if attention_mask is not None and position_ids is None:\n position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)\n if past_key_values:\n position_ids = tf.expand_dims(position_ids[:, -1], -1)\n\n return {\n \"input_ids\": inputs,\n \"attention_mask\": attention_mask,\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": use_cache,\n }\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n @add_code_sample_docstrings(\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFCausalLMOutputWithCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: TFModelInputType | None = None,\n attention_mask: np.ndarray | tf.Tensor | None = None,\n position_ids: np.ndarray | tf.Tensor | None = None,\n encoder_hidden_states: np.ndarray | tf.Tensor | None = None,\n encoder_attention_mask: np.ndarray | tf.Tensor | None = None,\n head_mask: np.ndarray | tf.Tensor | None = None,\n cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,\n past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,\n inputs_embeds: np.ndarray | tf.Tensor | None = None,\n labels: np.ndarray | tf.Tensor | None = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n **kwargs: Any,\n ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]:\n r\"\"\"\n labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set\n `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n \"\"\"\n\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n hidden_states = outputs[0]\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # shift labels to the left and cut last logit token\n labels = tf.concat(\n [labels[:, 1:], tf.fill((labels.shape[0], 1), tf.cast(self.config.pad_token_id, labels.dtype))],\n axis=-1,\n )\n loss = self.hf_compute_loss(labels, lm_logits)\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TFCausalLMOutputWithCrossAttentions(\n loss=loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, \"model\", None) is not None:\n with tf.name_scope(self.model.name):\n self.model.build(None)\n if getattr(self, \"lm_head\", None) is not None:\n with tf.name_scope(self.lm_head.name):\n self.lm_head.build([None, None, self.config.hidden_size])\n\n def tf_to_pt_weight_rename(self, tf_weight):\n if tf_weight == \"lm_head.weight\":\n return tf_weight, \"model.embed_tokens.weight\"\n else:\n return (tf_weight,)\n", "output": ["_create_position_ids_from_inputs_embeds", "_create_position_ids_from_input_ids", "_make_causal_mask", "_expand_mask", "create_sinusoidal_positions", "TFXGLMMainLayer", "TFXGLMAttention", "TFXGLMForCausalLM", "TFXGLMDecoderLayer", "TFXGLMPreTrainedModel", "TFXGLMModel"], "metadata": {"file_path": "transformers-main/src/transformers/models/xglm/modeling_tf_xglm.py", "file_length": 14306, "symbol_dict": [{"symbol": "create_sinusoidal_positions", "type": "mannual_defined_function", "byte_location": 1822, "location": 620}, {"symbol": "_create_position_ids_from_input_ids", "type": "mannual_defined_function", "byte_location": 2795, "location": 985}, {"symbol": "_make_causal_mask", "type": "mannual_defined_function", "byte_location": 4198, "location": 1459}, {"symbol": "_expand_mask", "type": "mannual_defined_function", "byte_location": 4881, "location": 1742}, {"symbol": "_create_position_ids_from_inputs_embeds", "type": "mannual_defined_function", "byte_location": 3490, "location": 1211}, {"symbol": "TFXGLMPreTrainedModel", "type": "mannual_defined_class", "byte_location": 29383, "location": 9568}, {"symbol": "TFXGLMMainLayer", "type": "mannual_defined_class", "byte_location": 20013, "location": 6693}, {"symbol": "TFXGLMDecoderLayer", "type": "mannual_defined_class", "byte_location": 13016, "location": 4536}, {"symbol": "TFXGLMForCausalLM", "type": "mannual_defined_class", "byte_location": 39579, "location": 12438}, {"symbol": "TFXGLMAttention", "type": "mannual_defined_class", "byte_location": 5439, "location": 1974}, {"symbol": "TFXGLMModel", "type": 
"mannual_defined_class", "byte_location": 36734, "location": 11524}]}} {"input": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport gc\nimport os\nimport unittest\nfrom collections import OrderedDict\nfrom tempfile import TemporaryDirectory\n\nimport torch\nimport torch.nn as nn\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nfrom accelerate.big_modeling import (\n cpu_offload,\n cpu_offload_with_hook,\n disk_offload,\n dispatch_model,\n init_empty_weights,\n init_on_device,\n load_checkpoint_and_dispatch,\n)\nfrom accelerate.hooks import remove_hook_from_submodules\nfrom accelerate.test_utils import require_bnb, require_cuda, require_mps, require_multi_gpu, slow\nfrom accelerate.utils import is_torch_version, offload_state_dict\n\n\nclass ModelForTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(3, 4)\n self.batchnorm = nn.BatchNorm1d(4)\n self.linear2 = nn.Linear(4, 5)\n\n def forward(self, x):\n return self.linear2(self.batchnorm(self.linear1(x)))\n\n\nclass LinearWithNonPersistentBuffers(nn.Module):\n def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None:\n factory_kwargs = {\"device\": device, \"dtype\": dtype}\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.register_buffer(\"weight\", torch.ones((out_features, in_features), **factory_kwargs))\n if bias:\n self.register_buffer(\"bias\", torch.ones(out_features, **factory_kwargs), persistent=False)\n else:\n self.register_buffer(\"bias\", None)\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n return torch.nn.functional.linear(input, self.weight, self.bias)\n\n\nclass ModelForTestNonPersistentBuffers(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = LinearWithNonPersistentBuffers(3, 4)\n self.batchnorm = nn.BatchNorm1d(4)\n self.linear2 = LinearWithNonPersistentBuffers(4, 5)\n\n def forward(self, x):\n return self.linear2(self.batchnorm(self.linear1(x)))\n\n\nclass ModelForTestCopy(nn.Module):\n def __init__(self, id: int):\n super().__init__()\n self.id = id\n self.linear1 = nn.Linear(3, 4)\n self.batchnorm = nn.BatchNorm1d(4)\n self.linear2 = nn.Linear(4, 5)\n\n def forward(self, x):\n return self.linear2(self.batchnorm(self.linear1(x))), self.id\n\n\nclass ModelForTestTiedWeights(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(4, 4)\n self.batchnorm = nn.BatchNorm1d(4)\n self.linear2 = nn.Linear(4, 4)\n\n def forward(self, x):\n return self.linear2(self.batchnorm(self.linear1(x)))\n\n\nclass BiggerModelForTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = nn.Linear(3, 4)\n self.linear2 = nn.Linear(4, 5)\n self.batchnorm = nn.BatchNorm1d(5)\n self.linear3 = nn.Linear(5, 6)\n self.linear4 = nn.Linear(6, 5)\n\n def forward(self, x):\n return 
self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n\n\n# To test preload_module_classes\nclass ModuleWithUnusedSubModules(nn.Module):\n def __init__(self, input_dim, output_dim):\n super().__init__()\n self.linear = nn.Linear(input_dim, output_dim)\n\n def forward(self, x):\n return x @ self.linear.weight.t() + self.linear.bias\n\n\nclass ModelWithUnusedSubModulesForTest(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear1 = ModuleWithUnusedSubModules(3, 4)\n self.linear2 = ModuleWithUnusedSubModules(4, 5)\n self.batchnorm = nn.BatchNorm1d(5)\n self.linear3 = ModuleWithUnusedSubModules(5, 6)\n self.linear4 = ModuleWithUnusedSubModules(6, 5)\n\n def forward(self, x):\n return self.linear4(self.linear3(self.batchnorm(self.linear2(self.linear1(x)))))\n\n\nclass BigModelingTester(unittest.TestCase):\n def test_init_empty_weights(self):\n # base use\n with init_empty_weights():\n module = nn.Linear(4, 5)\n self.assertEqual(module.weight.device, torch.device(\"meta\"))\n\n # base use with buffers, they are not touched\n with init_empty_weights():\n module = nn.BatchNorm1d(4)\n self.assertEqual(module.weight.device, torch.device(\"meta\"))\n self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n\n # Use with include_buffers=True\n register_parameter_func = nn.Module.register_parameter\n register_buffer_func = nn.Module.register_buffer\n with init_empty_weights(include_buffers=True):\n module = nn.BatchNorm1d(4)\n # nn.Module.register_parameter/buffer shouldn't be changed with torch >= 2.0\n if is_torch_version(\">=\", \"2.0\"):\n self.assertEqual(register_parameter_func, nn.Module.register_parameter)\n self.assertEqual(register_buffer_func, nn.Module.register_buffer)\n self.assertEqual(module.weight.device, torch.device(\"meta\"))\n self.assertEqual(module.running_mean.device, torch.device(\"meta\"))\n\n # Double check we didn't break PyTorch\n module = nn.BatchNorm1d(4)\n self.assertEqual(module.weight.device, torch.device(\"cpu\"))\n self.assertEqual(module.running_mean.device, torch.device(\"cpu\"))\n\n def test_init_empty_weights_very_large_model(self):\n # This is a 100 billion parameters model.\n with init_empty_weights():\n _ = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n\n @require_cuda\n def test_init_on_device_cuda(self):\n device = torch.device(\"cuda:0\")\n with init_on_device(device):\n model = nn.Linear(10, 10)\n self.assertEqual(model.weight.device, device)\n self.assertEqual(model.weight.device, device)\n\n @require_mps\n def test_init_on_device_mps(self):\n device = torch.device(\"mps:0\")\n with init_on_device(device):\n model = nn.Linear(10, 10)\n self.assertEqual(model.weight.device, device)\n self.assertEqual(model.weight.device, device)\n\n def test_cpu_offload(self):\n model = ModelForTest()\n x = torch.randn(2, 3)\n expected = model(x)\n\n device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n\n cpu_offload(model, execution_device=device)\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n # Clean up for next test.\n remove_hook_from_submodules(model)\n\n cpu_offload(model, execution_device=device, offload_buffers=True)\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n def test_cpu_offload_with_unused_submodules(self):\n model = ModelWithUnusedSubModulesForTest()\n x = 
torch.randn(2, 3)\n expected = model(x)\n\n device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n\n cpu_offload(model, execution_device=device, preload_module_classes=[\"ModuleWithUnusedSubModules\"])\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n # Clean up for next test.\n remove_hook_from_submodules(model)\n\n cpu_offload(\n model,\n execution_device=device,\n offload_buffers=True,\n preload_module_classes=[\"ModuleWithUnusedSubModules\"],\n )\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n @slow\n @require_cuda\n def test_cpu_offload_gpt2(self):\n tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n\n gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n cpu_offload(gpt2, execution_device=0)\n outputs = gpt2.generate(inputs[\"input_ids\"])\n self.assertEqual(\n tokenizer.decode(outputs[0].tolist()),\n \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n )\n\n def test_disk_offload(self):\n model = ModelForTest()\n x = torch.randn(2, 3)\n expected = model(x)\n\n device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n\n with TemporaryDirectory() as tmp_dir:\n disk_offload(model, tmp_dir, execution_device=device)\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n # Clean up for next test.\n remove_hook_from_submodules(model)\n\n with TemporaryDirectory() as tmp_dir:\n disk_offload(model, tmp_dir, execution_device=device, offload_buffers=True)\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n def test_disk_offload_with_unused_submodules(self):\n model = ModelWithUnusedSubModulesForTest()\n x = torch.randn(2, 3)\n expected = model(x)\n\n device = torch.device(0 if torch.cuda.is_available() else \"cpu\")\n\n with TemporaryDirectory() as tmp_dir:\n disk_offload(\n model, tmp_dir, execution_device=device, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n )\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n # Clean up for next test.\n remove_hook_from_submodules(model)\n\n with TemporaryDirectory() as tmp_dir:\n disk_offload(\n model,\n tmp_dir,\n execution_device=device,\n offload_buffers=True,\n preload_module_classes=[\"ModuleWithUnusedSubModules\"],\n )\n output = model(x)\n self.assertTrue(\n torch.allclose(expected, output.cpu(), 1e-4, 1e-5), msg=f\"Expected: {expected}\\nActual: {output.cpu()}\"\n )\n\n @slow\n @require_cuda\n def test_disk_offload_gpt2(self):\n tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n\n gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n with TemporaryDirectory() as tmp_dir:\n disk_offload(gpt2, tmp_dir, execution_device=0)\n outputs = gpt2.generate(inputs[\"input_ids\"])\n self.assertEqual(\n tokenizer.decode(outputs[0].tolist()),\n \"Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n )\n\n @require_cuda\n def test_dispatch_model(self):\n model = ModelForTest()\n device_map = {\"linear1\": \"disk\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir)\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_dispatch_model_with_non_persistent_buffers(self):\n model = ModelForTestNonPersistentBuffers()\n device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": \"disk\"}\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir, offload_buffers=True)\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_mps\n def test_dispatch_model_mps(self):\n model = ModelForTest()\n device_map = {\"linear1\": \"mps\", \"batchnorm\": \"disk\", \"linear2\": \"disk\"}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir)\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_dispatch_model_tied_weights(self):\n model = ModelForTestTiedWeights()\n model.linear1.weight = model.linear2.weight\n device_map = {\"linear1\": 0, \"batchnorm\": 0, \"linear2\": 0}\n\n dispatch_model(model, device_map)\n self.assertIs(model.linear2.weight, model.linear1.weight)\n\n @require_multi_gpu\n def test_dispatch_model_tied_weights_memory(self):\n # Test that we do not duplicate tied weights at any point during dispatch_model call.\n\n torch.cuda.empty_cache() # Needed in case we run several tests in a row.\n\n model = nn.Sequential(\n OrderedDict(\n [\n (\"linear0\", nn.Linear(5000, 5000, bias=False)),\n (\"linear1\", nn.Linear(5000, 5000, bias=False)),\n (\"linear2\", nn.Linear(5000, 5000, bias=False)),\n (\"linear3\", nn.Linear(5000, 5000, bias=False)),\n (\"linear4\", nn.Linear(5000, 5000, bias=False)),\n ]\n )\n )\n model.linear2.weight = model.linear0.weight\n model.linear3.weight = model.linear0.weight\n model.linear4.weight = model.linear0.weight\n\n x = torch.randn(5, 5000)\n with torch.no_grad():\n expected = model(x)\n\n # We should need only 5000 * 5000 * 32 // 8 * 1e-6 = 100 MB on the device 0 for the four linear weights.\n device_map = {\"linear0\": 0, \"linear1\": 1, \"linear2\": 0, \"linear3\": 0, \"linear4\": 0}\n\n # Just to intialize CUDA context.\n a = torch.rand(5).to(\"cuda:0\") # noqa: F841\n\n free_memory_bytes = torch.cuda.mem_get_info(\"cuda:0\")[0]\n required_memory_bytes = 5000 * 5000 * (32 // 8)\n\n # Leaving 50 MB of free memory for possible buffers, etc.\n n_vals = (free_memory_bytes - required_memory_bytes - int(50e6)) // (32 // 8)\n foo = torch.rand(n_vals, device=\"cuda:0\") # noqa: F841\n\n # If this does OOM: there is an issue in somewhere in dispatch_model, memory of tied weights is duplicated.\n try:\n dispatch_model(model, device_map)\n except torch.cuda.OutOfMemoryError as e:\n raise torch.cuda.OutOfMemoryError(\n f\"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory. 
{e}\"\n )\n except Exception as e:\n raise e\n\n with torch.no_grad():\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_dispatch_model_tied_weights_memory_with_nested_offload_cpu(self):\n # Test that we do not duplicate tied weights at any point during dispatch_model call.\n\n torch.cuda.empty_cache() # Needed in case we run several tests in a row.\n\n class SubModule(torch.nn.Module):\n def __init__(self, ref_to_parameter):\n super().__init__()\n self.parameter = ref_to_parameter\n\n def forward(self, x):\n return x + torch.max(self.parameter)\n\n class LinearModuleAndSubModule(torch.nn.Linear):\n def __init__(self, in_features, out_features):\n super().__init__(in_features, out_features, bias=False)\n self.weight_submodule = SubModule(self.weight)\n self.weight_submodule2 = SubModule(self.weight)\n self.weight_submodule3 = SubModule(self.weight)\n self.weight_submodule4 = SubModule(self.weight)\n\n def forward(self, x):\n a = torch.nn.functional.linear(self.weight_submodule(x), self.weight)\n b = torch.nn.functional.linear(self.weight_submodule2(x), self.weight)\n c = torch.nn.functional.linear(self.weight_submodule3(x), self.weight)\n d = torch.nn.functional.linear(self.weight_submodule4(x), self.weight)\n return a + b + c + d\n\n class ModelWithSubmodules(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.compute = LinearModuleAndSubModule(5000, 5000)\n self.compute1 = LinearModuleAndSubModule(5000, 5000)\n\n def forward(self, x):\n a = self.compute(x)\n b = self.compute1(x)\n return a + b\n\n # We should need only 2 * 5000 * 5000 * 32 // 8 * 1e-6 = 200 MB on the device 0 for the whole model forward, and not 600 MB.\n device_map = {\"compute\": 0, \"compute1\": \"cpu\"}\n\n model = ModelWithSubmodules()\n\n x = torch.randn(1, 5000)\n with torch.no_grad():\n expected = model(x)\n\n # Just to intialize CUDA context.\n a = torch.rand(5).to(\"cuda:0\") # noqa: F841\n\n free_memory_bytes = torch.cuda.mem_get_info(\"cuda:0\")[0]\n required_memory_bytes = 2 * 5000 * 5000 * (32 // 8) # 200 MB\n\n # Leaving 150 MB of free memory for possible buffers, etc.\n n_vals = (free_memory_bytes - required_memory_bytes - int(150e6)) // (32 // 8)\n foo = torch.rand(n_vals, device=\"cuda:0\") # noqa: F841\n\n free_memory_bytes_before_dispatch = torch.cuda.mem_get_info(\"cuda:0\")[0]\n dispatch_model(model, device_map)\n free_memory_bytes_after_dispatch = torch.cuda.mem_get_info(\"cuda:0\")[0]\n\n self.assertTrue((free_memory_bytes_after_dispatch - free_memory_bytes_before_dispatch) * 1e-6 < 130)\n\n original_pointer = model.compute1._hf_hook.weights_map[\"weight\"].data_ptr()\n\n with torch.no_grad():\n try:\n output = model(x)\n except torch.cuda.OutOfMemoryError as e:\n raise torch.cuda.OutOfMemoryError(\n f\"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory_with_nested_offload_cpu. 
{e}\"\n )\n except Exception as e:\n raise e\n\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n torch.cuda.empty_cache()\n\n free_memory_bytes_after_infer = torch.cuda.mem_get_info(\"cuda:0\")[0]\n\n # Check that we have no more references on GPU for the offloaded tied weight.\n self.assertTrue(len(model.compute1.weight_submodule._hf_hook.tied_params_map[original_pointer]) == 0)\n self.assertTrue(len(model.compute1._hf_hook.tied_params_map[original_pointer]) == 0)\n self.assertTrue((free_memory_bytes_after_infer - free_memory_bytes_after_dispatch) * 1e-6 < 130)\n\n # Test is flacky otherwise.\n del model\n gc.collect()\n\n @require_cuda\n def test_dispatch_model_tied_weights_memory_with_nested_offload_disk(self):\n # Test that we do not duplicate tied weights at any point during dispatch_model call.\n\n torch.cuda.empty_cache() # Needed in case we run several tests in a row.\n\n class SubModule(torch.nn.Module):\n def __init__(self, ref_to_parameter):\n super().__init__()\n self.parameter = ref_to_parameter\n\n def forward(self, x):\n return x + torch.max(self.parameter)\n\n class LinearModuleAndSubModule(torch.nn.Linear):\n def __init__(self, in_features, out_features):\n super().__init__(in_features, out_features, bias=False)\n self.weight_submodule = SubModule(self.weight)\n self.weight_submodule2 = SubModule(self.weight)\n self.weight_submodule3 = SubModule(self.weight)\n self.weight_submodule4 = SubModule(self.weight)\n\n def forward(self, x):\n a = torch.nn.functional.linear(self.weight_submodule(x), self.weight)\n b = torch.nn.functional.linear(self.weight_submodule2(x), self.weight)\n c = torch.nn.functional.linear(self.weight_submodule3(x), self.weight)\n d = torch.nn.functional.linear(self.weight_submodule4(x), self.weight)\n return a + b + c + d\n\n class ModelWithSubmodules(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.compute = LinearModuleAndSubModule(5000, 5000)\n self.compute1 = LinearModuleAndSubModule(5000, 5000)\n\n def forward(self, x):\n a = self.compute(x)\n b = self.compute1(x)\n return a + b\n\n # We should need only 2 * 5000 * 5000 * 32 // 8 * 1e-6 = 200 MB on the device 0 for the whole model forward, and not 600 MB.\n device_map = {\"compute\": 0, \"compute1\": \"disk\"}\n\n model = ModelWithSubmodules()\n\n x = torch.randn(1, 5000)\n with torch.no_grad():\n expected = model(x)\n\n # Just to intialize CUDA context.\n a = torch.rand(5).to(\"cuda:0\") # noqa: F841\n\n free_memory_bytes = torch.cuda.mem_get_info(\"cuda:0\")[0]\n required_memory_bytes = 2 * 5000 * 5000 * (32 // 8) # 200 MB\n\n # Leaving 150 MB of free memory for possible buffers, etc.\n n_vals = (free_memory_bytes - required_memory_bytes - int(200e6)) // (32 // 8)\n foo = torch.rand(n_vals, device=\"cuda:0\") # noqa: F841\n\n free_memory_bytes_before_dispatch = torch.cuda.mem_get_info(\"cuda:0\")[0]\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir)\n free_memory_bytes_after_dispatch = torch.cuda.mem_get_info(\"cuda:0\")[0]\n\n self.assertTrue((free_memory_bytes_after_dispatch - free_memory_bytes_before_dispatch) * 1e-6 < 130)\n\n with torch.no_grad():\n try:\n output = model(x)\n except torch.cuda.OutOfMemoryError as e:\n raise torch.cuda.OutOfMemoryError(\n f\"OOM error in dispatch_model. This is a bug and should not happen, see test_dispatch_model_tied_weights_memory_with_nested_offload_disk. 
{e}\"\n )\n except Exception as e:\n raise e\n\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n torch.cuda.empty_cache()\n\n free_memory_bytes_after_infer = torch.cuda.mem_get_info(\"cuda:0\")[0]\n\n # Check that we have no more references on GPU for the offloaded tied weight.\n n_non_empty = 0\n for pointer, pointer_dict in model.compute1.weight_submodule._hf_hook.tied_params_map.items():\n if len(pointer_dict) > 0:\n n_non_empty += 1\n self.assertTrue(n_non_empty == 1) # `compute` layer one.\n\n n_non_empty = 0\n for pointer, pointer_dict in model.compute1._hf_hook.tied_params_map.items():\n if len(pointer_dict) > 0:\n n_non_empty += 1\n self.assertTrue(n_non_empty == 1) # `compute` layer one.\n\n self.assertTrue((free_memory_bytes_after_infer - free_memory_bytes_after_dispatch) * 1e-6 < 130)\n\n @require_multi_gpu\n def test_dispatch_model_multi_gpu(self):\n model = BiggerModelForTest()\n device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 1}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir)\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_dispatch_model_copy(self):\n original_model = ModelForTestCopy(id=1)\n device_map = {\"linear1\": 0, \"batchnorm\": \"cpu\", \"linear2\": 0}\n\n x = torch.randn(2, 3)\n expected, original_output_id = original_model(x)\n\n dispatch_model(original_model, device_map)\n\n copied_model = copy.deepcopy(original_model)\n copied_model.id = 2\n output, copied_output_id = copied_model(x)\n\n self.assertEqual(original_model.id, original_output_id)\n self.assertEqual(copied_model.id, copied_output_id)\n self.assertFalse(copied_model.linear1.forward is original_model.linear1.forward)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_dispatch_model_move_offloaded_model(self):\n model = ModelForTest()\n device_map = {\"linear1\": \"disk\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir)\n with self.assertRaises(RuntimeError):\n model.to(0)\n\n @require_multi_gpu\n def test_dispatch_model_move_model_warning(self):\n model = ModelForTest()\n device_map = {\"linear1\": 0, \"batchnorm\": 0, \"linear2\": 1}\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(model, device_map, offload_dir=tmp_dir)\n with self.assertLogs(\"accelerate.big_modeling\", level=\"WARNING\"):\n model.to(\"cpu\")\n with self.assertLogs(\"accelerate.big_modeling\", level=\"WARNING\"):\n model.cuda(0)\n with self.assertRaises(RuntimeError):\n x = torch.randn(2, 3)\n model(x)\n\n @slow\n @require_multi_gpu\n def test_dispatch_model_gpt2_on_two_gpus(self):\n tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n inputs = tokenizer(\"Hello world! My name is\", return_tensors=\"pt\").to(0)\n\n gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n # Dispatch on GPUs 0 and 1\n device_map = {\n \"transformer.wte\": 0,\n \"transformer.wpe\": 0,\n \"transformer.ln_f\": 1,\n \"lm_head\": 0,\n }\n for i in range(12):\n device_map[f\"transformer.h.{i}\"] = 0 if i <= 5 else 1\n\n gpt2 = dispatch_model(gpt2, device_map)\n outputs = gpt2.generate(inputs[\"input_ids\"])\n self.assertEqual(\n tokenizer.decode(outputs[0].tolist()),\n \"Hello world! 
My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n )\n\n # Dispatch with a bit of CPU offload\n gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n for i in range(4):\n device_map[f\"transformer.h.{i}\"] = \"cpu\"\n gpt2 = dispatch_model(gpt2, device_map)\n outputs = gpt2.generate(inputs[\"input_ids\"])\n self.assertEqual(\n tokenizer.decode(outputs[0].tolist()),\n \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n )\n # Dispatch with a bit of CPU and disk offload\n gpt2 = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n for i in range(2):\n device_map[f\"transformer.h.{i}\"] = \"disk\"\n\n with TemporaryDirectory() as tmp_dir:\n state_dict = {\n k: p for k, p in gpt2.state_dict().items() if \"transformer.h.0\" in k or \"transformer.h.1\" in k\n }\n offload_state_dict(tmp_dir, state_dict)\n gpt2 = dispatch_model(gpt2, device_map, offload_dir=tmp_dir)\n outputs = gpt2.generate(inputs[\"input_ids\"])\n self.assertEqual(\n tokenizer.decode(outputs[0].tolist()),\n \"Hello world! My name is Kiyoshi, and I'm a student at the University of Tokyo\",\n )\n\n @require_cuda\n def test_dispatch_model_with_unused_submodules(self):\n model = ModelWithUnusedSubModulesForTest()\n device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 0}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(\n model, device_map, offload_dir=tmp_dir, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n )\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_mps\n def test_dispatch_model_with_unused_submodules_mps(self):\n model = ModelWithUnusedSubModulesForTest()\n device_map = {\"linear1\": \"mps\", \"linear2\": \"mps\", \"batchnorm\": \"mps\", \"linear3\": \"mps\", \"linear4\": \"disk\"}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(\n model, device_map, offload_dir=tmp_dir, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n )\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_multi_gpu\n def test_dispatch_model_with_unused_submodules_multi_gpu(self):\n model = ModelWithUnusedSubModulesForTest()\n device_map = {\"linear1\": \"cpu\", \"linear2\": \"disk\", \"batchnorm\": \"cpu\", \"linear3\": 0, \"linear4\": 1}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n dispatch_model(\n model, device_map, offload_dir=tmp_dir, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n )\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_dispatch_model_force_hooks(self):\n model = ModelForTest()\n device_map = {\"\": 0}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n dispatch_model(model, device_map, force_hooks=True)\n output = model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_load_checkpoint_and_dispatch(self):\n model = ModelForTest()\n device_map = {\"linear1\": \"cpu\", \"batchnorm\": \"cpu\", \"linear2\": 0}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n torch.save(model.state_dict(), checkpoint)\n\n new_model = ModelForTest()\n new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)\n\n # 
CPU-offloaded weights are on the meta device while waiting for the forward pass.\n self.assertEqual(new_model.linear1.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear2.weight.device, torch.device(0))\n\n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_mps\n def test_load_checkpoint_and_dispatch_mps(self):\n model = ModelForTest()\n device_map = {\"linear1\": \"mps\", \"batchnorm\": \"mps\", \"linear2\": \"disk\"}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n torch.save(model.state_dict(), checkpoint)\n\n new_model = ModelForTest()\n new_model = load_checkpoint_and_dispatch(\n new_model, checkpoint, device_map=device_map, offload_folder=tmp_dir\n )\n\n # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n self.assertEqual(new_model.linear1.weight.device, torch.device(\"mps:0\"))\n self.assertEqual(new_model.linear2.weight.device, torch.device(\"meta\"))\n\n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_multi_gpu\n def test_load_checkpoint_and_dispatch_multi_gpu(self):\n model = BiggerModelForTest()\n device_map = {\"linear1\": \"cpu\", \"linear2\": \"cpu\", \"batchnorm\": 0, \"linear3\": 0, \"linear4\": 1}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n torch.save(model.state_dict(), checkpoint)\n\n new_model = BiggerModelForTest()\n new_model = load_checkpoint_and_dispatch(new_model, checkpoint, device_map=device_map)\n\n # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n self.assertEqual(new_model.linear1.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear2.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear3.weight.device, torch.device(0))\n self.assertEqual(new_model.linear4.weight.device, torch.device(1))\n\n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_load_checkpoint_and_dispatch_with_unused_submodules(self):\n model = ModelWithUnusedSubModulesForTest()\n device_map = {\"linear1\": \"cpu\", \"linear2\": \"cpu\", \"batchnorm\": 0, \"linear3\": 0, \"linear4\": 0}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n torch.save(model.state_dict(), checkpoint)\n\n new_model = ModelWithUnusedSubModulesForTest()\n new_model = load_checkpoint_and_dispatch(\n new_model, checkpoint, device_map=device_map, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n )\n\n # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n self.assertEqual(new_model.linear1.linear.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear2.linear.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0))\n self.assertEqual(new_model.linear4.linear.weight.device, torch.device(0))\n\n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_mps\n def test_load_checkpoint_and_dispatch_with_unused_submodules_mps(self):\n model = ModelWithUnusedSubModulesForTest()\n device_map = {\"linear1\": \"mps\", \"linear2\": \"mps\", \"batchnorm\": \"mps\", \"linear3\": 
\"disk\", \"linear4\": \"disk\"}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n torch.save(model.state_dict(), checkpoint)\n\n new_model = ModelWithUnusedSubModulesForTest()\n new_model = load_checkpoint_and_dispatch(\n new_model,\n checkpoint,\n device_map=device_map,\n preload_module_classes=[\"ModuleWithUnusedSubModules\"],\n offload_folder=tmp_dir,\n )\n\n # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n self.assertEqual(new_model.linear1.linear.weight.device, torch.device(\"mps:0\"))\n self.assertEqual(new_model.linear2.linear.weight.device, torch.device(\"mps:0\"))\n self.assertEqual(new_model.linear3.linear.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear4.linear.weight.device, torch.device(\"meta\"))\n\n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_multi_gpu\n def test_load_checkpoint_and_dispatch_multi_gpu_with_unused_submodules(self):\n model = ModelWithUnusedSubModulesForTest()\n device_map = {\"linear1\": \"cpu\", \"linear2\": \"cpu\", \"batchnorm\": 0, \"linear3\": 0, \"linear4\": 1}\n\n x = torch.randn(2, 3)\n expected = model(x)\n\n with TemporaryDirectory() as tmp_dir:\n checkpoint = os.path.join(tmp_dir, \"pt_model.bin\")\n torch.save(model.state_dict(), checkpoint)\n\n new_model = ModelWithUnusedSubModulesForTest()\n new_model = load_checkpoint_and_dispatch(\n new_model, checkpoint, device_map=device_map, preload_module_classes=[\"ModuleWithUnusedSubModules\"]\n )\n\n # CPU-offloaded weights are on the meta device while waiting for the forward pass.\n self.assertEqual(new_model.linear1.linear.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear2.linear.weight.device, torch.device(\"meta\"))\n self.assertEqual(new_model.linear3.linear.weight.device, torch.device(0))\n self.assertEqual(new_model.linear4.linear.weight.device, torch.device(1))\n\n output = new_model(x)\n self.assertTrue(torch.allclose(expected, output.cpu(), atol=1e-5))\n\n @require_cuda\n def test_cpu_offload_with_hook(self):\n model1 = torch.nn.Linear(4, 5)\n model1, hook1 = cpu_offload_with_hook(model1)\n self.assertEqual(model1.weight.device, torch.device(\"cpu\"))\n\n inputs = torch.randn(3, 4)\n outputs = model1(inputs)\n self.assertEqual(outputs.device, torch.device(0))\n self.assertEqual(model1.weight.device, torch.device(0))\n\n hook1.offload()\n self.assertEqual(model1.weight.device, torch.device(\"cpu\"))\n\n model2 = torch.nn.Linear(5, 5)\n model2, hook2 = cpu_offload_with_hook(model2, prev_module_hook=hook1)\n self.assertEqual(model2.weight.device, torch.device(\"cpu\"))\n\n outputs = model1(inputs)\n self.assertEqual(outputs.device, torch.device(0))\n self.assertEqual(model1.weight.device, torch.device(0))\n\n outputs = model2(outputs)\n self.assertEqual(outputs.device, torch.device(0))\n self.assertEqual(model1.weight.device, torch.device(\"cpu\"))\n self.assertEqual(model2.weight.device, torch.device(0))\n\n hook2.offload()\n self.assertEqual(model2.weight.device, torch.device(\"cpu\"))\n\n @slow\n @require_bnb\n @require_multi_gpu\n def test_dispatch_model_bnb(self):\n \"\"\"Tests that `dispatch_model` quantizes int8 layers\"\"\"\n from huggingface_hub import hf_hub_download\n from transformers import AutoConfig, AutoModel, BitsAndBytesConfig\n from transformers.utils.bitsandbytes import replace_with_bnb_linear\n\n with init_empty_weights():\n model = 
AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n quantization_config = BitsAndBytesConfig(load_in_8bit=True)\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], quantization_config=quantization_config\n )\n\n model_path = hf_hub_download(\"bigscience/bloom-560m\", \"pytorch_model.bin\")\n\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map=\"balanced\",\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n\n self.assertTrue(model.h[-1].self_attention.query_key_value.weight.dtype == torch.int8)\n self.assertTrue(model.h[-1].self_attention.query_key_value.weight.device.index == 1)\n\n @slow\n @require_bnb\n def test_dispatch_model_int8_simple(self):\n \"\"\"Tests that `dispatch_model` quantizes int8 layers\"\"\"\n from huggingface_hub import hf_hub_download\n from transformers import AutoConfig, AutoModel, BitsAndBytesConfig\n from transformers.utils.bitsandbytes import replace_with_bnb_linear\n\n with init_empty_weights():\n model = AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n quantization_config = BitsAndBytesConfig(load_in_8bit=True)\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], quantization_config=quantization_config\n )\n\n model_path = hf_hub_download(\"bigscience/bloom-560m\", \"pytorch_model.bin\")\n\n # test with auto\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map=\"auto\",\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n\n with init_empty_weights():\n model = AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], quantization_config=quantization_config\n )\n\n # test with str device map\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map={\"\": torch.device(\"cuda:0\")},\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n\n with init_empty_weights():\n model = AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], quantization_config=quantization_config\n )\n\n # test with torch.device device map\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map={\"\": \"cuda:0\"},\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.int8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n\n @slow\n @require_bnb\n def test_dipatch_model_fp4_simple(self):\n \"\"\"Tests that `dispatch_model` quantizes fp4 layers\"\"\"\n from huggingface_hub import hf_hub_download\n from transformers import AutoConfig, AutoModel, BitsAndBytesConfig\n from transformers.utils.bitsandbytes import replace_with_bnb_linear\n\n with init_empty_weights():\n model = AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n quantization_config = BitsAndBytesConfig(load_in_4bit=True)\n\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], 
quantization_config=quantization_config\n )\n\n model_path = hf_hub_download(\"bigscience/bloom-560m\", \"pytorch_model.bin\")\n\n # test with auto\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map=\"auto\",\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n\n with init_empty_weights():\n model = AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], quantization_config=quantization_config\n )\n\n # test with str device map\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map={\"\": torch.device(\"cuda:0\")},\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n\n with init_empty_weights():\n model = AutoModel.from_config(AutoConfig.from_pretrained(\"bigscience/bloom-560m\"))\n\n model = replace_with_bnb_linear(\n model, modules_to_not_convert=[\"lm_head\"], quantization_config=quantization_config\n )\n\n # test with torch.device device map\n model = load_checkpoint_and_dispatch(\n model,\n checkpoint=model_path,\n device_map={\"\": \"cuda:0\"},\n )\n\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.dtype == torch.uint8)\n self.assertTrue(model.h[0].self_attention.query_key_value.weight.device.index == 0)\n", "output": ["LinearWithNonPersistentBuffers", "ModelWithUnusedSubModulesForTest", "ModelWithSubmodules", "ModelForTestNonPersistentBuffers", "BiggerModelForTest", "ModuleWithUnusedSubModules", "BigModelingTester", "LinearModuleAndSubModule", "ModelForTestTiedWeights", "SubModule", "ModelForTestCopy", "ModelForTest"], "metadata": {"file_path": "accelerate-main/tests/test_big_modeling.py", "file_length": 14653, "symbol_dict": [{"symbol": "ModelForTestTiedWeights", "type": "mannual_defined_class", "byte_location": 2979, "location": 966}, {"symbol": "ModelForTest", "type": "mannual_defined_class", "byte_location": 1251, "location": 379}, {"symbol": "SubModule", "type": "mannual_defined_class", "byte_location": 15941, "location": 5273}, {"symbol": "BigModelingTester", "type": "mannual_defined_class", "byte_location": 4472, "location": 1509}, {"symbol": "ModelWithSubmodules", "type": "mannual_defined_class", "byte_location": 17070, "location": 5594}, {"symbol": "LinearModuleAndSubModule", "type": "mannual_defined_class", "byte_location": 20264, "location": 6674}, {"symbol": "ModelForTestCopy", "type": "mannual_defined_class", "byte_location": 2643, "location": 840}, {"symbol": "ModelWithUnusedSubModulesForTest", "type": "mannual_defined_class", "byte_location": 3985, "location": 1336}, {"symbol": "ModelForTestNonPersistentBuffers", "type": "mannual_defined_class", "byte_location": 2288, "location": 716}, {"symbol": "ModuleWithUnusedSubModules", "type": "mannual_defined_class", "byte_location": 3721, "location": 1247}, {"symbol": "BiggerModelForTest", "type": "mannual_defined_class", "byte_location": 3283, "location": 1080}, {"symbol": "LinearWithNonPersistentBuffers", "type": "mannual_defined_class", "byte_location": 1544, "location": 489}]}} {"input": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom contextlib import suppress\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler\n\nfrom .logging import get_logger\nfrom .state import AcceleratorState, DistributedType, GradientState, is_tpu_available\nfrom .utils import (\n RNGType,\n broadcast,\n broadcast_object_list,\n concatenate,\n find_batch_size,\n get_data_structure,\n initialize_tensors,\n is_torch_version,\n send_to_device,\n slice_tensors,\n synchronize_rng_states,\n)\n\n\nlogger = get_logger(__name__)\n\n# kwargs of the DataLoader in min version 1.4.0.\n_PYTORCH_DATALOADER_KWARGS = {\n \"batch_size\": 1,\n \"shuffle\": False,\n \"sampler\": None,\n \"batch_sampler\": None,\n \"num_workers\": 0,\n \"collate_fn\": None,\n \"pin_memory\": False,\n \"drop_last\": False,\n \"timeout\": 0,\n \"worker_init_fn\": None,\n \"multiprocessing_context\": None,\n \"generator\": None,\n \"prefetch_factor\": 2,\n \"persistent_workers\": False,\n}\n\n# kwargs added after by version\n_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}\n\nfor v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():\n if is_torch_version(\">=\", v):\n _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)\n\n\nclass SeedableRandomSampler(RandomSampler):\n \"\"\"\n Same as a random sampler, except that in `__iter__` a seed can be used.\n\n Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed\n and be fully reproducable on multiple iterations.\n\n If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on\n (stored in `self.epoch`).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.epoch = 0\n self.seed = torch.random.initial_seed()\n\n def __iter__(self):\n if self.generator is None:\n self.generator = torch.Generator()\n else:\n self.seed = self.generator.initial_seed()\n # Allow `self.epoch` to modify the seed of the generator\n seed = self.epoch + self.seed\n self.generator.manual_seed(seed)\n yield from super().__iter__()\n self.set_epoch(self.epoch + 1)\n\n def set_epoch(self, epoch: int):\n \"Sets the current iteration of the sampler.\"\n self.epoch = epoch\n\n\nclass BatchSamplerShard(BatchSampler):\n \"\"\"\n Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. 
Instances of this class will\n always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.\n Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration\n at the first batch that would be too small / not present on all processes or loop with indices from the beginning.\n\n Args:\n batch_sampler (`torch.utils.data.sampler.BatchSampler`):\n The batch sampler to split in several shards.\n num_processes (`int`, *optional*, defaults to 1):\n The number of processes running concurrently.\n process_index (`int`, *optional*, defaults to 0):\n The index of the current process.\n split_batches (`bool`, *optional*, defaults to `False`):\n Whether the shards should be created by splitting a batch to give a piece of it on each process, or by\n yielding different full batches on each process.\n\n On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:\n\n - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if\n this argument is set to `False`.\n - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`\n then `[6, 7]` if this argument is set to `True`.\n even_batches (`bool`, *optional*, defaults to `True`):\n Whether or not to loop back at the beginning of the sampler when the number of samples is not a round\n multiple of (original batch size / number of processes).\n\n \n\n `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`\n equal to `False`\n\n \"\"\"\n\n def __init__(\n self,\n batch_sampler: BatchSampler,\n num_processes: int = 1,\n process_index: int = 0,\n split_batches: bool = False,\n even_batches: bool = True,\n ):\n if split_batches and batch_sampler.batch_size % num_processes != 0:\n raise ValueError(\n f\"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) \"\n f\"needs to be a round multiple of the number of processes ({num_processes}).\"\n )\n self.batch_sampler = batch_sampler\n self.num_processes = num_processes\n self.process_index = process_index\n self.split_batches = split_batches\n self.even_batches = even_batches\n self.batch_size = getattr(batch_sampler, \"batch_size\", None)\n self.drop_last = getattr(batch_sampler, \"drop_last\", False)\n if self.batch_size is None and self.even_batches:\n raise ValueError(\n \"You need to use `even_batches=False` when the batch sampler has no batch size. 
If you \"\n \"are not calling this method directly, set `accelerator.even_batches=False` instead.\"\n )\n\n @property\n def total_length(self):\n return len(self.batch_sampler)\n\n def __len__(self):\n if self.split_batches:\n # Split batches does not change the length of the batch sampler\n return len(self.batch_sampler)\n if len(self.batch_sampler) % self.num_processes == 0:\n # If the length is a round multiple of the number of processes, it's easy.\n return len(self.batch_sampler) // self.num_processes\n length = len(self.batch_sampler) // self.num_processes\n if self.drop_last:\n # Same if we drop the remainder.\n return length\n elif self.even_batches:\n # When we even batches we always get +1\n return length + 1\n else:\n # Otherwise it depends on the process index.\n return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length\n\n def __iter__(self):\n return self._iter_with_split() if self.split_batches else self._iter_with_no_split()\n\n def _iter_with_split(self):\n initial_data = []\n batch_length = self.batch_sampler.batch_size // self.num_processes\n for idx, batch in enumerate(self.batch_sampler):\n if idx == 0:\n initial_data = batch\n if len(batch) == self.batch_size:\n # If the batch is full, we yield the part of it this process is responsible of.\n yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\n\n # If drop_last is True of the last batch was full, iteration is over, otherwise...\n if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:\n if not self.even_batches:\n if len(batch) > batch_length * self.process_index:\n yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\n else:\n # For degenerate cases where the dataset has less than num_process * batch_size samples\n while len(initial_data) < self.batch_size:\n initial_data += initial_data\n batch = batch + initial_data\n yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]\n\n def _iter_with_no_split(self):\n initial_data = []\n batch_to_yield = []\n for idx, batch in enumerate(self.batch_sampler):\n # We gather the initial indices in case we need to circle back at the end.\n if not self.drop_last and idx < self.num_processes:\n initial_data += batch\n # We identify the batch to yield but wait until we ar sure every process gets a full batch before actually\n # yielding it.\n if idx % self.num_processes == self.process_index:\n batch_to_yield = batch\n if idx % self.num_processes == self.num_processes - 1 and (\n self.batch_size is None or len(batch) == self.batch_size\n ):\n yield batch_to_yield\n batch_to_yield = []\n\n # If drop_last is True, iteration is over, otherwise...\n if not self.drop_last and len(initial_data) > 0:\n if not self.even_batches:\n if len(batch_to_yield) > 0:\n yield batch_to_yield\n else:\n # ... 
we yield the complete batch we had saved before if it has the proper length\n if len(batch_to_yield) == self.batch_size:\n yield batch_to_yield\n\n # For degenerate cases where the dataset has less than num_process * batch_size samples\n while len(initial_data) < self.num_processes * self.batch_size:\n initial_data += initial_data\n\n # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next\n if len(batch) == self.batch_size:\n batch = []\n idx += 1\n\n # Make sure we yield a multiple of self.num_processes batches\n cycle_index = 0\n while idx % self.num_processes != 0 or len(batch) > 0:\n end_index = cycle_index + self.batch_size - len(batch)\n batch += initial_data[cycle_index:end_index]\n if idx % self.num_processes == self.process_index:\n yield batch\n cycle_index = end_index\n batch = []\n idx += 1\n\n\nclass IterableDatasetShard(IterableDataset):\n \"\"\"\n Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will\n always yield a number of samples that is a round multiple of the actual batch size (depending of the value of\n `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the\n `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would\n be too small or loop with indices from the beginning.\n\n Args:\n dataset (`torch.utils.data.dataset.IterableDataset`):\n The batch sampler to split in several shards.\n batch_size (`int`, *optional*, defaults to 1):\n The size of the batches per shard (if `split_batches=False`) or the size of the batches (if\n `split_batches=True`).\n drop_last (`bool`, *optional*, defaults to `False`):\n Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the\n beginning.\n num_processes (`int`, *optional*, defaults to 1):\n The number of processes running concurrently.\n process_index (`int`, *optional*, defaults to 0):\n The index of the current process.\n split_batches (`bool`, *optional*, defaults to `False`):\n Whether the shards should be created by splitting a batch to give a piece of it on each process, or by\n yielding different full batches on each process.\n\n On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:\n\n - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this\n argument is set to `False`.\n - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if\n this argument is set to `True`.\n \"\"\"\n\n def __init__(\n self,\n dataset: IterableDataset,\n batch_size: int = 1,\n drop_last: bool = False,\n num_processes: int = 1,\n process_index: int = 0,\n split_batches: bool = False,\n ):\n if split_batches and batch_size > 1 and batch_size % num_processes != 0:\n raise ValueError(\n f\"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) \"\n f\"needs to be a round multiple of the number of processes ({num_processes}).\"\n )\n self.dataset = dataset\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.num_processes = num_processes\n self.process_index = process_index\n self.split_batches = split_batches\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n if hasattr(self.dataset, \"set_epoch\"):\n self.dataset.set_epoch(epoch)\n\n def __len__(self):\n # We will just raise the downstream 
error if the underlying dataset is not sized\n if self.drop_last:\n return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size\n else:\n return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size\n\n def __iter__(self):\n if (\n not hasattr(self.dataset, \"set_epoch\")\n and hasattr(self.dataset, \"generator\")\n and isinstance(self.dataset.generator, torch.Generator)\n ):\n self.dataset.generator.manual_seed(self.epoch)\n real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)\n process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size\n process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)\n\n first_batch = None\n current_batch = []\n for element in self.dataset:\n current_batch.append(element)\n # Wait to have a full batch before yielding elements.\n if len(current_batch) == real_batch_size:\n for i in process_slice:\n yield current_batch[i]\n if first_batch is None:\n first_batch = current_batch.copy()\n current_batch = []\n\n # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.\n if not self.drop_last and len(current_batch) > 0:\n if first_batch is None:\n first_batch = current_batch.copy()\n while len(current_batch) < real_batch_size:\n current_batch += first_batch\n for i in process_slice:\n yield current_batch[i]\n\n\nclass DataLoaderStateMixin:\n \"\"\"\n Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the\n end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other\n useful information that might be needed.\n\n **Available attributes:**\n\n - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch\n - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total\n batch size\n\n \"\"\"\n\n def __init_subclass__(cls, **kwargs):\n cls.end_of_dataloader = False\n cls.remainder = -1\n\n def reset(self):\n self.end_of_dataloader = False\n self.remainder = -1\n\n def begin(self):\n \"Prepares the gradient state for the current dataloader\"\n self.reset()\n with suppress(Exception):\n if not self._drop_last:\n length = getattr(self.dataset, \"total_dataset_length\", len(self.dataset))\n self.remainder = length % self.total_batch_size\n self.gradient_state._add_dataloader(self)\n\n def end(self):\n \"Cleans up the gradient state after exiting the dataloader\"\n self.gradient_state._remove_dataloader(self)\n\n\nclass DataLoaderShard(DataLoader, DataLoaderStateMixin):\n \"\"\"\n Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.\n\n Args:\n dataset (`torch.utils.data.dataset.Dataset`):\n The dataset to use to build this datalaoder.\n device (`torch.device`, *optional*):\n If passed, the device to put all batches on.\n rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration. 
Should be one or\n several of:\n\n - `\"torch\"`: the base torch random number generator\n - `\"cuda\"`: the CUDA random number generator (GPU only)\n - `\"xla\"`: the XLA random number generator (TPU only)\n - `\"generator\"`: an optional `torch.Generator`\n synchronized_generator (`torch.Generator`, *optional*):\n A random number generator to keep synchronized across processes.\n skip_batches (`int`, *optional*, defaults to 0):\n The number of batches to skip at the beginning.\n kwargs:\n All other keyword arguments to pass to the regular `DataLoader` initialization.\n\n **Available attributes:**\n\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n number of processes\n\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n device=None,\n rng_types=None,\n synchronized_generator=None,\n skip_batches=0,\n _drop_last: bool = False,\n **kwargs,\n ):\n super().__init__(dataset, **kwargs)\n self.device = device\n self.rng_types = rng_types\n self.synchronized_generator = synchronized_generator\n self.skip_batches = skip_batches\n self.gradient_state = GradientState()\n self._drop_last = _drop_last\n self.iteration = 0\n\n def __iter__(self):\n if self.rng_types is not None:\n synchronize_rng_states(self.rng_types, self.synchronized_generator)\n self.begin()\n\n self.set_epoch(self.iteration)\n dataloader_iter = super().__iter__()\n # We iterate one batch ahead to check when we are at the end\n try:\n current_batch = next(dataloader_iter)\n except StopIteration:\n yield\n\n batch_index = 0\n while True:\n try:\n # But we still move it to the device so it is done before `StopIteration` is reached\n if self.device is not None:\n current_batch = send_to_device(current_batch, self.device)\n next_batch = next(dataloader_iter)\n if batch_index >= self.skip_batches:\n yield current_batch\n batch_index += 1\n current_batch = next_batch\n except StopIteration:\n self.end_of_dataloader = True\n if batch_index >= self.skip_batches:\n yield current_batch\n break\n\n self.iteration += 1\n self.end()\n\n def set_epoch(self, epoch: int):\n # In case it is manually passed in, the user can set it to what they like\n if self.iteration != epoch:\n self.iteration = epoch\n if hasattr(self.batch_sampler, \"sampler\") and hasattr(self.batch_sampler.sampler, \"set_epoch\"):\n self.batch_sampler.sampler.set_epoch(epoch)\n # We support if a custom `Dataset` implementation has `set_epoch`\n # or in general HF datasets `Datasets`\n elif hasattr(self.dataset, \"set_epoch\"):\n self.dataset.set_epoch(epoch)\n\n @property\n def total_batch_size(self):\n batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler\n return (\n batch_sampler.batch_size\n if getattr(batch_sampler, \"split_batches\", False)\n else (batch_sampler.batch_size * getattr(batch_sampler, \"num_processes\", 1))\n )\n\n @property\n def total_dataset_length(self):\n if hasattr(self.dataset, \"total_length\"):\n return self.dataset.total_length\n else:\n return len(self.dataset)\n\n\nif is_tpu_available(check_device=False):\n import torch_xla.distributed.parallel_loader as xpl\n\n class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):\n \"\"\"\n Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.\n\n XLA preloading threads will all call DataLoaderShard's __iter__(). 
Remove rng_types from DataLoaderShard to\n prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main\n thread only.\n\n **Available attributes:**\n\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n number of processes\n\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n\n def __init__(self, dataloader: DataLoaderShard, device: torch.device):\n super().__init__(dataloader, device)\n self._rng_types = self._loader.rng_types\n self._loader.rng_types = None\n\n def __iter__(self):\n if self._rng_types is not None:\n synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)\n\n return super().__iter__()\n\n @property\n def total_batch_size(self):\n return self._loader.total_batch_size\n\n @property\n def total_dataset_length(self):\n return self._loader.total_dataset_length\n\n @property\n def batch_sampler(self):\n return self._loader.batch_sampler\n\n\nclass DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):\n \"\"\"\n Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each\n process their part of the batch.\n\n Args:\n split_batches (`bool`, *optional*, defaults to `False`):\n Whether the resulting `DataLoader` should split the batches of the original data loader across devices or\n yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of\n `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be\n the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial\n `dataloader` multiplied by `num_processes` otherwise. 
Setting this option to `True` requires that the batch\n size of the `dataloader` is a round multiple of `batch_size`.\n skip_batches (`int`, *optional*, defaults to 0):\n The number of batches to skip at the beginning of an iteration.\n\n **Available attributes:**\n\n - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.\n Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total\n number of processes\n\n - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.\n \"\"\"\n\n def __init__(\n self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs\n ):\n shuffle = False\n if is_torch_version(\">=\", \"1.11.0\"):\n from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe\n\n # We need to save the shuffling state of the DataPipe\n if isinstance(dataset, ShufflerIterDataPipe):\n shuffle = dataset._shuffle_enabled\n super().__init__(dataset, **kwargs)\n self.split_batches = split_batches\n if shuffle:\n torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)\n\n self.gradient_state = GradientState()\n self.state = AcceleratorState()\n self._drop_last = _drop_last\n self.skip_batches = skip_batches\n\n self.slice_fn = slice_tensors if slice_fn is None else slice_fn\n self.iteration = 0\n\n def _fetch_batches(self, iterator):\n batches, batch = None, None\n # On process 0, we gather the batch to dispatch.\n if self.state.process_index == 0:\n try:\n if self.split_batches:\n # One batch of the main iterator is dispatched and split.\n batch = next(iterator)\n else:\n # num_processes batches of the main iterator are concatenated then dispatched and split.\n # We add the batches one by one so we have the remainder available when drop_last=False.\n batches = []\n for _ in range(self.state.num_processes):\n batches.append(next(iterator))\n try:\n batch = concatenate(batches, dim=0)\n except RuntimeError as e:\n raise RuntimeError(\n \"You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`.\"\n \"either pass `dispatch_batches=False` and have each process fetch its own batch \"\n \" or pass `split_batches=True`. 
By doing so, the main process will fetch a full batch and \"\n \"slice it into `num_processes` batches for each process.\"\n ) from e\n # In both cases, we need to get the structure of the batch that we will broadcast on other\n # processes to initialize the tensors with the right shape.\n # data_structure, stop_iteration\n batch_info = [get_data_structure(batch), False]\n except StopIteration:\n batch_info = [None, True]\n else:\n batch_info = [None, self._stop_iteration]\n # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.\n broadcast_object_list(batch_info)\n self._stop_iteration = batch_info[1]\n if self._stop_iteration:\n # If drop_last is False and split_batches is False, we may have a remainder to take care of.\n if not self.split_batches and not self._drop_last:\n if self.state.process_index == 0 and len(batches) > 0:\n batch = concatenate(batches, dim=0)\n batch_info = [get_data_structure(batch), False]\n else:\n batch_info = [None, True]\n broadcast_object_list(batch_info)\n return batch, batch_info\n\n def __iter__(self):\n self.begin()\n self.set_epoch(self.iteration)\n main_iterator = None\n if is_torch_version(\">=\", \"2.0.1\"):\n # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts\n # shared seed to all dist processes. Thus, we need to create iterator for all dist processes.\n # But, we only iterate through the DataLoader on process 0.\n main_iterator = super().__iter__()\n elif self.state.process_index == 0:\n main_iterator = super().__iter__()\n stop_iteration = False\n self._stop_iteration = False\n first_batch = None\n next_batch, next_batch_info = self._fetch_batches(main_iterator)\n batch_index = 0\n while not stop_iteration:\n batch, batch_info = next_batch, next_batch_info\n\n if self.state.process_index != 0:\n # Initialize tensors on other processes than process 0.\n batch = initialize_tensors(batch_info[0])\n batch = send_to_device(batch, self.state.device)\n # Broadcast the batch before splitting it.\n batch = broadcast(batch, from_process=0)\n\n if not self._drop_last and first_batch is None:\n # We keep at least num processes elements of the first batch to be able to complete the last batch\n first_batch = self.slice_fn(\n batch,\n slice(0, self.state.num_processes),\n process_index=self.state.process_index,\n num_processes=self.state.num_processes,\n )\n\n if batch is None:\n raise ValueError(\n f\"Batch does not contain any data (`{batch}`). 
At the end of all iterable data available before expected stop iteration.\"\n )\n\n observed_batch_size = find_batch_size(batch)\n batch_size = observed_batch_size // self.state.num_processes\n\n stop_iteration = self._stop_iteration\n if not stop_iteration:\n # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in\n # the dataloader since the number of batches is a round multiple of the number of processes.\n next_batch, next_batch_info = self._fetch_batches(main_iterator)\n # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.\n if self._stop_iteration and next_batch_info[0] is None:\n stop_iteration = True\n\n if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:\n # If the last batch is not complete, let's add the first batch to it.\n batch = concatenate([batch, first_batch], dim=0)\n # Batch size computation above is wrong, it's off by 1 so we fix it.\n batch_size += 1\n\n data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)\n batch = self.slice_fn(\n batch,\n data_slice,\n process_index=self.state.process_index,\n num_processes=self.state.num_processes,\n )\n\n if stop_iteration:\n self.end_of_dataloader = True\n self.remainder = observed_batch_size\n if batch_index >= self.skip_batches:\n yield batch\n batch_index += 1\n self.iteration += 1\n self.end()\n\n def set_epoch(self, epoch: int):\n # In case it is manually passed in, the user can set it to what they like\n if self.iteration != epoch:\n self.iteration = epoch\n if hasattr(self.batch_sampler.sampler, \"set_epoch\"):\n self.batch_sampler.sampler.set_epoch(epoch)\n elif hasattr(self.dataset, \"set_epoch\"):\n self.dataset.set_epoch(epoch)\n\n def __len__(self):\n whole_length = super().__len__()\n if self.split_batches:\n return whole_length\n elif self._drop_last:\n return whole_length // self.state.num_processes\n else:\n return math.ceil(whole_length / self.state.num_processes)\n\n @property\n def total_batch_size(self):\n return (\n self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)\n )\n\n @property\n def total_dataset_length(self):\n return len(self.dataset)\n\n\ndef prepare_data_loader(\n dataloader: DataLoader,\n device: Optional[torch.device] = None,\n num_processes: Optional[int] = None,\n process_index: Optional[int] = None,\n split_batches: bool = False,\n put_on_device: bool = False,\n rng_types: Optional[List[Union[str, RNGType]]] = None,\n dispatch_batches: Optional[bool] = None,\n even_batches: bool = True,\n slice_fn_for_dispatch: Optional[Callable] = None,\n use_seedable_sampler: bool = False,\n) -> DataLoader:\n \"\"\"\n Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.\n\n Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration\n at the first batch that would be too small / not present on all processes or loop with indices from the beginning.\n\n Args:\n dataloader (`torch.utils.data.dataloader.DataLoader`):\n The data loader to split across several devices.\n device (`torch.device`):\n The target device for the returned `DataLoader`.\n num_processes (`int`, *optional*):\n The number of processes running concurrently. Will default to the value given by\n [`~state.AcceleratorState`].\n process_index (`int`, *optional*):\n The index of the current process. 
Will default to the value given by [`~state.AcceleratorState`].\n split_batches (`bool`, *optional*, defaults to `False`):\n Whether the resulting `DataLoader` should split the batches of the original data loader across devices or\n yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of\n `num_processes` batches at each iteration).\n\n Another way to see this is that the observed batch size will be the same as the initial `dataloader` if\n this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`\n otherwise.\n\n Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of\n `batch_size`.\n put_on_device (`bool`, *optional*, defaults to `False`):\n Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or\n dictionaries of tensors).\n rng_types (list of `str` or [`~utils.RNGType`]):\n The list of random number generators to synchronize at the beginning of each iteration. Should be one or\n several of:\n\n - `\"torch\"`: the base torch random number generator\n - `\"cuda\"`: the CUDA random number generator (GPU only)\n - `\"xla\"`: the XLA random number generator (TPU only)\n - `\"generator\"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your\n dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.\n\n dispatch_batches (`bool`, *optional*):\n If set to `True`, the datalaoder prepared is only iterated through on the main process and then the batches\n are split and broadcast to each process. Will default to `True` when the underlying dataset is an\n `IterableDataset`, `False` otherwise.\n even_batches (`bool`, *optional*, defaults to `True`):\n If set to `True`, in cases where the total batch size across all processes does not exactly divide the\n dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\n all workers.\n slice_fn_for_dispatch (`Callable`, *optional*`):\n If passed, this function will be used to slice tensors across `num_processes`. Will default to\n [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be\n ignored otherwise.\n use_seedable_sampler (`bool`, *optional*, defaults to `False`):\n Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better\n reproducability. Comes at a cost of potentially different performances due to different shuffling\n algorithms but ensures results will be the *exact* same.\n\n Returns:\n `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches\n\n \n\n `BatchSampler`s with varying batch sizes are not enabled by default. 
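# --- Illustrative sketch (not part of the quoted source): the observed-batch-size rule
# the docstring above describes, written as plain Python. With `split_batches=True` each
# step still sees the original batch size (which must divide evenly by the process count);
# otherwise the effective batch per step is the original batch size times `num_processes`.
def observed_batch_size(dataloader_batch_size: int, num_processes: int, split_batches: bool) -> int:
    if split_batches:
        assert dataloader_batch_size % num_processes == 0, "must be a round multiple of num_processes"
        return dataloader_batch_size
    return dataloader_batch_size * num_processes

print(observed_batch_size(16, num_processes=4, split_batches=True))   # 16
print(observed_batch_size(16, num_processes=4, split_batches=False))  # 64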
To enable this behaviour, set `even_batches`\n equal to `False`\n\n \n \"\"\"\n if dispatch_batches is None:\n if not put_on_device:\n dispatch_batches = False\n else:\n dispatch_batches = isinstance(dataloader.dataset, IterableDataset)\n\n if dispatch_batches and not put_on_device:\n raise ValueError(\"Using `dispatch_batches=True` requires `put_on_device=True`.\")\n # Grab defaults from AcceleratorState\n state = AcceleratorState()\n if num_processes is None:\n num_processes = state.num_processes\n if process_index is None:\n process_index = state.process_index\n\n # Sanity check\n if split_batches:\n if dataloader.batch_size is not None:\n batch_size_for_check = dataloader.batch_size\n else:\n # For custom batch_sampler\n if hasattr(dataloader.batch_sampler, \"batch_size\"):\n batch_size_for_check = dataloader.batch_sampler.batch_size\n else:\n raise ValueError(\n \"In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed \"\n \"`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. \"\n \"Your `dataloader.batch_size` is None and `dataloader.batch_sampler` \"\n f\"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set.\"\n )\n\n if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:\n raise ValueError(\n f\"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) \"\n f\"needs to be a round multiple of the number of processes ({num_processes}).\"\n )\n\n new_dataset = dataloader.dataset\n # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it\n new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None\n sampler_is_batch_sampler = False\n synchronized_generator = None\n sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)\n if sampler_is_batch_sampler:\n sampler = getattr(dataloader.sampler, \"sampler\", None)\n else:\n sampler = getattr(dataloader.batch_sampler, \"sampler\", None)\n if isinstance(sampler, RandomSampler) and use_seedable_sampler:\n # When iterating through the dataloader during distributed processes\n # we want to ensure that on each process we are iterating through the same\n # samples in the same order if a seed is set. 
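# --- Illustrative sketch (not part of the quoted source): why the sampler swap above
# matters. If every process draws its permutation from a generator seeded with the same
# value, all of them walk the dataset indices in the same order; the two "ranks" below
# are made-up stand-ins for two processes.
import torch

def shuffled_indices(seed: int, n: int):
    g = torch.Generator()
    g.manual_seed(seed)
    return torch.randperm(n, generator=g).tolist()

rank_a = shuffled_indices(seed=42, n=8)
rank_b = shuffled_indices(seed=42, n=8)
assert rank_a == rank_b  # identical ordering on every "process"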
This requires a tweak\n # to the `torch.utils.data.RandomSampler` class (if used).\n sampler = SeedableRandomSampler(\n data_source=sampler.data_source,\n replacement=sampler.replacement,\n num_samples=sampler._num_samples,\n generator=getattr(sampler, \"generator\", torch.Generator()),\n )\n\n # No change if no multiprocess\n if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:\n if isinstance(new_dataset, IterableDataset):\n if getattr(dataloader.dataset, \"generator\", None) is not None:\n synchronized_generator = dataloader.dataset.generator\n new_dataset = IterableDatasetShard(\n new_dataset,\n batch_size=dataloader.batch_size,\n drop_last=dataloader.drop_last,\n num_processes=num_processes,\n process_index=process_index,\n split_batches=split_batches,\n )\n else:\n batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\n new_batch_sampler = BatchSamplerShard(\n batch_sampler,\n num_processes=num_processes,\n process_index=process_index,\n split_batches=split_batches,\n even_batches=even_batches,\n )\n\n # We ignore all of those since they are all dealt with by our new_batch_sampler\n ignore_kwargs = [\n \"batch_size\",\n \"shuffle\",\n \"sampler\",\n \"batch_sampler\",\n \"drop_last\",\n ]\n\n if rng_types is not None and synchronized_generator is None and \"generator\" in rng_types:\n rng_types.remove(\"generator\")\n\n kwargs = {\n k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])\n for k in _PYTORCH_DATALOADER_KWARGS\n if k not in ignore_kwargs\n }\n\n # Need to provide batch_size as batch_sampler is None for Iterable dataset\n if new_batch_sampler is None:\n kwargs[\"drop_last\"] = dataloader.drop_last\n kwargs[\"batch_size\"] = (\n dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size\n )\n if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:\n if sampler_is_batch_sampler:\n dataloader.sampler.sampler = sampler\n else:\n dataloader.batch_sampler.sampler = sampler\n if dispatch_batches:\n kwargs.pop(\"generator\")\n dataloader = DataLoaderDispatcher(\n new_dataset,\n split_batches=split_batches,\n batch_sampler=new_batch_sampler,\n _drop_last=dataloader.drop_last,\n slice_fn=slice_fn_for_dispatch,\n **kwargs,\n )\n elif sampler_is_batch_sampler:\n dataloader = DataLoaderShard(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n sampler=new_batch_sampler,\n batch_size=dataloader.batch_size,\n rng_types=rng_types,\n _drop_last=dataloader.drop_last,\n synchronized_generator=synchronized_generator,\n **kwargs,\n )\n else:\n dataloader = DataLoaderShard(\n new_dataset,\n device=device if put_on_device and state.distributed_type != DistributedType.TPU else None,\n batch_sampler=new_batch_sampler,\n rng_types=rng_types,\n synchronized_generator=synchronized_generator,\n _drop_last=dataloader.drop_last,\n **kwargs,\n )\n\n if state.distributed_type == DistributedType.TPU:\n return MpDeviceLoaderWrapper(dataloader, device)\n return dataloader\n\n\nclass SkipBatchSampler(BatchSampler):\n \"\"\"\n A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.\n \"\"\"\n\n def __init__(self, batch_sampler, skip_batches=0):\n self.batch_sampler = batch_sampler\n self.skip_batches = skip_batches\n\n def __iter__(self):\n for index, samples in enumerate(self.batch_sampler):\n if index >= self.skip_batches:\n yield 
samples\n\n @property\n def total_length(self):\n return len(self.batch_sampler)\n\n def __len__(self):\n return len(self.batch_sampler) - self.skip_batches\n\n\nclass SkipDataLoader(DataLoader):\n \"\"\"\n Subclass of a PyTorch `DataLoader` that will skip the first batches.\n\n Args:\n dataset (`torch.utils.data.dataset.Dataset`):\n The dataset to use to build this datalaoder.\n skip_batches (`int`, *optional*, defaults to 0):\n The number of batches to skip at the beginning.\n kwargs:\n All other keyword arguments to pass to the regular `DataLoader` initialization.\n \"\"\"\n\n def __init__(self, dataset, skip_batches=0, **kwargs):\n super().__init__(dataset, **kwargs)\n self.skip_batches = skip_batches\n\n def __iter__(self):\n for index, batch in enumerate(super().__iter__()):\n if index >= self.skip_batches:\n yield batch\n\n\ndef skip_first_batches(dataloader, num_batches=0):\n \"\"\"\n Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.\n \"\"\"\n dataset = dataloader.dataset\n sampler_is_batch_sampler = False\n if isinstance(dataset, IterableDataset):\n new_batch_sampler = None\n else:\n sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)\n batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler\n new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)\n\n # We ignore all of those since they are all dealt with by our new_batch_sampler\n ignore_kwargs = [\n \"batch_size\",\n \"shuffle\",\n \"sampler\",\n \"batch_sampler\",\n \"drop_last\",\n ]\n\n kwargs = {\n k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])\n for k in _PYTORCH_DATALOADER_KWARGS\n if k not in ignore_kwargs\n }\n\n # Need to provide batch_size as batch_sampler is None for Iterable dataset\n if new_batch_sampler is None:\n kwargs[\"drop_last\"] = dataloader.drop_last\n kwargs[\"batch_size\"] = dataloader.batch_size\n\n if isinstance(dataloader, DataLoaderDispatcher):\n if new_batch_sampler is None:\n # Need to manually skip batches in the dataloader\n kwargs[\"skip_batches\"] = num_batches\n dataloader = DataLoaderDispatcher(\n dataset,\n split_batches=dataloader.split_batches,\n batch_sampler=new_batch_sampler,\n _drop_last=dataloader._drop_last,\n **kwargs,\n )\n elif isinstance(dataloader, DataLoaderShard):\n if new_batch_sampler is None:\n # Need to manually skip batches in the dataloader\n kwargs[\"skip_batches\"] = num_batches\n elif sampler_is_batch_sampler:\n kwargs[\"sampler\"] = new_batch_sampler\n kwargs[\"batch_size\"] = dataloader.batch_size\n else:\n kwargs[\"batch_sampler\"] = new_batch_sampler\n dataloader = DataLoaderShard(\n dataset,\n device=dataloader.device,\n rng_types=dataloader.rng_types,\n synchronized_generator=dataloader.synchronized_generator,\n **kwargs,\n )\n else:\n if new_batch_sampler is None:\n # Need to manually skip batches in the dataloader\n dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)\n else:\n dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)\n\n return dataloader\n", "output": ["prepare_data_loader", "skip_first_batches", "DataLoaderShard", "DataLoaderDispatcher", "SkipBatchSampler", "SeedableRandomSampler", "BatchSamplerShard", "SkipDataLoader", "MpDeviceLoaderWrapper", "DataLoaderStateMixin", "IterableDatasetShard"], "metadata": {"file_path": "accelerate-main/src/accelerate/data_loader.py", "file_length": 13313, "symbol_dict": [{"symbol": "prepare_data_loader", "type": "mannual_defined_function", 
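# --- Illustrative sketch (not part of the quoted source): the typical use of
# `skip_first_batches` defined above, i.e. resuming a run mid-epoch without replaying
# batches that were already consumed. The dataset, batch size and `resume_step` value are
# made up; the import path follows this file's own module (accelerate.data_loader).
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate.data_loader import skip_first_batches

dataset = TensorDataset(torch.arange(10).float())
train_dataloader = DataLoader(dataset, batch_size=2)  # 5 batches in total

resume_step = 3  # pretend a checkpoint says 3 batches were already seen
resumed_dataloader = skip_first_batches(train_dataloader, num_batches=resume_step)

print(len(list(train_dataloader)), len(list(resumed_dataloader)))  # 5 2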
"byte_location": 32639, "location": 9018}, {"symbol": "skip_first_batches", "type": "mannual_defined_function", "byte_location": 45018, "location": 12500}, {"symbol": "SkipDataLoader", "type": "mannual_defined_class", "byte_location": 44246, "location": 12286}, {"symbol": "BatchSamplerShard", "type": "mannual_defined_class", "byte_location": 3015, "location": 970}, {"symbol": "MpDeviceLoaderWrapper", "type": "mannual_defined_class", "byte_location": 21487, "location": 6081}, {"symbol": "DataLoaderStateMixin", "type": "mannual_defined_class", "byte_location": 15637, "location": 4452}, {"symbol": "DataLoaderDispatcher", "type": "mannual_defined_class", "byte_location": 23028, "location": 6489}, {"symbol": "IterableDatasetShard", "type": "mannual_defined_class", "byte_location": 10804, "location": 3096}, {"symbol": "SkipBatchSampler", "type": "mannual_defined_class", "byte_location": 43611, "location": 12085}, {"symbol": "SeedableRandomSampler", "type": "mannual_defined_class", "byte_location": 1889, "location": 646}, {"symbol": "DataLoaderShard", "type": "mannual_defined_class", "byte_location": 16934, "location": 4820}]}} {"input": "# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Expectation:\n# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}\n\nimport json\nimport os\nimport time\nfrom functools import wraps\nfrom typing import Any, Dict, List, Optional, Union\n\nimport yaml\n\nfrom .logging import get_logger\nfrom .state import PartialState\nfrom .utils import (\n LoggerType,\n is_aim_available,\n is_clearml_available,\n is_comet_ml_available,\n is_dvclive_available,\n is_mlflow_available,\n is_tensorboard_available,\n is_wandb_available,\n listify,\n)\n\n\n_available_trackers = []\n\nif is_tensorboard_available():\n _available_trackers.append(LoggerType.TENSORBOARD)\n\nif is_wandb_available():\n _available_trackers.append(LoggerType.WANDB)\n\nif is_comet_ml_available():\n _available_trackers.append(LoggerType.COMETML)\n\nif is_aim_available():\n _available_trackers.append(LoggerType.AIM)\n\nif is_mlflow_available():\n _available_trackers.append(LoggerType.MLFLOW)\n\nif is_clearml_available():\n _available_trackers.append(LoggerType.CLEARML)\n\nif is_dvclive_available():\n _available_trackers.append(LoggerType.DVCLIVE)\n\nlogger = get_logger(__name__)\n\n\ndef on_main_process(function):\n \"\"\"\n Decorator to selectively run the decorated function on the main process only based on the `main_process_only`\n attribute in a class.\n\n Checks at function execution rather than initialization time, not triggering the initialization of the\n `PartialState`.\n \"\"\"\n\n @wraps(function)\n def execute_on_main_process(self, *args, **kwargs):\n if getattr(self, \"main_process_only\", False):\n return PartialState().on_main_process(function)(self, *args, **kwargs)\n else:\n return function(self, *args, **kwargs)\n\n return execute_on_main_process\n\n\ndef get_available_trackers():\n 
\"Returns a list of all supported available trackers in the system\"\n return _available_trackers\n\n\nclass GeneralTracker:\n \"\"\"\n A base Tracker class to be used for all logging integration implementations.\n\n Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to\n [`Accelerator`].\n\n Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:\n\n `name` (`str`): String representation of the tracker class name, such as \"TensorBoard\" `requires_logging_directory`\n (`bool`): Whether the logger requires a directory to store their logs. `tracker` (`object`): Should return internal\n tracking mechanism used by a tracker class (such as the `run` for wandb)\n\n Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevent logging, init, and\n other functions should occur on the main process or across all processes (by default will use `True`)\n \"\"\"\n\n main_process_only = True\n\n def __init__(self, _blank=False):\n if not _blank:\n err = \"\"\n if not hasattr(self, \"name\"):\n err += \"`name`\"\n if not hasattr(self, \"requires_logging_directory\"):\n if len(err) > 0:\n err += \", \"\n err += \"`requires_logging_directory`\"\n\n # as tracker is a @property that relies on post-init\n if \"tracker\" not in dir(self):\n if len(err) > 0:\n err += \", \"\n err += \"`tracker`\"\n if len(err) > 0:\n raise NotImplementedError(\n f\"The implementation for this tracker class is missing the following \"\n f\"required attributes. Please define them in the class definition: \"\n f\"{err}\"\n )\n\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration\n functionality of a tracking API.\n\n Args:\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n `str`, `float`, `int`, or `None`.\n \"\"\"\n pass\n\n def log(self, values: dict, step: Optional[int], **kwargs):\n \"\"\"\n Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with\n special behavior for the `step parameter.\n\n Args:\n values (Dictionary `str` to `str`, `float`, or `int`):\n Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n \"\"\"\n pass\n\n def finish(self):\n \"\"\"\n Should run any finalizing functions within the tracking API. If the API should not have one, just don't\n overwrite that method.\n \"\"\"\n pass\n\n\nclass TensorBoardTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `tensorboard`. 
Should be initialized at the start of your script.\n\n Args:\n run_name (`str`):\n The name of the experiment run\n logging_dir (`str`, `os.PathLike`):\n Location for TensorBoard logs to be stored.\n kwargs:\n Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method.\n \"\"\"\n\n name = \"tensorboard\"\n requires_logging_directory = True\n\n @on_main_process\n def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):\n try:\n from torch.utils import tensorboard\n except ModuleNotFoundError:\n import tensorboardX as tensorboard\n super().__init__()\n self.run_name = run_name\n self.logging_dir = os.path.join(logging_dir, run_name)\n self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)\n logger.debug(f\"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}\")\n logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n\n @property\n def tracker(self):\n return self.writer\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the\n hyperparameters in a yaml file for future use.\n\n Args:\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n `str`, `float`, `int`, or `None`.\n \"\"\"\n self.writer.add_hparams(values, metric_dict={})\n self.writer.flush()\n project_run_name = time.time()\n dir_name = os.path.join(self.logging_dir, str(project_run_name))\n os.makedirs(dir_name, exist_ok=True)\n with open(os.path.join(dir_name, \"hparams.yml\"), \"w\") as outfile:\n try:\n yaml.dump(values, outfile)\n except yaml.representer.RepresenterError:\n logger.error(\"Serialization to store hyperparameters failed\")\n raise\n logger.debug(\"Stored initial configuration hyperparameters to TensorBoard and hparams yaml file\")\n\n @on_main_process\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n\n Args:\n values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):\n Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of\n `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to either `SummaryWriter.add_scaler`,\n `SummaryWriter.add_text`, or `SummaryWriter.add_scalers` method based on the contents of `values`.\n \"\"\"\n values = listify(values)\n for k, v in values.items():\n if isinstance(v, (int, float)):\n self.writer.add_scalar(k, v, global_step=step, **kwargs)\n elif isinstance(v, str):\n self.writer.add_text(k, v, global_step=step, **kwargs)\n elif isinstance(v, dict):\n self.writer.add_scalars(k, v, global_step=step, **kwargs)\n self.writer.flush()\n logger.debug(\"Successfully logged to TensorBoard\")\n\n @on_main_process\n def log_images(self, values: dict, step: Optional[int], **kwargs):\n \"\"\"\n Logs `images` to the current run.\n\n Args:\n values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):\n Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or\n step (`int`, *optional*):\n The run step. 
If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to the `SummaryWriter.add_image` method.\n \"\"\"\n for k, v in values.items():\n self.writer.add_images(k, v, global_step=step, **kwargs)\n logger.debug(\"Successfully logged images to TensorBoard\")\n\n @on_main_process\n def finish(self):\n \"\"\"\n Closes `TensorBoard` writer\n \"\"\"\n self.writer.close()\n logger.debug(\"TensorBoard writer closed\")\n\n\nclass WandBTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.\n\n Args:\n run_name (`str`):\n The name of the experiment run.\n kwargs:\n Additional key word arguments passed along to the `wandb.init` method.\n \"\"\"\n\n name = \"wandb\"\n requires_logging_directory = False\n main_process_only = False\n\n @on_main_process\n def __init__(self, run_name: str, **kwargs):\n super().__init__()\n self.run_name = run_name\n\n import wandb\n\n self.run = wandb.init(project=self.run_name, **kwargs)\n logger.debug(f\"Initialized WandB project {self.run_name}\")\n logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n\n @property\n def tracker(self):\n return self.run\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n\n Args:\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n `str`, `float`, `int`, or `None`.\n \"\"\"\n import wandb\n\n wandb.config.update(values, allow_val_change=True)\n logger.debug(\"Stored initial configuration hyperparameters to WandB\")\n\n @on_main_process\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n\n Args:\n values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):\n Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of\n `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to the `wandb.log` method.\n \"\"\"\n self.run.log(values, step=step, **kwargs)\n logger.debug(\"Successfully logged to WandB\")\n\n @on_main_process\n def log_images(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `images` to the current run.\n\n Args:\n values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):\n Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to the `wandb.log` method.\n \"\"\"\n import wandb\n\n for k, v in values.items():\n self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)\n logger.debug(\"Successfully logged images to WandB\")\n\n @on_main_process\n def log_table(\n self,\n table_name: str,\n columns: List[str] = None,\n data: List[List[Any]] = None,\n dataframe: Any = None,\n step: Optional[int] = None,\n **kwargs,\n ):\n \"\"\"\n Log a Table containing any object type (text, image, audio, video, molecule, html, etc). 
Can be defined either\n with `columns` and `data` or with `dataframe`.\n\n Args:\n table_name (`str`):\n The name to give to the logged table on the wandb workspace\n columns (list of `str`, *optional*):\n The name of the columns on the table\n data (List of List of Any data type, *optional*):\n The data to be logged in the table\n dataframe (Any data type, *optional*):\n The data to be logged in the table\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n \"\"\"\n import wandb\n\n values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}\n self.log(values, step=step, **kwargs)\n\n @on_main_process\n def finish(self):\n \"\"\"\n Closes `wandb` writer\n \"\"\"\n self.run.finish()\n logger.debug(\"WandB run closed\")\n\n\nclass CometMLTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.\n\n API keys must be stored in a Comet config file.\n\n Args:\n run_name (`str`):\n The name of the experiment run.\n kwargs:\n Additional key word arguments passed along to the `Experiment.__init__` method.\n \"\"\"\n\n name = \"comet_ml\"\n requires_logging_directory = False\n\n @on_main_process\n def __init__(self, run_name: str, **kwargs):\n super().__init__()\n self.run_name = run_name\n\n from comet_ml import Experiment\n\n self.writer = Experiment(project_name=run_name, **kwargs)\n logger.debug(f\"Initialized CometML project {self.run_name}\")\n logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n\n @property\n def tracker(self):\n return self.writer\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n\n Args:\n values (Dictionary `str` to `bool`, `str`, `float` or `int`):\n Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n `str`, `float`, `int`, or `None`.\n \"\"\"\n self.writer.log_parameters(values)\n logger.debug(\"Stored initial configuration hyperparameters to CometML\")\n\n @on_main_process\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n\n Args:\n values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):\n Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of\n `str` to `float`/`int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`,\n or `Experiment.log_metrics` method based on the contents of `values`.\n \"\"\"\n if step is not None:\n self.writer.set_step(step)\n for k, v in values.items():\n if isinstance(v, (int, float)):\n self.writer.log_metric(k, v, step=step, **kwargs)\n elif isinstance(v, str):\n self.writer.log_other(k, v, **kwargs)\n elif isinstance(v, dict):\n self.writer.log_metrics(v, step=step, **kwargs)\n logger.debug(\"Successfully logged to CometML\")\n\n @on_main_process\n def finish(self):\n \"\"\"\n Closes `comet-ml` writer\n \"\"\"\n self.writer.end()\n logger.debug(\"CometML run closed\")\n\n\nclass AimTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `aim`. 
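# --- Illustrative sketch (not part of the quoted source): how these tracker classes are
# usually reached in practice, through the `Accelerator` front end rather than directly.
# The project name and metric values are made up; `log_with` takes the `name` strings the
# tracker classes in this module declare, and availability depends on installed packages.
from accelerate import Accelerator

accelerator = Accelerator(log_with="tensorboard", project_dir="runs")
accelerator.init_trackers(project_name="toy_project", config={"lr": 3e-4, "epochs": 2})

for step in range(10):
    loss = 1.0 / (step + 1)  # stand-in for a real training loss
    accelerator.log({"train_loss": loss}, step=step)

accelerator.end_training()  # calls `finish()` on every active tracker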
Should be initialized at the start of your script.\n\n Args:\n run_name (`str`):\n The name of the experiment run.\n kwargs:\n Additional key word arguments passed along to the `Run.__init__` method.\n \"\"\"\n\n name = \"aim\"\n requires_logging_directory = True\n\n @on_main_process\n def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = \".\", **kwargs):\n self.run_name = run_name\n\n from aim import Run\n\n self.writer = Run(repo=logging_dir, **kwargs)\n self.writer.name = self.run_name\n logger.debug(f\"Initialized Aim project {self.run_name}\")\n logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n\n @property\n def tracker(self):\n return self.writer\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n\n Args:\n values (`dict`):\n Values to be stored as initial hyperparameters as key-value pairs.\n \"\"\"\n self.writer[\"hparams\"] = values\n\n @on_main_process\n def log(self, values: dict, step: Optional[int], **kwargs):\n \"\"\"\n Logs `values` to the current run.\n\n Args:\n values (`dict`):\n Values to be logged as key-value pairs.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to the `Run.track` method.\n \"\"\"\n # Note: replace this with the dictionary support when merged\n for key, value in values.items():\n self.writer.track(value, name=key, step=step, **kwargs)\n\n @on_main_process\n def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None):\n \"\"\"\n Logs `images` to the current run.\n\n Args:\n values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`):\n Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a\n tuple is provided, the first element should be the image and the second element should be the caption.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs (`Dict[str, dict]`):\n Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the\n keys `aim_image` and `track`, respectively.\n \"\"\"\n import aim\n\n aim_image_kw = {}\n track_kw = {}\n\n if kwargs is not None:\n aim_image_kw = kwargs.get(\"aim_image\", {})\n track_kw = kwargs.get(\"track\", {})\n\n for key, value in values.items():\n if isinstance(value, tuple):\n img, caption = value\n else:\n img, caption = value, \"\"\n aim_image = aim.Image(img, caption=caption, **aim_image_kw)\n self.writer.track(aim_image, name=key, step=step, **track_kw)\n\n @on_main_process\n def finish(self):\n \"\"\"\n Closes `aim` writer\n \"\"\"\n self.writer.close()\n\n\nclass MLflowTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.\n\n Args:\n experiment_name (`str`, *optional*):\n Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.\n logging_dir (`str` or `os.PathLike`, defaults to `\".\"`):\n Location for mlflow logs to be stored.\n run_id (`str`, *optional*):\n If specified, get the run with the specified UUID and log parameters and metrics under that run. 
The run\u2019s\n end time is unset and its status is set to running, but the run\u2019s other attributes (source_version,\n source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.\n tags (`Dict[str, str]`, *optional*):\n An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a\n run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are\n set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.\n nested_run (`bool`, *optional*, defaults to `False`):\n Controls whether run is nested in parent run. True creates a nested run. Environment variable\n MLFLOW_NESTED_RUN has priority over this argument.\n run_name (`str`, *optional*):\n Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.\n description (`str`, *optional*):\n An optional string that populates the description box of the run. If a run is being resumed, the\n description is set on the resumed run. If a new run is being created, the description is set on the new\n run.\n \"\"\"\n\n name = \"mlflow\"\n requires_logging_directory = False\n\n @on_main_process\n def __init__(\n self,\n experiment_name: str = None,\n logging_dir: Optional[Union[str, os.PathLike]] = None,\n run_id: Optional[str] = None,\n tags: Optional[Union[Dict[str, Any], str]] = None,\n nested_run: Optional[bool] = False,\n run_name: Optional[str] = None,\n description: Optional[str] = None,\n ):\n experiment_name = os.getenv(\"MLFLOW_EXPERIMENT_NAME\", experiment_name)\n run_id = os.getenv(\"MLFLOW_RUN_ID\", run_id)\n tags = os.getenv(\"MLFLOW_TAGS\", tags)\n if isinstance(tags, str):\n tags = json.loads(tags)\n\n nested_run = os.getenv(\"MLFLOW_NESTED_RUN\", nested_run)\n\n import mlflow\n\n exps = mlflow.search_experiments(filter_string=f\"name = '{experiment_name}'\")\n if len(exps) > 0:\n if len(exps) > 1:\n logger.warning(\"Multiple experiments with the same name found. Using first one.\")\n experiment_id = exps[0].experiment_id\n else:\n experiment_id = mlflow.create_experiment(\n name=experiment_name,\n artifact_location=logging_dir,\n tags=tags,\n )\n\n self.active_run = mlflow.start_run(\n run_id=run_id,\n experiment_id=experiment_id,\n run_name=run_name,\n nested=nested_run,\n tags=tags,\n description=description,\n )\n\n logger.debug(f\"Initialized mlflow experiment {experiment_name}\")\n logger.debug(\n \"Make sure to log any initial configurations with `self.store_init_configuration` before training!\"\n )\n\n @property\n def tracker(self):\n return self.active_run\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.\n\n Args:\n values (`dict`):\n Values to be stored as initial hyperparameters as key-value pairs.\n \"\"\"\n import mlflow\n\n for name, value in list(values.items()):\n # internally, all values are converted to str in MLflow\n if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:\n logger.warning_once(\n f'Accelerate is attempting to log a value of \"{value}\" for key \"{name}\" as a parameter. 
MLflow\\'s'\n f\" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute.\"\n )\n del values[name]\n\n values_list = list(values.items())\n\n # MLflow cannot log more than 100 values in one go, so we have to split it\n for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):\n mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))\n\n logger.debug(\"Stored initial configuration hyperparameters to MLflow\")\n\n @on_main_process\n def log(self, values: dict, step: Optional[int]):\n \"\"\"\n Logs `values` to the current run.\n\n Args:\n values (`dict`):\n Values to be logged as key-value pairs.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n \"\"\"\n metrics = {}\n for k, v in values.items():\n if isinstance(v, (int, float)):\n metrics[k] = v\n else:\n logger.warning_once(\n f'MLflowTracker is attempting to log a value of \"{v}\" of type {type(v)} for key \"{k}\" as a metric. '\n \"MLflow's log_metric() only accepts float and int types so we dropped this attribute.\"\n )\n import mlflow\n\n mlflow.log_metrics(metrics, step=step)\n logger.debug(\"Successfully logged to mlflow\")\n\n @on_main_process\n def finish(self):\n \"\"\"\n End the active MLflow run.\n \"\"\"\n import mlflow\n\n mlflow.end_run()\n\n\nclass ClearMLTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `clearml`. Should be initialized at the start of your script.\n\n Args:\n run_name (`str`, *optional*):\n Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this\n argument.\n kwargs:\n Kwargs passed along to the `Task.__init__` method.\n \"\"\"\n\n name = \"clearml\"\n requires_logging_directory = False\n\n @on_main_process\n def __init__(self, run_name: str = None, **kwargs):\n from clearml import Task\n\n current_task = Task.current_task()\n self._initialized_externally = False\n if current_task:\n self._initialized_externally = True\n self.task = current_task\n return\n\n kwargs.setdefault(\"project_name\", os.environ.get(\"CLEARML_PROJECT\", run_name))\n kwargs.setdefault(\"task_name\", os.environ.get(\"CLEARML_TASK\", run_name))\n self.task = Task.init(**kwargs)\n\n @property\n def tracker(self):\n return self.task\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.\n\n Args:\n values (`dict`):\n Values to be stored as initial hyperparameters as key-value pairs.\n \"\"\"\n return self.task.connect_configuration(values)\n\n @on_main_process\n def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be\n ints or floats\n\n Args:\n values (`Dict[str, Union[int, float]]`):\n Values to be logged as key-value pairs. 
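# --- Illustrative sketch (not part of the quoted source): the batching trick used above
# to stay under MLflow's per-call parameter limit, written as plain Python. MAX_PER_BATCH
# is a made-up stand-in for `mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH`.
MAX_PER_BATCH = 100

def chunk_params(values: dict, batch_size: int = MAX_PER_BATCH):
    items = list(values.items())
    for i in range(0, len(items), batch_size):
        yield dict(items[i : i + batch_size])

params = {f"param_{i}": i for i in range(250)}
print([len(batch) for batch in chunk_params(params)])  # [100, 100, 50]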
If the key starts with 'eval_'/'test_'/'train_', the value will\n be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed.\n Otherwise, the value will be reported under the 'train' series, and no prefix will be removed.\n step (`int`, *optional*):\n If specified, the values will be reported as scalars, with the iteration number equal to `step`.\n Otherwise they will be reported as single values.\n kwargs:\n Additional key word arguments passed along to the `clearml.Logger.report_single_value` or\n `clearml.Logger.report_scalar` methods.\n \"\"\"\n clearml_logger = self.task.get_logger()\n for k, v in values.items():\n if not isinstance(v, (int, float)):\n logger.warning_once(\n \"Accelerator is attempting to log a value of \"\n f'\"{v}\" of type {type(v)} for key \"{k}\" as a scalar. '\n \"This invocation of ClearML logger's report_scalar() \"\n \"is incorrect so we dropped this attribute.\"\n )\n continue\n if step is None:\n clearml_logger.report_single_value(name=k, value=v, **kwargs)\n continue\n title, series = ClearMLTracker._get_title_series(k)\n clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)\n\n @on_main_process\n def log_images(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `images` to the current run.\n\n Args:\n values (`Dict[str, List[Union[np.ndarray, PIL.Image]]`):\n Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to the `clearml.Logger.report_image` method.\n \"\"\"\n clearml_logger = self.task.get_logger()\n for k, v in values.items():\n title, series = ClearMLTracker._get_title_series(k)\n clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)\n\n @on_main_process\n def log_table(\n self,\n table_name: str,\n columns: List[str] = None,\n data: List[List[Any]] = None,\n dataframe: Any = None,\n step: Optional[int] = None,\n **kwargs,\n ):\n \"\"\"\n Log a Table to the task. Can be defined eitherwith `columns` and `data` or with `dataframe`.\n\n Args:\n table_name (`str`):\n The name of the table\n columns (list of `str`, *optional*):\n The name of the columns on the table\n data (List of List of Any data type, *optional*):\n The data to be logged in the table. If `columns` is not specified, then the first entry in data will be\n the name of the columns of the table\n dataframe (Any data type, *optional*):\n The data to be logged in the table\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to the `clearml.Logger.report_table` method.\n \"\"\"\n to_report = dataframe\n if dataframe is None:\n if data is None:\n raise ValueError(\n \"`ClearMLTracker.log_table` requires that `data` to be supplied if `dataframe` is `None`\"\n )\n to_report = [columns] + data if columns else data\n title, series = ClearMLTracker._get_title_series(table_name)\n self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)\n\n @on_main_process\n def finish(self):\n \"\"\"\n Close the ClearML task. If the task was initialized externally (e.g. 
by manually calling `Task.init`), this\n function is a noop\n \"\"\"\n if self.task and not self._initialized_externally:\n self.task.close()\n\n @staticmethod\n def _get_title_series(name):\n for prefix in [\"eval\", \"test\", \"train\"]:\n if name.startswith(prefix + \"_\"):\n return name[len(prefix) + 1 :], prefix\n return name, \"train\"\n\n\nclass DVCLiveTracker(GeneralTracker):\n \"\"\"\n A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script.\n\n Args:\n run_name (`str`, *optional*):\n Ignored for dvclive. See `kwargs` instead.\n kwargs:\n Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live).\n\n Example:\n\n ```py\n from accelerate import Accelerator\n\n accelerator = Accelerator(log_with=\"dvclive\")\n accelerator.init_trackers(project_name=\"my_project\", init_kwargs={\"dvclive\": {\"dir\": \"my_directory\"}})\n ```\n \"\"\"\n\n name = \"dvclive\"\n requires_logging_directory = False\n\n @on_main_process\n def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):\n from dvclive import Live\n\n super().__init__()\n self.live = live if live is not None else Live(**kwargs)\n\n @property\n def tracker(self):\n return self.live\n\n @on_main_process\n def store_init_configuration(self, values: dict):\n \"\"\"\n Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the\n hyperparameters in a yaml file for future use.\n\n Args:\n values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types):\n Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,\n `str`, `float`, or `int`.\n \"\"\"\n self.live.log_params(values)\n\n @on_main_process\n def log(self, values: dict, step: Optional[int] = None, **kwargs):\n \"\"\"\n Logs `values` to the current run.\n\n Args:\n values (Dictionary `str` to `str`, `float`, or `int`):\n Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.\n step (`int`, *optional*):\n The run step. If included, the log will be affiliated with this step.\n kwargs:\n Additional key word arguments passed along to `dvclive.Live.log_metric()`.\n \"\"\"\n from dvclive.plots import Metric\n\n if step is not None:\n self.live.step = step\n for k, v in values.items():\n if Metric.could_log(v):\n self.live.log_metric(k, v, **kwargs)\n else:\n logger.warning_once(\n \"Accelerator attempted to log a value of \"\n f'\"{v}\" of type {type(v)} for key \"{k}\" as a scalar. 
'\n \"This invocation of DVCLive's Live.log_metric() \"\n \"is incorrect so we dropped this attribute.\"\n )\n self.live.next_step()\n\n @on_main_process\n def finish(self):\n \"\"\"\n Closes `dvclive.Live()`.\n \"\"\"\n self.live.end()\n\n\nLOGGER_TYPE_TO_CLASS = {\n \"aim\": AimTracker,\n \"comet_ml\": CometMLTracker,\n \"mlflow\": MLflowTracker,\n \"tensorboard\": TensorBoardTracker,\n \"wandb\": WandBTracker,\n \"clearml\": ClearMLTracker,\n \"dvclive\": DVCLiveTracker,\n}\n\n\ndef filter_trackers(\n log_with: List[Union[str, LoggerType, GeneralTracker]],\n logging_dir: Union[str, os.PathLike] = None,\n):\n \"\"\"\n Takes in a list of potential tracker types and checks that:\n - The tracker wanted is available in that environment\n - Filters out repeats of tracker types\n - If `all` is in `log_with`, will return all trackers in the environment\n - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None`\n\n Args:\n log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):\n A list of loggers to be setup for experiment tracking. Should be one or several of:\n\n - `\"all\"`\n - `\"tensorboard\"`\n - `\"wandb\"`\n - `\"comet_ml\"`\n - `\"mlflow\"`\n - `\"dvclive\"`\n If `\"all\"` is selected, will pick up all available trackers in the environment and initialize them. Can\n also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `\"all\"`.\n logging_dir (`str`, `os.PathLike`, *optional*):\n A path to a directory for storing logs of locally-compatible loggers.\n \"\"\"\n loggers = []\n if log_with is not None:\n if not isinstance(log_with, (list, tuple)):\n log_with = [log_with]\n if \"all\" in log_with or LoggerType.ALL in log_with:\n loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()\n else:\n for log_type in log_with:\n if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):\n raise ValueError(f\"Unsupported logging capability: {log_type}. 
Choose between {LoggerType.list()}\")\n if issubclass(type(log_type), GeneralTracker):\n loggers.append(log_type)\n else:\n log_type = LoggerType(log_type)\n if log_type not in loggers:\n if log_type in get_available_trackers():\n tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]\n if getattr(tracker_init, \"requires_logging_directory\"):\n if logging_dir is None:\n raise ValueError(\n f\"Logging with `{log_type}` requires a `logging_dir` to be passed in.\"\n )\n loggers.append(log_type)\n else:\n logger.debug(f\"Tried adding logger {log_type}, but package is unavailable in the system.\")\n\n return loggers\n", "output": ["get_available_trackers", "on_main_process", "filter_trackers", "CometMLTracker", "MLflowTracker", "ClearMLTracker", "DVCLiveTracker", "AimTracker", "GeneralTracker", "WandBTracker", "TensorBoardTracker"], "metadata": {"file_path": "accelerate-main/src/accelerate/tracking.py", "file_length": 10983, "symbol_dict": [{"symbol": "get_available_trackers", "type": "mannual_defined_function", "byte_location": 2382, "location": 742}, {"symbol": "filter_trackers", "type": "mannual_defined_function", "byte_location": 36820, "location": 10265}, {"symbol": "on_main_process", "type": "mannual_defined_function", "byte_location": 1749, "location": 565}, {"symbol": "WandBTracker", "type": "mannual_defined_class", "byte_location": 10287, "location": 2865}, {"symbol": "MLflowTracker", "type": "mannual_defined_class", "byte_location": 21227, "location": 5931}, {"symbol": "TensorBoardTracker", "type": "mannual_defined_class", "byte_location": 5584, "location": 1560}, {"symbol": "ClearMLTracker", "type": "mannual_defined_class", "byte_location": 27287, "location": 7621}, {"symbol": "CometMLTracker", "type": "mannual_defined_class", "byte_location": 14722, "location": 4106}, {"symbol": "AimTracker", "type": "mannual_defined_class", "byte_location": 17699, "location": 4944}, {"symbol": "DVCLiveTracker", "type": "mannual_defined_class", "byte_location": 33688, "location": 9335}, {"symbol": "GeneralTracker", "type": "mannual_defined_class", "byte_location": 2516, "location": 778}]}} {"input": "#!/usr/bin/env python\n\n# Copyright 2021 The HuggingFace Team. 
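# --- Illustrative sketch (not part of the quoted source): what `filter_trackers` above
# returns for a user-supplied `log_with` value. Only trackers whose packages are importable
# on the current machine survive, "all" expands to everything available, and the exact
# output therefore depends on the local environment.
from accelerate.tracking import filter_trackers

# Keeps only the requested trackers that are available; raises if a requested tracker
# needs a logging directory and none is given.
print(filter_trackers(["tensorboard", "wandb"], logging_dir="runs"))

# "all" resolves to every tracker detected in the environment.
print(filter_trackers("all", logging_dir="runs"))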
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport importlib\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nimport psutil\nimport torch\n\nfrom accelerate.commands.config import default_config_file, load_config_from_file\nfrom accelerate.commands.config.config_args import SageMakerConfig\nfrom accelerate.commands.config.config_utils import DYNAMO_BACKENDS\nfrom accelerate.state import get_int_from_env\nfrom accelerate.utils import (\n ComputeEnvironment,\n DistributedType,\n PrepareForLaunch,\n _filter_args,\n check_cuda_p2p_ib_support,\n is_bf16_available,\n is_deepspeed_available,\n is_npu_available,\n is_rich_available,\n is_sagemaker_available,\n is_torch_version,\n is_tpu_available,\n is_xpu_available,\n patch_environment,\n prepare_deepspeed_cmd_env,\n prepare_multi_gpu_env,\n prepare_sagemager_args_inputs,\n prepare_simple_launcher_cmd_env,\n prepare_tpu,\n)\nfrom accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES\n\n\nif is_rich_available():\n from rich import get_console\n from rich.logging import RichHandler\n\n FORMAT = \"%(message)s\"\n logging.basicConfig(format=FORMAT, datefmt=\"[%X]\", handlers=[RichHandler()])\n\n\nlogger = logging.getLogger(__name__)\n\noptions_to_group = {\n \"--multi-gpu\": \"Distributed GPUs\",\n \"--tpu\": \"TPU\",\n \"--use_deepspeed\": \"DeepSpeed Arguments\",\n \"--use_fsdp\": \"FSDP Arguments\",\n \"--use_megatron_lm\": \"Megatron-LM Arguments\",\n}\n\n\ndef clean_option(option):\n \"Finds all cases of - after the first two characters and changes them to _\"\n if option.startswith(\"--\"):\n return option[:3] + option[3:].replace(\"-\", \"_\")\n\n\nclass _CustomHelpAction(argparse._HelpAction):\n \"\"\"\n This is a custom help action that will hide all arguments that are not used in the command line when the help is\n called. 
This is useful for the case where the user is using a specific platform and only wants to see the arguments\n for that platform.\n \"\"\"\n\n def __call__(self, parser, namespace, values, option_string=None):\n if \"accelerate\" in sys.argv[0] and \"launch\" in sys.argv[1:]:\n args = sys.argv[2:]\n else:\n args = sys.argv[1:]\n opts = parser._actions\n titles = [\n \"Hardware Selection Arguments\",\n \"Resource Selection Arguments\",\n \"Training Paradigm Arguments\",\n \"positional arguments\",\n \"optional arguments\",\n ]\n if len(args) > 1:\n used_platforms = [arg for arg in args if arg in options_to_group.keys()]\n args = list(map(clean_option, args))\n used_titles = [options_to_group[o] for o in used_platforms]\n for i, arg in enumerate(opts):\n # If the argument's container is outside of the used titles, hide it\n if arg.container.title not in titles + used_titles:\n setattr(opts[i], \"help\", argparse.SUPPRESS)\n # If the argument is hardware selection, but not being passed, hide it\n elif arg.container.title == \"Hardware Selection Arguments\":\n if set(arg.option_strings).isdisjoint(set(args)):\n setattr(opts[i], \"help\", argparse.SUPPRESS)\n else:\n setattr(opts[i], \"help\", arg.help + \" (currently selected)\")\n # If the argument is a training paradigm, but not being passed, hide it\n elif arg.container.title == \"Training Paradigm Arguments\":\n if set(arg.option_strings).isdisjoint(set(used_platforms)):\n setattr(opts[i], \"help\", argparse.SUPPRESS)\n else:\n setattr(opts[i], \"help\", arg.help + \" (currently selected)\")\n for i, group in enumerate(list(parser._action_groups)):\n # If all arguments in the group are hidden, hide the group\n if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):\n parser._action_groups.remove(group)\n\n super().__call__(parser, namespace, values, option_string)\n\n\ndef launch_command_parser(subparsers=None):\n if subparsers is not None:\n parser = subparsers.add_parser(\"launch\", add_help=False, allow_abbrev=False)\n else:\n parser = argparse.ArgumentParser(\"Accelerate launch command\", add_help=False, allow_abbrev=False)\n\n parser.register(\"action\", \"help\", _CustomHelpAction)\n parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit.\")\n\n parser.add_argument(\n \"--config_file\", default=None, help=\"The config file to use for the default values in the launching script.\"\n )\n parser.add_argument(\n \"--quiet\",\n \"-q\",\n action=\"store_true\",\n help=\"Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. 
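# --- Illustrative sketch (not part of the quoted source): what `clean_option` above does
# to CLI flags before the custom help action compares them to the parser's registered
# options. The helper below repeats that transform so it can be run standalone.
def _clean_option_demo(option: str):
    # Same rule as `clean_option`: dashes after the leading "--" become underscores.
    if option.startswith("--"):
        return option[:3] + option[3:].replace("-", "_")

assert _clean_option_demo("--multi-gpu") == "--multi_gpu"
assert _clean_option_demo("--offload-param-device") == "--offload_param_device"
assert _clean_option_demo("train.py") is None  # non-flag arguments fall through to None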
(Only applicable to DeepSpeed and single-process configurations)\",\n )\n # Hardware selection arguments\n hardware_args = parser.add_argument_group(\n \"Hardware Selection Arguments\", \"Arguments for selecting the hardware to be used.\"\n )\n hardware_args.add_argument(\n \"--cpu\", default=False, action=\"store_true\", help=\"Whether or not to force the training on the CPU.\"\n )\n hardware_args.add_argument(\n \"--multi_gpu\",\n default=False,\n action=\"store_true\",\n help=\"Whether or not this should launch a distributed GPU training.\",\n )\n hardware_args.add_argument(\n \"--tpu\", default=False, action=\"store_true\", help=\"Whether or not this should launch a TPU training.\"\n )\n hardware_args.add_argument(\n \"--ipex\",\n default=False,\n action=\"store_true\",\n help=\"Whether or not this should launch a Intel PyTorch Extension (IPEX) training.\",\n )\n\n # Resource selection arguments\n resource_args = parser.add_argument_group(\n \"Resource Selection Arguments\", \"Arguments for fine-tuning how available hardware should be used.\"\n )\n resource_args.add_argument(\n \"--mixed_precision\",\n type=str,\n choices=[\"no\", \"fp16\", \"bf16\", \"fp8\"],\n help=\"Whether or not to use mixed precision training. \"\n \"Choose between FP16 and BF16 (bfloat16) training. \"\n \"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.\",\n )\n resource_args.add_argument(\n \"--num_processes\", type=int, default=None, help=\"The total number of processes to be launched in parallel.\"\n )\n resource_args.add_argument(\n \"--num_machines\", type=int, default=None, help=\"The total number of machines used in this training.\"\n )\n resource_args.add_argument(\n \"--num_cpu_threads_per_process\",\n type=int,\n default=None,\n help=\"The number of CPU threads per process. 
Can be tuned for optimal performance.\",\n )\n\n # Dynamo arguments\n resource_args.add_argument(\n \"--dynamo_backend\",\n type=str,\n choices=[\"no\"] + [b.lower() for b in DYNAMO_BACKENDS],\n help=\"Choose a backend to optimize your training with dynamo, see more at \"\n \"https://github.com/pytorch/torchdynamo.\",\n )\n resource_args.add_argument(\n \"--dynamo_mode\",\n type=str,\n default=\"default\",\n choices=TORCH_DYNAMO_MODES,\n help=\"Choose a mode to optimize your training with dynamo.\",\n )\n resource_args.add_argument(\n \"--dynamo_use_fullgraph\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs\",\n )\n resource_args.add_argument(\n \"--dynamo_use_dynamic\",\n default=False,\n action=\"store_true\",\n help=\"Whether to enable dynamic shape tracing.\",\n )\n\n # Training Paradigm arguments\n paradigm_args = parser.add_argument_group(\n \"Training Paradigm Arguments\", \"Arguments for selecting which training paradigm to be used.\"\n )\n paradigm_args.add_argument(\n \"--use_deepspeed\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use deepspeed.\",\n )\n paradigm_args.add_argument(\n \"--use_fsdp\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use fsdp.\",\n )\n paradigm_args.add_argument(\n \"--use_megatron_lm\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use Megatron-LM.\",\n )\n paradigm_args.add_argument(\n \"--use_xpu\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use IPEX plugin to speed up training on XPU specifically.\",\n )\n\n # distributed GPU training arguments\n distributed_args = parser.add_argument_group(\"Distributed GPUs\", \"Arguments related to distributed GPU training.\")\n distributed_args.add_argument(\n \"--gpu_ids\",\n default=None,\n help=\"What GPUs (by id) should be used for training on this machine as a comma-seperated list\",\n )\n distributed_args.add_argument(\n \"--same_network\",\n default=False,\n action=\"store_true\",\n help=\"Whether all machines used for multinode training exist on the same local network.\",\n )\n distributed_args.add_argument(\n \"--machine_rank\", type=int, default=None, help=\"The rank of the machine on which this script is launched.\"\n )\n distributed_args.add_argument(\n \"--main_process_ip\", type=str, default=None, help=\"The IP address of the machine of rank 0.\"\n )\n distributed_args.add_argument(\n \"--main_process_port\",\n type=int,\n default=None,\n help=\"The port to use to communicate with the machine of rank 0.\",\n )\n distributed_args.add_argument(\n \"-t\",\n \"--tee\",\n default=\"0\",\n type=str,\n help=\"Tee std streams into a log file and also to console.\",\n )\n distributed_args.add_argument(\n \"--role\",\n type=str,\n default=\"default\",\n help=\"User-defined role for the workers.\",\n )\n # Rendezvous related arguments\n distributed_args.add_argument(\n \"--rdzv_backend\",\n type=str,\n default=\"static\",\n help=\"The rendezvous method to use, such as 'static' (the default) or 'c10d'\",\n )\n distributed_args.add_argument(\n \"--rdzv_conf\",\n type=str,\n default=\"\",\n help=\"Additional rendezvous configuration (=,=,...).\",\n )\n distributed_args.add_argument(\n \"--max_restarts\",\n type=int,\n default=0,\n help=\"Maximum number of worker group restarts before failing.\",\n )\n distributed_args.add_argument(\n \"--monitor_interval\",\n type=float,\n default=5,\n help=\"Interval, in seconds, to monitor the state of 
workers.\",\n )\n parser.add_argument(\n \"-m\",\n \"--module\",\n action=\"store_true\",\n help=\"Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.\",\n )\n parser.add_argument(\n \"--no_python\",\n action=\"store_true\",\n help=\"Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.\",\n )\n\n # TPU arguments\n tpu_args = parser.add_argument_group(\"TPU\", \"Arguments related to TPU.\")\n tpu_args.add_argument(\n \"--tpu_cluster\",\n action=\"store_true\",\n dest=\"tpu_use_cluster\",\n help=\"Whether to use a GCP TPU pod for training.\",\n )\n tpu_args.add_argument(\n \"--no_tpu_cluster\",\n action=\"store_false\",\n dest=\"tpu_use_cluster\",\n help=\"Should not be passed explicitly, this is for internal use only.\",\n )\n tpu_args.add_argument(\n \"--tpu_use_sudo\",\n action=\"store_true\",\n help=\"Whether to use `sudo` when running the TPU training script in each pod.\",\n )\n tpu_args.add_argument(\n \"--vm\",\n type=str,\n action=\"append\",\n help=(\n \"List of single Compute VM instance names. \"\n \"If not provided we assume usage of instance groups. For TPU pods.\"\n ),\n )\n tpu_args.add_argument(\n \"--env\",\n type=str,\n action=\"append\",\n help=\"List of environment variables to set on the Compute VM instances. For TPU pods.\",\n )\n tpu_args.add_argument(\n \"--main_training_function\",\n type=str,\n default=None,\n help=\"The name of the main function to be executed in your script (only for TPU training).\",\n )\n tpu_args.add_argument(\n \"--downcast_bf16\",\n action=\"store_true\",\n help=\"Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.\",\n )\n\n # DeepSpeed arguments\n deepspeed_args = parser.add_argument_group(\"DeepSpeed Arguments\", \"Arguments related to DeepSpeed.\")\n deepspeed_args.add_argument(\n \"--deepspeed_config_file\",\n default=None,\n type=str,\n help=\"DeepSpeed config file.\",\n )\n deepspeed_args.add_argument(\n \"--zero_stage\",\n default=None,\n type=int,\n help=\"DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). \"\n \"If unspecified, will default to `2`.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_device\",\n default=None,\n type=str,\n help=\"Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--offload_param_device\",\n default=None,\n type=str,\n help=\"Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). \"\n \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--offload_optimizer_nvme_path\",\n default=None,\n type=str,\n help=\"Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). \"\n \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--offload_param_nvme_path\",\n default=None,\n type=str,\n help=\"Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). \"\n \"If unspecified, will default to 'none'.\",\n )\n deepspeed_args.add_argument(\n \"--gradient_accumulation_steps\",\n default=None,\n type=int,\n help=\"No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). 
\"\n \"If unspecified, will default to `1`.\",\n )\n deepspeed_args.add_argument(\n \"--gradient_clipping\",\n default=None,\n type=float,\n help=\"gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). \"\n \"If unspecified, will default to `1.0`.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_init_flag\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. \"\n \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.\",\n )\n deepspeed_args.add_argument(\n \"--zero3_save_16bit_model\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. \"\n \"Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.\",\n )\n deepspeed_args.add_argument(\n \"--deepspeed_hostfile\",\n default=None,\n type=str,\n help=\"DeepSpeed hostfile for configuring multi-node compute resources.\",\n )\n deepspeed_args.add_argument(\n \"--deepspeed_exclusion_filter\",\n default=None,\n type=str,\n help=\"DeepSpeed exclusion filter string when using mutli-node setup.\",\n )\n deepspeed_args.add_argument(\n \"--deepspeed_inclusion_filter\",\n default=None,\n type=str,\n help=\"DeepSpeed inclusion filter string when using mutli-node setup.\",\n )\n deepspeed_args.add_argument(\n \"--deepspeed_multinode_launcher\",\n default=None,\n type=str,\n help=\"DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.\",\n )\n\n # fsdp arguments\n fsdp_args = parser.add_argument_group(\"FSDP Arguments\", \"Arguments related to Fully Shared Data Parallelism.\")\n fsdp_args.add_argument(\n \"--fsdp_offload_params\",\n default=\"false\",\n type=str,\n help=\"Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_min_num_params\",\n type=int,\n default=1e8,\n help=\"FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_sharding_strategy\",\n type=str,\n default=\"FULL_SHARD\",\n help=\"FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_auto_wrap_policy\",\n type=str,\n default=None,\n help=\"FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_transformer_layer_cls_to_wrap\",\n default=None,\n type=str,\n help=\"Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... \"\n \"(useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_backward_prefetch_policy\",\n default=None,\n type=str,\n help=\"This argument is deprecated and will be removed in version 0.27.0 of \ud83e\udd17 Accelerate. Use `fsdp_backward_prefetch` instead.\",\n )\n fsdp_args.add_argument(\n \"--fsdp_backward_prefetch\",\n default=None,\n type=str,\n help=\"FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_state_dict_type\",\n default=None,\n type=str,\n help=\"FSDP's state dict type. 
(useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_forward_prefetch\",\n default=\"false\",\n type=str,\n help=\"If True, then FSDP explicitly prefetches the next upcoming \"\n \"all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_use_orig_params\",\n default=\"true\",\n type=str,\n help=\"If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres.\"\n \" (useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_cpu_ram_efficient_loading\",\n default=\"true\",\n type=str,\n help=\"If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. \"\n \"Only applicable for \ud83e\udd17 Transformers. When using this, `--fsdp_sync_module_states` needs to True. \"\n \"(useful only when `use_fsdp` flag is passed).\",\n )\n fsdp_args.add_argument(\n \"--fsdp_sync_module_states\",\n default=\"true\",\n type=str,\n help=\"If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0.\"\n \" (useful only when `use_fsdp` flag is passed).\",\n )\n\n # megatron_lm args\n megatron_lm_args = parser.add_argument_group(\"Megatron-LM Arguments\", \"Arguments related to Megatron-LM.\")\n megatron_lm_args.add_argument(\n \"--megatron_lm_tp_degree\",\n type=int,\n default=1,\n help=\"Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).\",\n )\n megatron_lm_args.add_argument(\n \"--megatron_lm_pp_degree\",\n type=int,\n default=1,\n help=\"Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).\",\n )\n megatron_lm_args.add_argument(\n \"--megatron_lm_num_micro_batches\",\n type=int,\n default=None,\n help=\"Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).\",\n )\n megatron_lm_args.add_argument(\n \"--megatron_lm_sequence_parallelism\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. \"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n megatron_lm_args.add_argument(\n \"--megatron_lm_recompute_activations\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to enable Selective Activation Recomputation. \"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n megatron_lm_args.add_argument(\n \"--megatron_lm_use_distributed_optimizer\",\n default=None,\n type=str,\n help=\"Decides Whether (true|false) to use distributed optimizer \"\n \"which shards optimizer state and gradients across Data Pralellel (DP) ranks. \"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n megatron_lm_args.add_argument(\n \"--megatron_lm_gradient_clipping\",\n default=1.0,\n type=float,\n help=\"Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). 
\"\n \"(useful only when `use_megatron_lm` flag is passed).\",\n )\n\n # AWS arguments\n aws_args = parser.add_argument_group(\"AWS Arguments\", \"Arguments related to AWS.\")\n aws_args.add_argument(\n \"--aws_access_key_id\",\n type=str,\n default=None,\n help=\"The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job\",\n )\n aws_args.add_argument(\n \"--aws_secret_access_key\",\n type=str,\n default=None,\n help=\"The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.\",\n )\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Whether to print out the torch.distributed stack trace when something fails.\",\n )\n parser.add_argument(\n \"training_script\",\n type=str,\n help=(\n \"The full path to the script to be launched in parallel, followed by all the arguments for the training \"\n \"script.\"\n ),\n )\n\n # Other arguments of the training scripts\n parser.add_argument(\"training_script_args\", nargs=argparse.REMAINDER, help=\"Arguments of the training script.\")\n\n if subparsers is not None:\n parser.set_defaults(func=launch_command)\n return parser\n\n\ndef simple_launcher(args):\n cmd, current_env = prepare_simple_launcher_cmd_env(args)\n\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\n if not args.quiet:\n raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n else:\n sys.exit(1)\n\n\ndef multi_gpu_launcher(args):\n import torch.distributed.run as distrib_run\n\n current_env = prepare_multi_gpu_env(args)\n if not check_cuda_p2p_ib_support():\n message = \"Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled.\"\n warn = False\n if \"NCCL_P2P_DISABLE\" not in current_env:\n current_env[\"NCCL_P2P_DISABLE\"] = \"1\"\n warn = True\n if \"NCCL_IB_DISABLE\" not in current_env:\n current_env[\"NCCL_IB_DISABLE\"] = \"1\"\n warn = True\n if warn:\n logger.warning(message)\n\n debug = getattr(args, \"debug\", False)\n args = _filter_args(\n args,\n distrib_run.get_args_parser(),\n [\"--training_script\", args.training_script, \"--training_script_args\", args.training_script_args],\n )\n with patch_environment(**current_env):\n try:\n distrib_run.run(args)\n except Exception:\n if is_rich_available() and debug:\n console = get_console()\n console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n console.print_exception(suppress=[__file__], show_locals=False)\n else:\n raise\n\n\ndef deepspeed_launcher(args):\n import torch.distributed.run as distrib_run\n\n if not is_deepspeed_available():\n raise ImportError(\"DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.\")\n\n cmd, current_env = prepare_deepspeed_cmd_env(args)\n if not check_cuda_p2p_ib_support():\n message = \"Using RTX 4000 series which doesn't support faster communication speedups. 
Ensuring P2P and IB communications are disabled.\"\n warn = False\n if \"NCCL_P2P_DISABLE\" not in current_env:\n current_env[\"NCCL_P2P_DISABLE\"] = \"1\"\n warn = True\n if \"NCCL_IB_DISABLE\" not in current_env:\n current_env[\"NCCL_IB_DISABLE\"] = \"1\"\n warn = True\n if warn:\n logger.warning(message)\n\n if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:\n with open(\".deepspeed_env\", \"a\") as f:\n for key, value in current_env.items():\n if \";\" in value or \" \" in value:\n continue\n f.write(f\"{key}={value}\\n\")\n\n process = subprocess.Popen(cmd, env=current_env)\n process.wait()\n if process.returncode != 0:\n if not args.quiet:\n raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n else:\n sys.exit(1)\n else:\n debug = getattr(args, \"debug\", False)\n args = _filter_args(\n args,\n distrib_run.get_args_parser(),\n [\"--training_script\", args.training_script, \"--training_script_args\", args.training_script_args],\n )\n with patch_environment(**current_env):\n try:\n distrib_run.run(args)\n except Exception:\n if is_rich_available() and debug:\n console = get_console()\n console.print(\"\\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]\")\n console.print_exception(suppress=[__file__], show_locals=False)\n else:\n raise\n\n\ndef tpu_launcher(args):\n import torch_xla.distributed.xla_multiprocessing as xmp\n\n if args.no_python:\n raise ValueError(\"--no_python cannot be used with TPU launcher\")\n\n args, current_env = prepare_tpu(args, {})\n\n if args.module:\n mod_name = args.training_script\n else:\n # Import training_script as a module\n script_path = Path(args.training_script)\n sys.path.append(str(script_path.parent.resolve()))\n mod_name = script_path.stem\n\n mod = importlib.import_module(mod_name)\n if not hasattr(mod, args.main_training_function):\n raise ValueError(\n f\"Your training script should have a function named {args.main_training_function}, or you should pass a \"\n \"different value to `--main_training_function`.\"\n )\n\n # Patch sys.argv\n sys.argv = [mod.__file__] + args.training_script_args\n\n main_function = getattr(mod, args.main_training_function)\n with patch_environment(**current_env):\n xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)\n\n\ndef tpu_pod_launcher(args):\n from torch_xla.distributed import xla_dist\n\n current_env = {}\n args, current_env = prepare_tpu(args, current_env, True)\n debug = getattr(args, \"debug\", False)\n\n training_script = args.training_script\n training_script_args = args.training_script_args\n new_args = _filter_args(\n args, xla_dist.get_args_parser(), [\"--tpu\", args.tpu_name, \"--positional\", \"\", \"--restart-tpuvm-pod-server\"]\n )\n\n if args.tpu_use_sudo:\n new_cmd = [\"sudo\"]\n else:\n new_cmd = []\n\n new_cmd += [\n \"accelerate-launch\",\n \"--tpu\",\n \"--no_tpu_cluster\",\n \"--num_machines\",\n \"1\",\n \"--mixed_precision\",\n \"no\",\n \"--dynamo_backend\",\n \"no\",\n \"--num_processes\",\n str(args.num_processes),\n \"--main_training_function\",\n str(args.main_training_function),\n training_script,\n ] + training_script_args\n\n new_args.positional = new_cmd\n bad_flags = \"\"\n for arg in vars(new_args):\n if arg.startswith(\"docker_\"):\n value = getattr(new_args, arg)\n if value != \"\" and value is not None:\n bad_flags += f'{arg}=\"{value}\"\\n'\n if bad_flags != \"\":\n raise ValueError(\n f\"Docker containers are not supported for TPU pod launcher 
currently, please remove the following flags:\\n{bad_flags}\"\n )\n new_args.env = [f\"{k}={v}\" for k, v in current_env.items()]\n new_args.env.append(\"ACCELERATE_IN_TPU_POD=1\")\n try:\n xla_dist.resolve_and_execute(new_args)\n except Exception:\n if is_rich_available() and debug:\n console = get_console()\n console.print(\"\\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]\")\n console.print_exception(suppress=[__file__], show_locals=False)\n else:\n raise\n\n\ndef sagemaker_launcher(sagemaker_config: SageMakerConfig, args):\n if not is_sagemaker_available():\n raise ImportError(\n \"Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`\"\n )\n if args.module or args.no_python:\n raise ValueError(\n \"SageMaker requires a python training script file and cannot be used with --module or --no_python\"\n )\n\n from sagemaker.huggingface import HuggingFace\n\n args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)\n\n huggingface_estimator = HuggingFace(**args)\n\n huggingface_estimator.fit(inputs=sagemaker_inputs)\n print(f\"You can find your model data at: {huggingface_estimator.model_data}\")\n\n\ndef _validate_launch_command(args):\n # Sanity checks\n if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:\n raise ValueError(\n \"You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time.\"\n )\n if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):\n raise ValueError(\"You need to use at least 2 processes to use `--multi_gpu`.\")\n\n defaults = None\n warned = []\n mp_from_config_flag = False\n # Get the default from the config file.\n if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:\n defaults = load_config_from_file(args.config_file)\n if (\n not args.multi_gpu\n and not args.tpu\n and not args.tpu_use_cluster\n and not args.use_deepspeed\n and not args.use_fsdp\n and not args.use_megatron_lm\n ):\n args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED\n args.multi_gpu = (\n True\n if defaults.distributed_type\n in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU)\n else False\n )\n args.tpu = defaults.distributed_type == DistributedType.TPU\n args.use_fsdp = defaults.distributed_type == DistributedType.FSDP\n args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM\n args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False\n if args.gpu_ids is None:\n if defaults.gpu_ids is not None:\n args.gpu_ids = defaults.gpu_ids\n else:\n args.gpu_ids = \"all\"\n\n if args.multi_gpu and args.num_machines is None:\n args.num_machines = defaults.num_machines\n\n if len(args.gpu_ids.split(\",\")) < 2 and (args.gpu_ids != \"all\") and args.multi_gpu and args.num_machines <= 1:\n raise ValueError(\n \"Less than two GPU ids were configured and tried to run on on multiple GPUs. 
\"\n \"Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`.\"\n )\n if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:\n # Update args with the defaults\n for name, attr in defaults.__dict__.items():\n if isinstance(attr, dict):\n for k in defaults.deepspeed_config:\n setattr(args, k, defaults.deepspeed_config[k])\n for k in defaults.fsdp_config:\n arg_to_set = k\n if \"fsdp\" not in arg_to_set:\n arg_to_set = \"fsdp_\" + arg_to_set\n setattr(args, arg_to_set, defaults.fsdp_config[k])\n for k in defaults.megatron_lm_config:\n setattr(args, k, defaults.megatron_lm_config[k])\n for k in defaults.dynamo_config:\n setattr(args, k, defaults.dynamo_config[k])\n for k in defaults.ipex_config:\n setattr(args, k, defaults.ipex_config[k])\n continue\n\n # Those args are handled separately\n if (\n name not in [\"compute_environment\", \"mixed_precision\", \"distributed_type\"]\n and getattr(args, name, None) is None\n ):\n setattr(args, name, attr)\n if not args.debug:\n args.debug = defaults.debug\n\n if not args.mixed_precision:\n if defaults.mixed_precision is None:\n args.mixed_precision = \"no\"\n else:\n args.mixed_precision = defaults.mixed_precision\n mp_from_config_flag = True\n else:\n native_amp = False\n err = \"{mode} mixed precision requires {requirement}\"\n if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):\n native_amp = is_torch_version(\">=\", \"1.10\")\n else:\n native_amp = is_bf16_available(True)\n if args.mixed_precision == \"bf16\" and not native_amp and not (args.tpu and is_tpu_available()):\n raise ValueError(err.format(mode=\"bf16\", requirement=\"PyTorch >= 1.10 and a supported device.\"))\n\n # Silently set the default here\n if args.dynamo_backend is None:\n args.dynamo_backend = \"no\"\n else:\n if args.num_processes is None:\n if args.use_xpu and is_xpu_available():\n args.num_processes = torch.xpu.device_count()\n elif is_npu_available():\n args.num_processes = torch.npu.device_count()\n else:\n args.num_processes = torch.cuda.device_count()\n warned.append(f\"\\t`--num_processes` was set to a value of `{args.num_processes}`\")\n if args.debug is None:\n args.debug = False\n if not args.multi_gpu and (\n (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)\n or (is_npu_available() and torch.npu.device_count() > 1)\n or (torch.cuda.device_count() > 1)\n ):\n warned.append(\n \"\\t\\tMore than one GPU was found, enabling multi-GPU training.\\n\"\n \"\\t\\tIf this was unintended please pass in `--num_processes=1`.\"\n )\n args.multi_gpu = True\n if args.num_machines is None:\n warned.append(\"\\t`--num_machines` was set to a value of `1`\")\n args.num_machines = 1\n if args.mixed_precision is None:\n warned.append(\"\\t`--mixed_precision` was set to a value of `'no'`\")\n args.mixed_precision = \"no\"\n if not hasattr(args, \"use_cpu\"):\n args.use_cpu = args.cpu\n if args.dynamo_backend is None:\n warned.append(\"\\t`--dynamo_backend` was set to a value of `'no'`\")\n args.dynamo_backend = \"no\"\n if args.debug:\n logger.debug(\"Running script in debug mode, expect distributed operations to be slightly slower.\")\n\n is_aws_env_disabled = defaults is None or (\n defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER\n )\n if is_aws_env_disabled and args.num_cpu_threads_per_process is None:\n args.num_cpu_threads_per_process = 1\n if args.use_cpu and args.num_processes >= 1:\n local_size = get_int_from_env(\n [\"MPI_LOCALNRANKS\", 
\"OMPI_COMM_WORLD_LOCAL_SIZE\", \"MV2_COMM_WORLD_LOCAL_SIZE\"], 1\n )\n threads_per_process = int(psutil.cpu_count(logical=False) / local_size)\n if threads_per_process > 1:\n args.num_cpu_threads_per_process = threads_per_process\n warned.append(\n f\"\\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs\"\n )\n\n if any(warned):\n message = \"The following values were not passed to `accelerate launch` and had defaults used instead:\\n\"\n message += \"\\n\".join(warned)\n message += (\n \"\\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.\"\n )\n logger.warning(message)\n return args, defaults, mp_from_config_flag\n\n\ndef launch_command(args):\n args, defaults, mp_from_config_flag = _validate_launch_command(args)\n # Use the proper launcher\n if args.use_deepspeed and not args.cpu:\n args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []\n if mp_from_config_flag:\n args.deepspeed_fields_from_accelerate_config.append(\"mixed_precision\")\n args.deepspeed_fields_from_accelerate_config = \",\".join(args.deepspeed_fields_from_accelerate_config)\n deepspeed_launcher(args)\n elif args.use_fsdp and not args.cpu:\n multi_gpu_launcher(args)\n elif args.use_megatron_lm and not args.cpu:\n multi_gpu_launcher(args)\n elif args.multi_gpu and not args.cpu:\n multi_gpu_launcher(args)\n elif args.tpu and not args.cpu:\n if args.tpu_use_cluster:\n tpu_pod_launcher(args)\n else:\n tpu_launcher(args)\n elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:\n sagemaker_launcher(defaults, args)\n else:\n simple_launcher(args)\n\n\ndef main():\n parser = launch_command_parser()\n args = parser.parse_args()\n launch_command(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "output": ["clean_option", "tpu_pod_launcher", "deepspeed_launcher", "main", "launch_command", "_validate_launch_command", "multi_gpu_launcher", "launch_command_parser", "simple_launcher", "tpu_launcher", "sagemaker_launcher", "_CustomHelpAction"], "metadata": {"file_path": "accelerate-main/src/accelerate/commands/launch.py", "file_length": 12309, "symbol_dict": [{"symbol": "tpu_launcher", "type": "mannual_defined_function", "byte_location": 27458, "location": 8404}, {"symbol": "tpu_pod_launcher", "type": "mannual_defined_function", "byte_location": 28535, "location": 8739}, {"symbol": "sagemaker_launcher", "type": "mannual_defined_function", "byte_location": 30416, "location": 9366}, {"symbol": "launch_command", "type": "mannual_defined_function", "byte_location": 38717, "location": 11880}, {"symbol": "main", "type": "mannual_defined_function", "byte_location": 39829, "location": 12262}, {"symbol": "simple_launcher", "type": "mannual_defined_function", "byte_location": 23686, "location": 7263}, {"symbol": "launch_command_parser", "type": "mannual_defined_function", "byte_location": 4781, "location": 1412}, {"symbol": "multi_gpu_launcher", "type": "mannual_defined_function", "byte_location": 24034, "location": 7375}, {"symbol": "deepspeed_launcher", "type": "mannual_defined_function", "byte_location": 25324, "location": 7766}, {"symbol": "_validate_launch_command", "type": "mannual_defined_function", "byte_location": 31194, "location": 9613}, {"symbol": "clean_option", "type": "mannual_defined_function", "byte_location": 2097, "location": 703}, {"symbol": "_CustomHelpAction", "type": 
"mannual_defined_class", "byte_location": 2294, "location": 762}]}} {"input": "#\n# Copyright 2024 The HuggingFace Inc. team.\n# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nimport os\nfrom collections import OrderedDict\nfrom copy import copy\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport onnx\nimport onnx_graphsurgeon as gs\nimport PIL.Image\nimport tensorrt as trt\nimport torch\nfrom huggingface_hub import snapshot_download\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom onnx import shape_inference\nfrom polygraphy import cuda\nfrom polygraphy.backend.common import bytes_from_path\nfrom polygraphy.backend.onnx.loader import fold_constants\nfrom polygraphy.backend.trt import (\n CreateConfig,\n Profile,\n engine_from_bytes,\n engine_from_network,\n network_from_onnx_path,\n save_engine,\n)\nfrom polygraphy.backend.trt import util as trt_util\nfrom transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection\n\nfrom diffusers.models import AutoencoderKL, UNet2DConditionModel\nfrom diffusers.pipelines.stable_diffusion import (\n StableDiffusionImg2ImgPipeline,\n StableDiffusionPipelineOutput,\n StableDiffusionSafetyChecker,\n)\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img import retrieve_latents\nfrom diffusers.schedulers import DDIMScheduler\nfrom diffusers.utils import logging\n\n\n\"\"\"\nInstallation instructions\npython3 -m pip install --upgrade transformers diffusers>=0.16.0\npython3 -m pip install --upgrade tensorrt>=8.6.1\npython3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com\npython3 -m pip install onnxruntime\n\"\"\"\n\nTRT_LOGGER = trt.Logger(trt.Logger.ERROR)\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n# Map of numpy dtype -> torch dtype\nnumpy_to_torch_dtype_dict = {\n np.uint8: torch.uint8,\n np.int8: torch.int8,\n np.int16: torch.int16,\n np.int32: torch.int32,\n np.int64: torch.int64,\n np.float16: torch.float16,\n np.float32: torch.float32,\n np.float64: torch.float64,\n np.complex64: torch.complex64,\n np.complex128: torch.complex128,\n}\nif np.version.full_version >= \"1.24.0\":\n numpy_to_torch_dtype_dict[np.bool_] = torch.bool\nelse:\n numpy_to_torch_dtype_dict[np.bool] = torch.bool\n\n# Map of torch dtype -> numpy dtype\ntorch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}\n\n\ndef device_view(t):\n return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])\n\n\ndef preprocess_image(image):\n \"\"\"\n image: torch.Tensor\n \"\"\"\n w, h = image.size\n w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32\n image = image.resize((w, h))\n image = np.array(image).astype(np.float32) / 255.0\n image = 
image[None].transpose(0, 3, 1, 2)\n image = torch.from_numpy(image).contiguous()\n return 2.0 * image - 1.0\n\n\nclass Engine:\n def __init__(self, engine_path):\n self.engine_path = engine_path\n self.engine = None\n self.context = None\n self.buffers = OrderedDict()\n self.tensors = OrderedDict()\n\n def __del__(self):\n [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]\n del self.engine\n del self.context\n del self.buffers\n del self.tensors\n\n def build(\n self,\n onnx_path,\n fp16,\n input_profile=None,\n enable_preview=False,\n enable_all_tactics=False,\n timing_cache=None,\n workspace_size=0,\n ):\n logger.warning(f\"Building TensorRT engine for {onnx_path}: {self.engine_path}\")\n p = Profile()\n if input_profile:\n for name, dims in input_profile.items():\n assert len(dims) == 3\n p.add(name, min=dims[0], opt=dims[1], max=dims[2])\n\n config_kwargs = {}\n\n config_kwargs[\"preview_features\"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]\n if enable_preview:\n # Faster dynamic shapes made optional since it increases engine build time.\n config_kwargs[\"preview_features\"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)\n if workspace_size > 0:\n config_kwargs[\"memory_pool_limits\"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}\n if not enable_all_tactics:\n config_kwargs[\"tactic_sources\"] = []\n\n engine = engine_from_network(\n network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),\n config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),\n save_timing_cache=timing_cache,\n )\n save_engine(engine, path=self.engine_path)\n\n def load(self):\n logger.warning(f\"Loading TensorRT engine: {self.engine_path}\")\n self.engine = engine_from_bytes(bytes_from_path(self.engine_path))\n\n def activate(self):\n self.context = self.engine.create_execution_context()\n\n def allocate_buffers(self, shape_dict=None, device=\"cuda\"):\n for idx in range(trt_util.get_bindings_per_profile(self.engine)):\n binding = self.engine[idx]\n if shape_dict and binding in shape_dict:\n shape = shape_dict[binding]\n else:\n shape = self.engine.get_binding_shape(binding)\n dtype = trt.nptype(self.engine.get_binding_dtype(binding))\n if self.engine.binding_is_input(binding):\n self.context.set_binding_shape(idx, shape)\n tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)\n self.tensors[binding] = tensor\n self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)\n\n def infer(self, feed_dict, stream):\n start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)\n # shallow copy of ordered dict\n device_buffers = copy(self.buffers)\n for name, buf in feed_dict.items():\n assert isinstance(buf, cuda.DeviceView)\n device_buffers[name] = buf\n bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]\n noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)\n if not noerror:\n raise ValueError(\"ERROR: inference failed.\")\n\n return self.tensors\n\n\nclass Optimizer:\n def __init__(self, onnx_graph):\n self.graph = gs.import_onnx(onnx_graph)\n\n def cleanup(self, return_onnx=False):\n self.graph.cleanup().toposort()\n if return_onnx:\n return gs.export_onnx(self.graph)\n\n def select_outputs(self, keep, names=None):\n self.graph.outputs = [self.graph.outputs[o] for o in keep]\n if names:\n for i, name in enumerate(names):\n 
self.graph.outputs[i].name = name\n\n def fold_constants(self, return_onnx=False):\n onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)\n self.graph = gs.import_onnx(onnx_graph)\n if return_onnx:\n return onnx_graph\n\n def infer_shapes(self, return_onnx=False):\n onnx_graph = gs.export_onnx(self.graph)\n if onnx_graph.ByteSize() > 2147483648:\n raise TypeError(\"ERROR: model size exceeds supported 2GB limit\")\n else:\n onnx_graph = shape_inference.infer_shapes(onnx_graph)\n\n self.graph = gs.import_onnx(onnx_graph)\n if return_onnx:\n return onnx_graph\n\n\nclass BaseModel:\n def __init__(self, model, fp16=False, device=\"cuda\", max_batch_size=16, embedding_dim=768, text_maxlen=77):\n self.model = model\n self.name = \"SD Model\"\n self.fp16 = fp16\n self.device = device\n\n self.min_batch = 1\n self.max_batch = max_batch_size\n self.min_image_shape = 256 # min image resolution: 256x256\n self.max_image_shape = 1024 # max image resolution: 1024x1024\n self.min_latent_shape = self.min_image_shape // 8\n self.max_latent_shape = self.max_image_shape // 8\n\n self.embedding_dim = embedding_dim\n self.text_maxlen = text_maxlen\n\n def get_model(self):\n return self.model\n\n def get_input_names(self):\n pass\n\n def get_output_names(self):\n pass\n\n def get_dynamic_axes(self):\n return None\n\n def get_sample_input(self, batch_size, image_height, image_width):\n pass\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n return None\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n return None\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.cleanup()\n opt.fold_constants()\n opt.infer_shapes()\n onnx_opt_graph = opt.cleanup(return_onnx=True)\n return onnx_opt_graph\n\n def check_dims(self, batch_size, image_height, image_width):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n assert image_height % 8 == 0 or image_width % 8 == 0\n latent_height = image_height // 8\n latent_width = image_width // 8\n assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape\n assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape\n return (latent_height, latent_width)\n\n def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n latent_height = image_height // 8\n latent_width = image_width // 8\n min_image_height = image_height if static_shape else self.min_image_shape\n max_image_height = image_height if static_shape else self.max_image_shape\n min_image_width = image_width if static_shape else self.min_image_shape\n max_image_width = image_width if static_shape else self.max_image_shape\n min_latent_height = latent_height if static_shape else self.min_latent_shape\n max_latent_height = latent_height if static_shape else self.max_latent_shape\n min_latent_width = latent_width if static_shape else self.min_latent_shape\n max_latent_width = latent_width if static_shape else self.max_latent_shape\n return (\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n )\n\n\ndef getOnnxPath(model_name, onnx_dir, opt=True):\n return os.path.join(onnx_dir, model_name + (\".opt\" if opt else \"\") + 
\".onnx\")\n\n\ndef getEnginePath(model_name, engine_dir):\n return os.path.join(engine_dir, model_name + \".plan\")\n\n\ndef build_engines(\n models: dict,\n engine_dir,\n onnx_dir,\n onnx_opset,\n opt_image_height,\n opt_image_width,\n opt_batch_size=1,\n force_engine_rebuild=False,\n static_batch=False,\n static_shape=True,\n enable_preview=False,\n enable_all_tactics=False,\n timing_cache=None,\n max_workspace_size=0,\n):\n built_engines = {}\n if not os.path.isdir(onnx_dir):\n os.makedirs(onnx_dir)\n if not os.path.isdir(engine_dir):\n os.makedirs(engine_dir)\n\n # Export models to ONNX\n for model_name, model_obj in models.items():\n engine_path = getEnginePath(model_name, engine_dir)\n if force_engine_rebuild or not os.path.exists(engine_path):\n logger.warning(\"Building Engines...\")\n logger.warning(\"Engine build can take a while to complete\")\n onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)\n onnx_opt_path = getOnnxPath(model_name, onnx_dir)\n if force_engine_rebuild or not os.path.exists(onnx_opt_path):\n if force_engine_rebuild or not os.path.exists(onnx_path):\n logger.warning(f\"Exporting model: {onnx_path}\")\n model = model_obj.get_model()\n with torch.inference_mode(), torch.autocast(\"cuda\"):\n inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)\n torch.onnx.export(\n model,\n inputs,\n onnx_path,\n export_params=True,\n opset_version=onnx_opset,\n do_constant_folding=True,\n input_names=model_obj.get_input_names(),\n output_names=model_obj.get_output_names(),\n dynamic_axes=model_obj.get_dynamic_axes(),\n )\n del model\n torch.cuda.empty_cache()\n gc.collect()\n else:\n logger.warning(f\"Found cached model: {onnx_path}\")\n\n # Optimize onnx\n if force_engine_rebuild or not os.path.exists(onnx_opt_path):\n logger.warning(f\"Generating optimizing model: {onnx_opt_path}\")\n onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))\n onnx.save(onnx_opt_graph, onnx_opt_path)\n else:\n logger.warning(f\"Found cached optimized model: {onnx_opt_path} \")\n\n # Build TensorRT engines\n for model_name, model_obj in models.items():\n engine_path = getEnginePath(model_name, engine_dir)\n engine = Engine(engine_path)\n onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)\n onnx_opt_path = getOnnxPath(model_name, onnx_dir)\n\n if force_engine_rebuild or not os.path.exists(engine.engine_path):\n engine.build(\n onnx_opt_path,\n fp16=True,\n input_profile=model_obj.get_input_profile(\n opt_batch_size,\n opt_image_height,\n opt_image_width,\n static_batch=static_batch,\n static_shape=static_shape,\n ),\n enable_preview=enable_preview,\n timing_cache=timing_cache,\n workspace_size=max_workspace_size,\n )\n built_engines[model_name] = engine\n\n # Load and activate TensorRT engines\n for model_name, model_obj in models.items():\n engine = built_engines[model_name]\n engine.load()\n engine.activate()\n\n return built_engines\n\n\ndef runEngine(engine, feed_dict, stream):\n return engine.infer(feed_dict, stream)\n\n\nclass CLIP(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(CLIP, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"CLIP\"\n\n def get_input_names(self):\n return [\"input_ids\"]\n\n def get_output_names(self):\n return [\"text_embeddings\", \"pooler_output\"]\n\n def get_dynamic_axes(self):\n return {\"input_ids\": {0: \"B\"}, \"text_embeddings\": {0: \"B\"}}\n\n def get_input_profile(self, batch_size, 
image_height, image_width, static_batch, static_shape):\n self.check_dims(batch_size, image_height, image_width)\n min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(\n batch_size, image_height, image_width, static_batch, static_shape\n )\n return {\n \"input_ids\": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return {\n \"input_ids\": (batch_size, self.text_maxlen),\n \"text_embeddings\": (batch_size, self.text_maxlen, self.embedding_dim),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.select_outputs([0]) # delete graph output#1\n opt.cleanup()\n opt.fold_constants()\n opt.infer_shapes()\n opt.select_outputs([0], names=[\"text_embeddings\"]) # rename network output\n opt_onnx_graph = opt.cleanup(return_onnx=True)\n return opt_onnx_graph\n\n\ndef make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):\n return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass UNet(BaseModel):\n def __init__(\n self, model, fp16=False, device=\"cuda\", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4\n ):\n super(UNet, self).__init__(\n model=model,\n fp16=fp16,\n device=device,\n max_batch_size=max_batch_size,\n embedding_dim=embedding_dim,\n text_maxlen=text_maxlen,\n )\n self.unet_dim = unet_dim\n self.name = \"UNet\"\n\n def get_input_names(self):\n return [\"sample\", \"timestep\", \"encoder_hidden_states\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\n \"sample\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n \"encoder_hidden_states\": {0: \"2B\"},\n \"latent\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n }\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"sample\": [\n (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),\n (2 * batch_size, self.unet_dim, latent_height, latent_width),\n (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),\n ],\n \"encoder_hidden_states\": [\n (2 * min_batch, self.text_maxlen, self.embedding_dim),\n (2 * batch_size, self.text_maxlen, self.embedding_dim),\n (2 * max_batch, self.text_maxlen, self.embedding_dim),\n ],\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"sample\": (2 * batch_size, self.unet_dim, latent_height, latent_width),\n \"encoder_hidden_states\": (2 * batch_size, self.text_maxlen, self.embedding_dim),\n \"latent\": (2 * batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n dtype = torch.float16 if self.fp16 else torch.float32\n 
return (\n torch.randn(\n 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device\n ),\n torch.tensor([1.0], dtype=torch.float32, device=self.device),\n torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),\n )\n\n\ndef make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):\n return UNet(\n model,\n fp16=True,\n device=device,\n max_batch_size=max_batch_size,\n embedding_dim=embedding_dim,\n unet_dim=(9 if inpaint else 4),\n )\n\n\nclass VAE(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(VAE, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"VAE decoder\"\n\n def get_input_names(self):\n return [\"latent\"]\n\n def get_output_names(self):\n return [\"images\"]\n\n def get_dynamic_axes(self):\n return {\"latent\": {0: \"B\", 2: \"H\", 3: \"W\"}, \"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"latent\": [\n (min_batch, 4, min_latent_height, min_latent_width),\n (batch_size, 4, latent_height, latent_width),\n (max_batch, 4, max_latent_height, max_latent_width),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"latent\": (batch_size, 4, latent_height, latent_width),\n \"images\": (batch_size, 3, image_height, image_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)\n\n\ndef make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):\n return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass TorchVAEEncoder(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.vae_encoder = model\n\n def forward(self, x):\n return retrieve_latents(self.vae_encoder.encode(x))\n\n\nclass VAEEncoder(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(VAEEncoder, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"VAE encoder\"\n\n def get_model(self):\n vae_encoder = TorchVAEEncoder(self.model)\n return vae_encoder\n\n def get_input_names(self):\n return [\"images\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"}, \"latent\": {0: \"B\", 2: \"H\", 3: \"W\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n min_image_height,\n 
max_image_height,\n min_image_width,\n max_image_width,\n _,\n _,\n _,\n _,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n\n return {\n \"images\": [\n (min_batch, 3, min_image_height, min_image_width),\n (batch_size, 3, image_height, image_width),\n (max_batch, 3, max_image_height, max_image_width),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"images\": (batch_size, 3, image_height, image_width),\n \"latent\": (batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)\n\n\ndef make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):\n return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):\n r\"\"\"\n Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion.\n\n This model inherits from [`StableDiffusionImg2ImgPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: DDIMScheduler,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n image_encoder: CLIPVisionModelWithProjection = None,\n requires_safety_checker: bool = True,\n stages=[\"clip\", \"unet\", \"vae\", \"vae_encoder\"],\n image_height: int = 512,\n image_width: int = 512,\n max_batch_size: int = 16,\n # ONNX export parameters\n onnx_opset: int = 17,\n onnx_dir: str = \"onnx\",\n # TensorRT engine build parameters\n engine_dir: str = \"engine\",\n build_preview_features: bool = True,\n force_engine_rebuild: bool = False,\n timing_cache: str = \"timing_cache\",\n ):\n super().__init__(\n vae,\n text_encoder,\n tokenizer,\n unet,\n scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n image_encoder=image_encoder,\n requires_safety_checker=requires_safety_checker,\n )\n\n self.vae.forward = self.vae.decode\n\n self.stages = stages\n self.image_height, self.image_width = image_height, image_width\n self.inpaint = False\n self.onnx_opset = onnx_opset\n self.onnx_dir = onnx_dir\n self.engine_dir = engine_dir\n self.force_engine_rebuild = force_engine_rebuild\n self.timing_cache = timing_cache\n self.build_static_batch = False\n self.build_dynamic_shape = False\n self.build_preview_features = build_preview_features\n\n self.max_batch_size = max_batch_size\n # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.\n if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:\n self.max_batch_size = 4\n\n self.stream = None # loaded in loadResources()\n self.models = {} # loaded in __loadModels()\n self.engine = {} # loaded in build_engines()\n\n def __loadModels(self):\n # Load pipeline models\n self.embedding_dim = self.text_encoder.config.hidden_size\n models_args = {\n \"device\": self.torch_device,\n \"max_batch_size\": self.max_batch_size,\n \"embedding_dim\": self.embedding_dim,\n \"inpaint\": self.inpaint,\n }\n if \"clip\" in self.stages:\n self.models[\"clip\"] = make_CLIP(self.text_encoder, **models_args)\n if \"unet\" in self.stages:\n self.models[\"unet\"] = make_UNet(self.unet, **models_args)\n if \"vae\" in self.stages:\n self.models[\"vae\"] = make_VAE(self.vae, **models_args)\n if \"vae_encoder\" in self.stages:\n self.models[\"vae_encoder\"] = make_VAEEncoder(self.vae, **models_args)\n\n @classmethod\n @validate_hf_hub_args\n def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):\n cache_dir = kwargs.pop(\"cache_dir\", None)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n cls.cached_folder = (\n pretrained_model_name_or_path\n if 
os.path.isdir(pretrained_model_name_or_path)\n else snapshot_download(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n )\n )\n\n def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):\n super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)\n\n self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)\n self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)\n self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)\n\n # set device\n self.torch_device = self._execution_device\n logger.warning(f\"Running inference on device: {self.torch_device}\")\n\n # load models\n self.__loadModels()\n\n # build engines\n self.engine = build_engines(\n self.models,\n self.engine_dir,\n self.onnx_dir,\n self.onnx_opset,\n opt_image_height=self.image_height,\n opt_image_width=self.image_width,\n force_engine_rebuild=self.force_engine_rebuild,\n static_batch=self.build_static_batch,\n static_shape=not self.build_dynamic_shape,\n enable_preview=self.build_preview_features,\n timing_cache=self.timing_cache,\n )\n\n return self\n\n def __initialize_timesteps(self, timesteps, strength):\n self.scheduler.set_timesteps(timesteps)\n offset = self.scheduler.steps_offset if hasattr(self.scheduler, \"steps_offset\") else 0\n init_timestep = int(timesteps * strength) + offset\n init_timestep = min(init_timestep, timesteps)\n t_start = max(timesteps - init_timestep + offset, 0)\n timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device)\n return timesteps, t_start\n\n def __preprocess_images(self, batch_size, images=()):\n init_images = []\n for image in images:\n image = image.to(self.torch_device).float()\n image = image.repeat(batch_size, 1, 1, 1)\n init_images.append(image)\n return tuple(init_images)\n\n def __encode_image(self, init_image):\n init_latents = runEngine(self.engine[\"vae_encoder\"], {\"images\": device_view(init_image)}, self.stream)[\n \"latent\"\n ]\n init_latents = 0.18215 * init_latents\n return init_latents\n\n def __encode_prompt(self, prompt, negative_prompt):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. 
instead.\n Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n \"\"\"\n # Tokenize prompt\n text_input_ids = (\n self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n .input_ids.type(torch.int32)\n .to(self.torch_device)\n )\n\n text_input_ids_inp = device_view(text_input_ids)\n # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt\n text_embeddings = runEngine(self.engine[\"clip\"], {\"input_ids\": text_input_ids_inp}, self.stream)[\n \"text_embeddings\"\n ].clone()\n\n # Tokenize negative prompt\n uncond_input_ids = (\n self.tokenizer(\n negative_prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n .input_ids.type(torch.int32)\n .to(self.torch_device)\n )\n uncond_input_ids_inp = device_view(uncond_input_ids)\n uncond_embeddings = runEngine(self.engine[\"clip\"], {\"input_ids\": uncond_input_ids_inp}, self.stream)[\n \"text_embeddings\"\n ]\n\n # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)\n\n return text_embeddings\n\n def __denoise_latent(\n self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None\n ):\n if not isinstance(timesteps, torch.Tensor):\n timesteps = self.scheduler.timesteps\n for step_index, timestep in enumerate(timesteps):\n # Expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2)\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)\n if isinstance(mask, torch.Tensor):\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # Predict the noise residual\n timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep\n\n sample_inp = device_view(latent_model_input)\n timestep_inp = device_view(timestep_float)\n embeddings_inp = device_view(text_embeddings)\n noise_pred = runEngine(\n self.engine[\"unet\"],\n {\"sample\": sample_inp, \"timestep\": timestep_inp, \"encoder_hidden_states\": embeddings_inp},\n self.stream,\n )[\"latent\"]\n\n # Perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample\n\n latents = 1.0 / 0.18215 * latents\n return latents\n\n def __decode_latent(self, latents):\n images = runEngine(self.engine[\"vae\"], {\"latent\": device_view(latents)}, self.stream)[\"images\"]\n images = (images / 2 + 0.5).clamp(0, 1)\n return images.cpu().permute(0, 2, 3, 1).float().numpy()\n\n def __loadResources(self, image_height, image_width, batch_size):\n self.stream = cuda.Stream()\n\n # Allocate buffers for TensorRT engine bindings\n for model_name, obj in self.models.items():\n self.engine[model_name].allocate_buffers(\n shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n strength: float = 0.8,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n 
negative_prompt: Optional[Union[str, List[str]]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`\n instead.\n image (`PIL.Image.Image`):\n `Image`, or tensor representing an image batch, that will be used as the starting point for the\n process.\n strength (`float`, *optional*, defaults to 0.8):\n Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`\n will be used as a starting point, adding more noise to it the larger the `strength`. The number of\n denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will\n be maximum and the denoising process will run for the full number of iterations specified in\n `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2 of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead.\n Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n\n \"\"\"\n self.generator = generator\n self.denoising_steps = num_inference_steps\n self._guidance_scale = guidance_scale\n\n # Pre-compute latent input scales and linear multistep coefficients\n self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)\n\n # Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n prompt = [prompt]\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"Expected prompt to be of type list or str but got {type(prompt)}\")\n\n if negative_prompt is None:\n negative_prompt = [\"\"] * batch_size\n\n if negative_prompt is not None and isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt]\n\n assert len(prompt) == len(negative_prompt)\n\n if batch_size > self.max_batch_size:\n raise ValueError(\n f\"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. 
If dynamic shape is used, then maximum batch size is 4\"\n )\n\n # load resources\n self.__loadResources(self.image_height, self.image_width, batch_size)\n\n with torch.inference_mode(), torch.autocast(\"cuda\"), trt.Runtime(TRT_LOGGER):\n # Initialize timesteps\n timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)\n latent_timestep = timesteps[:1].repeat(batch_size)\n\n # Pre-process input image\n if isinstance(image, PIL.Image.Image):\n image = preprocess_image(image)\n init_image = self.__preprocess_images(batch_size, (image,))[0]\n\n # VAE encode init image\n init_latents = self.__encode_image(init_image)\n\n # Add noise to latents using timesteps\n noise = torch.randn(\n init_latents.shape, generator=self.generator, device=self.torch_device, dtype=torch.float32\n )\n latents = self.scheduler.add_noise(init_latents, noise, latent_timestep)\n\n # CLIP text encoder\n text_embeddings = self.__encode_prompt(prompt, negative_prompt)\n\n # UNet denoiser\n latents = self.__denoise_latent(latents, text_embeddings, timesteps=timesteps, step_offset=t_start)\n\n # VAE decode latent\n images = self.__decode_latent(latents)\n\n images = self.numpy_to_pil(images)\n return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)\n", "output": ["make_CLIP", "preprocess_image", "build_engines", "runEngine", "make_UNet", "getEnginePath", "make_VAE", "device_view", "make_VAEEncoder", "getOnnxPath", "TensorRTStableDiffusionImg2ImgPipeline", "UNet", "VAE", "Engine", "TorchVAEEncoder", "BaseModel", "VAEEncoder", "CLIP", "Optimizer"], "metadata": {"file_path": "diffusers-main/examples/community/stable_diffusion_tensorrt_img2img.py", "file_length": 13729, "symbol_dict": [{"symbol": "device_view", "type": "mannual_defined_function", "byte_location": 3034, "location": 1055}, {"symbol": "make_CLIP", "type": "mannual_defined_function", "byte_location": 17370, "location": 5635}, {"symbol": "build_engines", "type": "mannual_defined_function", "byte_location": 11643, "location": 3871}, {"symbol": "getEnginePath", "type": "mannual_defined_function", "byte_location": 11540, "location": 3835}, {"symbol": "runEngine", "type": "mannual_defined_function", "byte_location": 15385, "location": 4970}, {"symbol": "make_UNet", "type": "mannual_defined_function", "byte_location": 20520, "location": 6692}, {"symbol": "preprocess_image", "type": "mannual_defined_function", "byte_location": 3158, "location": 1107}, {"symbol": "make_VAEEncoder", "type": "mannual_defined_function", "byte_location": 25265, "location": 8270}, {"symbol": "make_VAE", "type": "mannual_defined_function", "byte_location": 22704, "location": 7426}, {"symbol": "getOnnxPath", "type": "mannual_defined_function", "byte_location": 11407, "location": 3782}, {"symbol": "BaseModel", "type": "mannual_defined_class", "byte_location": 8219, "location": 2757}, {"symbol": "Optimizer", "type": "mannual_defined_class", "byte_location": 7075, "location": 2364}, {"symbol": "Engine", "type": "mannual_defined_class", "byte_location": 3539, "location": 1265}, {"symbol": "TorchVAEEncoder", "type": "mannual_defined_class", "byte_location": 22878, "location": 7488}, {"symbol": "CLIP", "type": "mannual_defined_class", "byte_location": 15472, "location": 4999}, {"symbol": "VAE", "type": "mannual_defined_class", "byte_location": 20794, "location": 6790}, {"symbol": "VAEEncoder", "type": "mannual_defined_class", "byte_location": 23098, "location": 7565}, {"symbol": "TensorRTStableDiffusionImg2ImgPipeline", "type": "mannual_defined_class", 
"byte_location": 25453, "location": 8336}, {"symbol": "UNet", "type": "mannual_defined_class", "byte_location": 17546, "location": 5697}]}} {"input": "#\n# Copyright 2024 The HuggingFace Inc. team.\n# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nimport os\nfrom collections import OrderedDict\nfrom copy import copy\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport onnx\nimport onnx_graphsurgeon as gs\nimport PIL.Image\nimport tensorrt as trt\nimport torch\nfrom huggingface_hub import snapshot_download\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom onnx import shape_inference\nfrom polygraphy import cuda\nfrom polygraphy.backend.common import bytes_from_path\nfrom polygraphy.backend.onnx.loader import fold_constants\nfrom polygraphy.backend.trt import (\n CreateConfig,\n Profile,\n engine_from_bytes,\n engine_from_network,\n network_from_onnx_path,\n save_engine,\n)\nfrom polygraphy.backend.trt import util as trt_util\nfrom transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection\n\nfrom diffusers.models import AutoencoderKL, UNet2DConditionModel\nfrom diffusers.pipelines.stable_diffusion import (\n StableDiffusionInpaintPipeline,\n StableDiffusionPipelineOutput,\n StableDiffusionSafetyChecker,\n)\nfrom diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image\nfrom diffusers.schedulers import DDIMScheduler\nfrom diffusers.utils import logging\n\n\n\"\"\"\nInstallation instructions\npython3 -m pip install --upgrade transformers diffusers>=0.16.0\npython3 -m pip install --upgrade tensorrt>=8.6.1\npython3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com\npython3 -m pip install onnxruntime\n\"\"\"\n\nTRT_LOGGER = trt.Logger(trt.Logger.ERROR)\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n# Map of numpy dtype -> torch dtype\nnumpy_to_torch_dtype_dict = {\n np.uint8: torch.uint8,\n np.int8: torch.int8,\n np.int16: torch.int16,\n np.int32: torch.int32,\n np.int64: torch.int64,\n np.float16: torch.float16,\n np.float32: torch.float32,\n np.float64: torch.float64,\n np.complex64: torch.complex64,\n np.complex128: torch.complex128,\n}\nif np.version.full_version >= \"1.24.0\":\n numpy_to_torch_dtype_dict[np.bool_] = torch.bool\nelse:\n numpy_to_torch_dtype_dict[np.bool] = torch.bool\n\n# Map of torch dtype -> numpy dtype\ntorch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}\n\n\ndef device_view(t):\n return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])\n\n\ndef preprocess_image(image):\n \"\"\"\n image: torch.Tensor\n \"\"\"\n w, h = image.size\n w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32\n image = image.resize((w, 
h))\n image = np.array(image).astype(np.float32) / 255.0\n image = image[None].transpose(0, 3, 1, 2)\n image = torch.from_numpy(image).contiguous()\n return 2.0 * image - 1.0\n\n\nclass Engine:\n def __init__(self, engine_path):\n self.engine_path = engine_path\n self.engine = None\n self.context = None\n self.buffers = OrderedDict()\n self.tensors = OrderedDict()\n\n def __del__(self):\n [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]\n del self.engine\n del self.context\n del self.buffers\n del self.tensors\n\n def build(\n self,\n onnx_path,\n fp16,\n input_profile=None,\n enable_preview=False,\n enable_all_tactics=False,\n timing_cache=None,\n workspace_size=0,\n ):\n logger.warning(f\"Building TensorRT engine for {onnx_path}: {self.engine_path}\")\n p = Profile()\n if input_profile:\n for name, dims in input_profile.items():\n assert len(dims) == 3\n p.add(name, min=dims[0], opt=dims[1], max=dims[2])\n\n config_kwargs = {}\n\n config_kwargs[\"preview_features\"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]\n if enable_preview:\n # Faster dynamic shapes made optional since it increases engine build time.\n config_kwargs[\"preview_features\"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)\n if workspace_size > 0:\n config_kwargs[\"memory_pool_limits\"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}\n if not enable_all_tactics:\n config_kwargs[\"tactic_sources\"] = []\n\n engine = engine_from_network(\n network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),\n config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),\n save_timing_cache=timing_cache,\n )\n save_engine(engine, path=self.engine_path)\n\n def load(self):\n logger.warning(f\"Loading TensorRT engine: {self.engine_path}\")\n self.engine = engine_from_bytes(bytes_from_path(self.engine_path))\n\n def activate(self):\n self.context = self.engine.create_execution_context()\n\n def allocate_buffers(self, shape_dict=None, device=\"cuda\"):\n for idx in range(trt_util.get_bindings_per_profile(self.engine)):\n binding = self.engine[idx]\n if shape_dict and binding in shape_dict:\n shape = shape_dict[binding]\n else:\n shape = self.engine.get_binding_shape(binding)\n dtype = trt.nptype(self.engine.get_binding_dtype(binding))\n if self.engine.binding_is_input(binding):\n self.context.set_binding_shape(idx, shape)\n tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)\n self.tensors[binding] = tensor\n self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)\n\n def infer(self, feed_dict, stream):\n start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)\n # shallow copy of ordered dict\n device_buffers = copy(self.buffers)\n for name, buf in feed_dict.items():\n assert isinstance(buf, cuda.DeviceView)\n device_buffers[name] = buf\n bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]\n noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)\n if not noerror:\n raise ValueError(\"ERROR: inference failed.\")\n\n return self.tensors\n\n\nclass Optimizer:\n def __init__(self, onnx_graph):\n self.graph = gs.import_onnx(onnx_graph)\n\n def cleanup(self, return_onnx=False):\n self.graph.cleanup().toposort()\n if return_onnx:\n return gs.export_onnx(self.graph)\n\n def select_outputs(self, keep, names=None):\n self.graph.outputs = [self.graph.outputs[o] for o in 
keep]\n if names:\n for i, name in enumerate(names):\n self.graph.outputs[i].name = name\n\n def fold_constants(self, return_onnx=False):\n onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)\n self.graph = gs.import_onnx(onnx_graph)\n if return_onnx:\n return onnx_graph\n\n def infer_shapes(self, return_onnx=False):\n onnx_graph = gs.export_onnx(self.graph)\n if onnx_graph.ByteSize() > 2147483648:\n raise TypeError(\"ERROR: model size exceeds supported 2GB limit\")\n else:\n onnx_graph = shape_inference.infer_shapes(onnx_graph)\n\n self.graph = gs.import_onnx(onnx_graph)\n if return_onnx:\n return onnx_graph\n\n\nclass BaseModel:\n def __init__(self, model, fp16=False, device=\"cuda\", max_batch_size=16, embedding_dim=768, text_maxlen=77):\n self.model = model\n self.name = \"SD Model\"\n self.fp16 = fp16\n self.device = device\n\n self.min_batch = 1\n self.max_batch = max_batch_size\n self.min_image_shape = 256 # min image resolution: 256x256\n self.max_image_shape = 1024 # max image resolution: 1024x1024\n self.min_latent_shape = self.min_image_shape // 8\n self.max_latent_shape = self.max_image_shape // 8\n\n self.embedding_dim = embedding_dim\n self.text_maxlen = text_maxlen\n\n def get_model(self):\n return self.model\n\n def get_input_names(self):\n pass\n\n def get_output_names(self):\n pass\n\n def get_dynamic_axes(self):\n return None\n\n def get_sample_input(self, batch_size, image_height, image_width):\n pass\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n return None\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n return None\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.cleanup()\n opt.fold_constants()\n opt.infer_shapes()\n onnx_opt_graph = opt.cleanup(return_onnx=True)\n return onnx_opt_graph\n\n def check_dims(self, batch_size, image_height, image_width):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n assert image_height % 8 == 0 or image_width % 8 == 0\n latent_height = image_height // 8\n latent_width = image_width // 8\n assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape\n assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape\n return (latent_height, latent_width)\n\n def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n latent_height = image_height // 8\n latent_width = image_width // 8\n min_image_height = image_height if static_shape else self.min_image_shape\n max_image_height = image_height if static_shape else self.max_image_shape\n min_image_width = image_width if static_shape else self.min_image_shape\n max_image_width = image_width if static_shape else self.max_image_shape\n min_latent_height = latent_height if static_shape else self.min_latent_shape\n max_latent_height = latent_height if static_shape else self.max_latent_shape\n min_latent_width = latent_width if static_shape else self.min_latent_shape\n max_latent_width = latent_width if static_shape else self.max_latent_shape\n return (\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n )\n\n\ndef getOnnxPath(model_name, onnx_dir, opt=True):\n return os.path.join(onnx_dir, 
model_name + (\".opt\" if opt else \"\") + \".onnx\")\n\n\ndef getEnginePath(model_name, engine_dir):\n return os.path.join(engine_dir, model_name + \".plan\")\n\n\ndef build_engines(\n models: dict,\n engine_dir,\n onnx_dir,\n onnx_opset,\n opt_image_height,\n opt_image_width,\n opt_batch_size=1,\n force_engine_rebuild=False,\n static_batch=False,\n static_shape=True,\n enable_preview=False,\n enable_all_tactics=False,\n timing_cache=None,\n max_workspace_size=0,\n):\n built_engines = {}\n if not os.path.isdir(onnx_dir):\n os.makedirs(onnx_dir)\n if not os.path.isdir(engine_dir):\n os.makedirs(engine_dir)\n\n # Export models to ONNX\n for model_name, model_obj in models.items():\n engine_path = getEnginePath(model_name, engine_dir)\n if force_engine_rebuild or not os.path.exists(engine_path):\n logger.warning(\"Building Engines...\")\n logger.warning(\"Engine build can take a while to complete\")\n onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)\n onnx_opt_path = getOnnxPath(model_name, onnx_dir)\n if force_engine_rebuild or not os.path.exists(onnx_opt_path):\n if force_engine_rebuild or not os.path.exists(onnx_path):\n logger.warning(f\"Exporting model: {onnx_path}\")\n model = model_obj.get_model()\n with torch.inference_mode(), torch.autocast(\"cuda\"):\n inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)\n torch.onnx.export(\n model,\n inputs,\n onnx_path,\n export_params=True,\n opset_version=onnx_opset,\n do_constant_folding=True,\n input_names=model_obj.get_input_names(),\n output_names=model_obj.get_output_names(),\n dynamic_axes=model_obj.get_dynamic_axes(),\n )\n del model\n torch.cuda.empty_cache()\n gc.collect()\n else:\n logger.warning(f\"Found cached model: {onnx_path}\")\n\n # Optimize onnx\n if force_engine_rebuild or not os.path.exists(onnx_opt_path):\n logger.warning(f\"Generating optimizing model: {onnx_opt_path}\")\n onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))\n onnx.save(onnx_opt_graph, onnx_opt_path)\n else:\n logger.warning(f\"Found cached optimized model: {onnx_opt_path} \")\n\n # Build TensorRT engines\n for model_name, model_obj in models.items():\n engine_path = getEnginePath(model_name, engine_dir)\n engine = Engine(engine_path)\n onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)\n onnx_opt_path = getOnnxPath(model_name, onnx_dir)\n\n if force_engine_rebuild or not os.path.exists(engine.engine_path):\n engine.build(\n onnx_opt_path,\n fp16=True,\n input_profile=model_obj.get_input_profile(\n opt_batch_size,\n opt_image_height,\n opt_image_width,\n static_batch=static_batch,\n static_shape=static_shape,\n ),\n enable_preview=enable_preview,\n timing_cache=timing_cache,\n workspace_size=max_workspace_size,\n )\n built_engines[model_name] = engine\n\n # Load and activate TensorRT engines\n for model_name, model_obj in models.items():\n engine = built_engines[model_name]\n engine.load()\n engine.activate()\n\n return built_engines\n\n\ndef runEngine(engine, feed_dict, stream):\n return engine.infer(feed_dict, stream)\n\n\nclass CLIP(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(CLIP, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"CLIP\"\n\n def get_input_names(self):\n return [\"input_ids\"]\n\n def get_output_names(self):\n return [\"text_embeddings\", \"pooler_output\"]\n\n def get_dynamic_axes(self):\n return {\"input_ids\": {0: \"B\"}, \"text_embeddings\": {0: \"B\"}}\n\n def 
get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n self.check_dims(batch_size, image_height, image_width)\n min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(\n batch_size, image_height, image_width, static_batch, static_shape\n )\n return {\n \"input_ids\": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return {\n \"input_ids\": (batch_size, self.text_maxlen),\n \"text_embeddings\": (batch_size, self.text_maxlen, self.embedding_dim),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.select_outputs([0]) # delete graph output#1\n opt.cleanup()\n opt.fold_constants()\n opt.infer_shapes()\n opt.select_outputs([0], names=[\"text_embeddings\"]) # rename network output\n opt_onnx_graph = opt.cleanup(return_onnx=True)\n return opt_onnx_graph\n\n\ndef make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):\n return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass UNet(BaseModel):\n def __init__(\n self, model, fp16=False, device=\"cuda\", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4\n ):\n super(UNet, self).__init__(\n model=model,\n fp16=fp16,\n device=device,\n max_batch_size=max_batch_size,\n embedding_dim=embedding_dim,\n text_maxlen=text_maxlen,\n )\n self.unet_dim = unet_dim\n self.name = \"UNet\"\n\n def get_input_names(self):\n return [\"sample\", \"timestep\", \"encoder_hidden_states\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\n \"sample\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n \"encoder_hidden_states\": {0: \"2B\"},\n \"latent\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n }\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"sample\": [\n (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),\n (2 * batch_size, self.unet_dim, latent_height, latent_width),\n (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),\n ],\n \"encoder_hidden_states\": [\n (2 * min_batch, self.text_maxlen, self.embedding_dim),\n (2 * batch_size, self.text_maxlen, self.embedding_dim),\n (2 * max_batch, self.text_maxlen, self.embedding_dim),\n ],\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"sample\": (2 * batch_size, self.unet_dim, latent_height, latent_width),\n \"encoder_hidden_states\": (2 * batch_size, self.text_maxlen, self.embedding_dim),\n \"latent\": (2 * batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n dtype = 
torch.float16 if self.fp16 else torch.float32\n return (\n torch.randn(\n 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device\n ),\n torch.tensor([1.0], dtype=torch.float32, device=self.device),\n torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),\n )\n\n\ndef make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False, unet_dim=4):\n return UNet(\n model,\n fp16=True,\n device=device,\n max_batch_size=max_batch_size,\n embedding_dim=embedding_dim,\n unet_dim=unet_dim,\n )\n\n\nclass VAE(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(VAE, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"VAE decoder\"\n\n def get_input_names(self):\n return [\"latent\"]\n\n def get_output_names(self):\n return [\"images\"]\n\n def get_dynamic_axes(self):\n return {\"latent\": {0: \"B\", 2: \"H\", 3: \"W\"}, \"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"latent\": [\n (min_batch, 4, min_latent_height, min_latent_width),\n (batch_size, 4, latent_height, latent_width),\n (max_batch, 4, max_latent_height, max_latent_width),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"latent\": (batch_size, 4, latent_height, latent_width),\n \"images\": (batch_size, 3, image_height, image_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)\n\n\ndef make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):\n return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass TorchVAEEncoder(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.vae_encoder = model\n\n def forward(self, x):\n return self.vae_encoder.encode(x).latent_dist.sample()\n\n\nclass VAEEncoder(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(VAEEncoder, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"VAE encoder\"\n\n def get_model(self):\n vae_encoder = TorchVAEEncoder(self.model)\n return vae_encoder\n\n def get_input_names(self):\n return [\"images\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"}, \"latent\": {0: \"B\", 2: \"H\", 3: \"W\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n self.check_dims(batch_size, image_height, image_width)\n 
(\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n _,\n _,\n _,\n _,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n\n return {\n \"images\": [\n (min_batch, 3, min_image_height, min_image_width),\n (batch_size, 3, image_height, image_width),\n (max_batch, 3, max_image_height, max_image_width),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"images\": (batch_size, 3, image_height, image_width),\n \"latent\": (batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)\n\n\ndef make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):\n return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):\n r\"\"\"\n Pipeline for inpainting using TensorRT accelerated Stable Diffusion.\n\n This model inherits from [`StableDiffusionInpaintPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: DDIMScheduler,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n image_encoder: CLIPVisionModelWithProjection = None,\n requires_safety_checker: bool = True,\n stages=[\"clip\", \"unet\", \"vae\", \"vae_encoder\"],\n image_height: int = 512,\n image_width: int = 512,\n max_batch_size: int = 16,\n # ONNX export parameters\n onnx_opset: int = 17,\n onnx_dir: str = \"onnx\",\n # TensorRT engine build parameters\n engine_dir: str = \"engine\",\n build_preview_features: bool = True,\n force_engine_rebuild: bool = False,\n timing_cache: str = \"timing_cache\",\n ):\n super().__init__(\n vae,\n text_encoder,\n tokenizer,\n unet,\n scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n image_encoder=image_encoder,\n requires_safety_checker=requires_safety_checker,\n )\n\n self.vae.forward = self.vae.decode\n\n self.stages = stages\n self.image_height, self.image_width = image_height, image_width\n self.inpaint = True\n self.onnx_opset = onnx_opset\n self.onnx_dir = onnx_dir\n self.engine_dir = engine_dir\n self.force_engine_rebuild = force_engine_rebuild\n self.timing_cache = timing_cache\n self.build_static_batch = False\n self.build_dynamic_shape = False\n self.build_preview_features = build_preview_features\n\n self.max_batch_size = max_batch_size\n # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.\n if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:\n self.max_batch_size = 4\n\n self.stream = None # loaded in loadResources()\n self.models = {} # loaded in __loadModels()\n self.engine = {} # loaded in build_engines()\n\n def __loadModels(self):\n # Load pipeline models\n self.embedding_dim = self.text_encoder.config.hidden_size\n models_args = {\n \"device\": self.torch_device,\n \"max_batch_size\": self.max_batch_size,\n \"embedding_dim\": self.embedding_dim,\n \"inpaint\": self.inpaint,\n }\n if \"clip\" in self.stages:\n self.models[\"clip\"] = make_CLIP(self.text_encoder, **models_args)\n if \"unet\" in self.stages:\n self.models[\"unet\"] = make_UNet(self.unet, **models_args, unet_dim=self.unet.config.in_channels)\n if \"vae\" in self.stages:\n self.models[\"vae\"] = make_VAE(self.vae, **models_args)\n if \"vae_encoder\" in self.stages:\n self.models[\"vae_encoder\"] = make_VAEEncoder(self.vae, **models_args)\n\n @classmethod\n @validate_hf_hub_args\n def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):\n cache_dir = kwargs.pop(\"cache_dir\", None)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n cls.cached_folder = (\n pretrained_model_name_or_path\n if 
os.path.isdir(pretrained_model_name_or_path)\n else snapshot_download(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n )\n )\n\n def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):\n super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)\n\n self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)\n self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)\n self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)\n\n # set device\n self.torch_device = self._execution_device\n logger.warning(f\"Running inference on device: {self.torch_device}\")\n\n # load models\n self.__loadModels()\n\n # build engines\n self.engine = build_engines(\n self.models,\n self.engine_dir,\n self.onnx_dir,\n self.onnx_opset,\n opt_image_height=self.image_height,\n opt_image_width=self.image_width,\n force_engine_rebuild=self.force_engine_rebuild,\n static_batch=self.build_static_batch,\n static_shape=not self.build_dynamic_shape,\n enable_preview=self.build_preview_features,\n timing_cache=self.timing_cache,\n )\n\n return self\n\n def __initialize_timesteps(self, num_inference_steps, strength):\n self.scheduler.set_timesteps(num_inference_steps)\n offset = self.scheduler.config.steps_offset if hasattr(self.scheduler, \"steps_offset\") else 0\n init_timestep = int(num_inference_steps * strength) + offset\n init_timestep = min(init_timestep, num_inference_steps)\n t_start = max(num_inference_steps - init_timestep + offset, 0)\n timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :].to(self.torch_device)\n return timesteps, num_inference_steps - t_start\n\n def __preprocess_images(self, batch_size, images=()):\n init_images = []\n for image in images:\n image = image.to(self.torch_device).float()\n image = image.repeat(batch_size, 1, 1, 1)\n init_images.append(image)\n return tuple(init_images)\n\n def __encode_image(self, init_image):\n init_latents = runEngine(self.engine[\"vae_encoder\"], {\"images\": device_view(init_image)}, self.stream)[\n \"latent\"\n ]\n init_latents = 0.18215 * init_latents\n return init_latents\n\n def __encode_prompt(self, prompt, negative_prompt):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` 
instead.\n Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n \"\"\"\n # Tokenize prompt\n text_input_ids = (\n self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n .input_ids.type(torch.int32)\n .to(self.torch_device)\n )\n\n text_input_ids_inp = device_view(text_input_ids)\n # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt\n text_embeddings = runEngine(self.engine[\"clip\"], {\"input_ids\": text_input_ids_inp}, self.stream)[\n \"text_embeddings\"\n ].clone()\n\n # Tokenize negative prompt\n uncond_input_ids = (\n self.tokenizer(\n negative_prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n .input_ids.type(torch.int32)\n .to(self.torch_device)\n )\n uncond_input_ids_inp = device_view(uncond_input_ids)\n uncond_embeddings = runEngine(self.engine[\"clip\"], {\"input_ids\": uncond_input_ids_inp}, self.stream)[\n \"text_embeddings\"\n ]\n\n # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)\n\n return text_embeddings\n\n def __denoise_latent(\n self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None\n ):\n if not isinstance(timesteps, torch.Tensor):\n timesteps = self.scheduler.timesteps\n for step_index, timestep in enumerate(timesteps):\n # Expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2)\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)\n if isinstance(mask, torch.Tensor):\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # Predict the noise residual\n timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep\n\n sample_inp = device_view(latent_model_input)\n timestep_inp = device_view(timestep_float)\n embeddings_inp = device_view(text_embeddings)\n noise_pred = runEngine(\n self.engine[\"unet\"],\n {\"sample\": sample_inp, \"timestep\": timestep_inp, \"encoder_hidden_states\": embeddings_inp},\n self.stream,\n )[\"latent\"]\n\n # Perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample\n\n latents = 1.0 / 0.18215 * latents\n return latents\n\n def __decode_latent(self, latents):\n images = runEngine(self.engine[\"vae\"], {\"latent\": device_view(latents)}, self.stream)[\"images\"]\n images = (images / 2 + 0.5).clamp(0, 1)\n return images.cpu().permute(0, 2, 3, 1).float().numpy()\n\n def __loadResources(self, image_height, image_width, batch_size):\n self.stream = cuda.Stream()\n\n # Allocate buffers for TensorRT engine bindings\n for model_name, obj in self.models.items():\n self.engine[model_name].allocate_buffers(\n shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n strength: float = 1.0,\n 
num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`\n instead.\n image (`PIL.Image.Image`):\n `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will\n be masked out with `mask_image` and repainted according to `prompt`.\n mask_image (`PIL.Image.Image`):\n `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be\n repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted\n to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)\n instead of 3, so the expected shape would be `(B, H, W, 1)`.\n strength (`float`, *optional*, defaults to 1.0):\n Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`\n will be used as a starting point, adding more noise to it the larger the `strength`. The number of\n denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will\n be maximum and the denoising process will run for the full number of iterations specified in\n `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2 of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` 
instead.\n Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n\n \"\"\"\n self.generator = generator\n self.denoising_steps = num_inference_steps\n self.guidance_scale = guidance_scale\n\n # Pre-compute latent input scales and linear multistep coefficients\n self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)\n\n # Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n prompt = [prompt]\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"Expected prompt to be of type list or str but got {type(prompt)}\")\n\n if negative_prompt is None:\n negative_prompt = [\"\"] * batch_size\n\n if negative_prompt is not None and isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt]\n\n assert len(prompt) == len(negative_prompt)\n\n if batch_size > self.max_batch_size:\n raise ValueError(\n f\"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4\"\n )\n\n # Validate image dimensions\n mask_width, mask_height = mask_image.size\n if mask_height != self.image_height or mask_width != self.image_width:\n raise ValueError(\n f\"Input image height and width {self.image_height} and {self.image_width} are not equal to \"\n f\"the respective dimensions of the mask image {mask_height} and {mask_width}\"\n )\n\n # load resources\n self.__loadResources(self.image_height, self.image_width, batch_size)\n\n with torch.inference_mode(), torch.autocast(\"cuda\"), trt.Runtime(TRT_LOGGER):\n # Spatial dimensions of latent tensor\n latent_height = self.image_height // 8\n latent_width = self.image_width // 8\n\n # Pre-process input images\n mask, masked_image, init_image = self.__preprocess_images(\n batch_size,\n prepare_mask_and_masked_image(\n image,\n mask_image,\n self.image_height,\n self.image_width,\n return_image=True,\n ),\n )\n\n mask = torch.nn.functional.interpolate(mask, size=(latent_height, latent_width))\n mask = torch.cat([mask] * 2)\n\n # Initialize timesteps\n timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)\n\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size)\n # create a boolean to check if the strength is set to 1. 
if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # Pre-initialize latents\n num_channels_latents = self.vae.config.latent_channels\n latents_outputs = self.prepare_latents(\n batch_size,\n num_channels_latents,\n self.image_height,\n self.image_width,\n torch.float32,\n self.torch_device,\n generator,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n )\n\n latents = latents_outputs[0]\n\n # VAE encode masked image\n masked_latents = self.__encode_image(masked_image)\n masked_latents = torch.cat([masked_latents] * 2)\n\n # CLIP text encoder\n text_embeddings = self.__encode_prompt(prompt, negative_prompt)\n\n # UNet denoiser\n latents = self.__denoise_latent(\n latents,\n text_embeddings,\n timesteps=timesteps,\n step_offset=t_start,\n mask=mask,\n masked_image_latents=masked_latents,\n )\n\n # VAE decode latent\n images = self.__decode_latent(latents)\n\n images = self.numpy_to_pil(images)\n return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)\n", "output": ["build_engines", "device_view", "getOnnxPath", "make_VAE", "make_VAEEncoder", "getEnginePath", "make_CLIP", "runEngine", "preprocess_image", "make_UNet", "TensorRTStableDiffusionInpaintPipeline", "UNet", "VAE", "CLIP", "TorchVAEEncoder", "VAEEncoder", "Engine", "Optimizer", "BaseModel"], "metadata": {"file_path": "diffusers-main/examples/community/stable_diffusion_tensorrt_inpaint.py", "file_length": 14358, "symbol_dict": [{"symbol": "make_CLIP", "type": "mannual_defined_function", "byte_location": 17383, "location": 5641}, {"symbol": "build_engines", "type": "mannual_defined_function", "byte_location": 11656, "location": 3877}, {"symbol": "preprocess_image", "type": "mannual_defined_function", "byte_location": 3171, "location": 1113}, {"symbol": "getOnnxPath", "type": "mannual_defined_function", "byte_location": 11420, "location": 3788}, {"symbol": "device_view", "type": "mannual_defined_function", "byte_location": 3047, "location": 1061}, {"symbol": "make_UNet", "type": "mannual_defined_function", "byte_location": 20533, "location": 6698}, {"symbol": "getEnginePath", "type": "mannual_defined_function", "byte_location": 11553, "location": 3841}, {"symbol": "runEngine", "type": "mannual_defined_function", "byte_location": 15398, "location": 4976}, {"symbol": "make_VAE", "type": "mannual_defined_function", "byte_location": 22716, "location": 7435}, {"symbol": "make_VAEEncoder", "type": "mannual_defined_function", "byte_location": 25280, "location": 8281}, {"symbol": "UNet", "type": "mannual_defined_class", "byte_location": 17559, "location": 5703}, {"symbol": "VAE", "type": "mannual_defined_class", "byte_location": 20806, "location": 6799}, {"symbol": "VAEEncoder", "type": "mannual_defined_class", "byte_location": 23113, "location": 7576}, {"symbol": "Engine", "type": "mannual_defined_class", "byte_location": 3552, "location": 1271}, {"symbol": "TorchVAEEncoder", "type": "mannual_defined_class", "byte_location": 22890, "location": 7497}, {"symbol": "CLIP", "type": "mannual_defined_class", "byte_location": 15485, "location": 5005}, {"symbol": "BaseModel", "type": "mannual_defined_class", "byte_location": 8232, "location": 2763}, {"symbol": "Optimizer", "type": "mannual_defined_class", "byte_location": 7088, "location": 2370}, {"symbol": "TensorRTStableDiffusionInpaintPipeline", "type": "mannual_defined_class", "byte_location": 25468, "location": 8347}]}} {"input": "#\n# Copyright 2024 The HuggingFace Inc. 
team.\n# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gc\nimport os\nfrom collections import OrderedDict\nfrom copy import copy\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport onnx\nimport onnx_graphsurgeon as gs\nimport tensorrt as trt\nimport torch\nfrom huggingface_hub import snapshot_download\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom onnx import shape_inference\nfrom polygraphy import cuda\nfrom polygraphy.backend.common import bytes_from_path\nfrom polygraphy.backend.onnx.loader import fold_constants\nfrom polygraphy.backend.trt import (\n CreateConfig,\n Profile,\n engine_from_bytes,\n engine_from_network,\n network_from_onnx_path,\n save_engine,\n)\nfrom polygraphy.backend.trt import util as trt_util\nfrom transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection\n\nfrom diffusers.models import AutoencoderKL, UNet2DConditionModel\nfrom diffusers.pipelines.stable_diffusion import (\n StableDiffusionPipeline,\n StableDiffusionPipelineOutput,\n StableDiffusionSafetyChecker,\n)\nfrom diffusers.schedulers import DDIMScheduler\nfrom diffusers.utils import logging\n\n\n\"\"\"\nInstallation instructions\npython3 -m pip install --upgrade transformers diffusers>=0.16.0\npython3 -m pip install --upgrade tensorrt>=8.6.1\npython3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com\npython3 -m pip install onnxruntime\n\"\"\"\n\nTRT_LOGGER = trt.Logger(trt.Logger.ERROR)\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n# Map of numpy dtype -> torch dtype\nnumpy_to_torch_dtype_dict = {\n np.uint8: torch.uint8,\n np.int8: torch.int8,\n np.int16: torch.int16,\n np.int32: torch.int32,\n np.int64: torch.int64,\n np.float16: torch.float16,\n np.float32: torch.float32,\n np.float64: torch.float64,\n np.complex64: torch.complex64,\n np.complex128: torch.complex128,\n}\nif np.version.full_version >= \"1.24.0\":\n numpy_to_torch_dtype_dict[np.bool_] = torch.bool\nelse:\n numpy_to_torch_dtype_dict[np.bool] = torch.bool\n\n# Map of torch dtype -> numpy dtype\ntorch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}\n\n\ndef device_view(t):\n return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])\n\n\nclass Engine:\n def __init__(self, engine_path):\n self.engine_path = engine_path\n self.engine = None\n self.context = None\n self.buffers = OrderedDict()\n self.tensors = OrderedDict()\n\n def __del__(self):\n [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]\n del self.engine\n del self.context\n del self.buffers\n del self.tensors\n\n def build(\n self,\n onnx_path,\n fp16,\n input_profile=None,\n enable_preview=False,\n enable_all_tactics=False,\n timing_cache=None,\n 
workspace_size=0,\n ):\n logger.warning(f\"Building TensorRT engine for {onnx_path}: {self.engine_path}\")\n p = Profile()\n if input_profile:\n for name, dims in input_profile.items():\n assert len(dims) == 3\n p.add(name, min=dims[0], opt=dims[1], max=dims[2])\n\n config_kwargs = {}\n\n config_kwargs[\"preview_features\"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]\n if enable_preview:\n # Faster dynamic shapes made optional since it increases engine build time.\n config_kwargs[\"preview_features\"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)\n if workspace_size > 0:\n config_kwargs[\"memory_pool_limits\"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}\n if not enable_all_tactics:\n config_kwargs[\"tactic_sources\"] = []\n\n engine = engine_from_network(\n network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),\n config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),\n save_timing_cache=timing_cache,\n )\n save_engine(engine, path=self.engine_path)\n\n def load(self):\n logger.warning(f\"Loading TensorRT engine: {self.engine_path}\")\n self.engine = engine_from_bytes(bytes_from_path(self.engine_path))\n\n def activate(self):\n self.context = self.engine.create_execution_context()\n\n def allocate_buffers(self, shape_dict=None, device=\"cuda\"):\n for idx in range(trt_util.get_bindings_per_profile(self.engine)):\n binding = self.engine[idx]\n if shape_dict and binding in shape_dict:\n shape = shape_dict[binding]\n else:\n shape = self.engine.get_binding_shape(binding)\n dtype = trt.nptype(self.engine.get_binding_dtype(binding))\n if self.engine.binding_is_input(binding):\n self.context.set_binding_shape(idx, shape)\n tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)\n self.tensors[binding] = tensor\n self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)\n\n def infer(self, feed_dict, stream):\n start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)\n # shallow copy of ordered dict\n device_buffers = copy(self.buffers)\n for name, buf in feed_dict.items():\n assert isinstance(buf, cuda.DeviceView)\n device_buffers[name] = buf\n bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]\n noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)\n if not noerror:\n raise ValueError(\"ERROR: inference failed.\")\n\n return self.tensors\n\n\nclass Optimizer:\n def __init__(self, onnx_graph):\n self.graph = gs.import_onnx(onnx_graph)\n\n def cleanup(self, return_onnx=False):\n self.graph.cleanup().toposort()\n if return_onnx:\n return gs.export_onnx(self.graph)\n\n def select_outputs(self, keep, names=None):\n self.graph.outputs = [self.graph.outputs[o] for o in keep]\n if names:\n for i, name in enumerate(names):\n self.graph.outputs[i].name = name\n\n def fold_constants(self, return_onnx=False):\n onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)\n self.graph = gs.import_onnx(onnx_graph)\n if return_onnx:\n return onnx_graph\n\n def infer_shapes(self, return_onnx=False):\n onnx_graph = gs.export_onnx(self.graph)\n if onnx_graph.ByteSize() > 2147483648:\n raise TypeError(\"ERROR: model size exceeds supported 2GB limit\")\n else:\n onnx_graph = shape_inference.infer_shapes(onnx_graph)\n\n self.graph = gs.import_onnx(onnx_graph)\n if return_onnx:\n return onnx_graph\n\n\nclass BaseModel:\n def 
__init__(self, model, fp16=False, device=\"cuda\", max_batch_size=16, embedding_dim=768, text_maxlen=77):\n self.model = model\n self.name = \"SD Model\"\n self.fp16 = fp16\n self.device = device\n\n self.min_batch = 1\n self.max_batch = max_batch_size\n self.min_image_shape = 256 # min image resolution: 256x256\n self.max_image_shape = 1024 # max image resolution: 1024x1024\n self.min_latent_shape = self.min_image_shape // 8\n self.max_latent_shape = self.max_image_shape // 8\n\n self.embedding_dim = embedding_dim\n self.text_maxlen = text_maxlen\n\n def get_model(self):\n return self.model\n\n def get_input_names(self):\n pass\n\n def get_output_names(self):\n pass\n\n def get_dynamic_axes(self):\n return None\n\n def get_sample_input(self, batch_size, image_height, image_width):\n pass\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n return None\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n return None\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.cleanup()\n opt.fold_constants()\n opt.infer_shapes()\n onnx_opt_graph = opt.cleanup(return_onnx=True)\n return onnx_opt_graph\n\n def check_dims(self, batch_size, image_height, image_width):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n assert image_height % 8 == 0 or image_width % 8 == 0\n latent_height = image_height // 8\n latent_width = image_width // 8\n assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape\n assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape\n return (latent_height, latent_width)\n\n def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n latent_height = image_height // 8\n latent_width = image_width // 8\n min_image_height = image_height if static_shape else self.min_image_shape\n max_image_height = image_height if static_shape else self.max_image_shape\n min_image_width = image_width if static_shape else self.min_image_shape\n max_image_width = image_width if static_shape else self.max_image_shape\n min_latent_height = latent_height if static_shape else self.min_latent_shape\n max_latent_height = latent_height if static_shape else self.max_latent_shape\n min_latent_width = latent_width if static_shape else self.min_latent_shape\n max_latent_width = latent_width if static_shape else self.max_latent_shape\n return (\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n )\n\n\ndef getOnnxPath(model_name, onnx_dir, opt=True):\n return os.path.join(onnx_dir, model_name + (\".opt\" if opt else \"\") + \".onnx\")\n\n\ndef getEnginePath(model_name, engine_dir):\n return os.path.join(engine_dir, model_name + \".plan\")\n\n\ndef build_engines(\n models: dict,\n engine_dir,\n onnx_dir,\n onnx_opset,\n opt_image_height,\n opt_image_width,\n opt_batch_size=1,\n force_engine_rebuild=False,\n static_batch=False,\n static_shape=True,\n enable_preview=False,\n enable_all_tactics=False,\n timing_cache=None,\n max_workspace_size=0,\n):\n built_engines = {}\n if not os.path.isdir(onnx_dir):\n os.makedirs(onnx_dir)\n if not os.path.isdir(engine_dir):\n os.makedirs(engine_dir)\n\n # Export models to ONNX\n for model_name, model_obj in 
models.items():\n engine_path = getEnginePath(model_name, engine_dir)\n if force_engine_rebuild or not os.path.exists(engine_path):\n logger.warning(\"Building Engines...\")\n logger.warning(\"Engine build can take a while to complete\")\n onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)\n onnx_opt_path = getOnnxPath(model_name, onnx_dir)\n if force_engine_rebuild or not os.path.exists(onnx_opt_path):\n if force_engine_rebuild or not os.path.exists(onnx_path):\n logger.warning(f\"Exporting model: {onnx_path}\")\n model = model_obj.get_model()\n with torch.inference_mode(), torch.autocast(\"cuda\"):\n inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)\n torch.onnx.export(\n model,\n inputs,\n onnx_path,\n export_params=True,\n opset_version=onnx_opset,\n do_constant_folding=True,\n input_names=model_obj.get_input_names(),\n output_names=model_obj.get_output_names(),\n dynamic_axes=model_obj.get_dynamic_axes(),\n )\n del model\n torch.cuda.empty_cache()\n gc.collect()\n else:\n logger.warning(f\"Found cached model: {onnx_path}\")\n\n # Optimize onnx\n if force_engine_rebuild or not os.path.exists(onnx_opt_path):\n logger.warning(f\"Generating optimizing model: {onnx_opt_path}\")\n onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))\n onnx.save(onnx_opt_graph, onnx_opt_path)\n else:\n logger.warning(f\"Found cached optimized model: {onnx_opt_path} \")\n\n # Build TensorRT engines\n for model_name, model_obj in models.items():\n engine_path = getEnginePath(model_name, engine_dir)\n engine = Engine(engine_path)\n onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)\n onnx_opt_path = getOnnxPath(model_name, onnx_dir)\n\n if force_engine_rebuild or not os.path.exists(engine.engine_path):\n engine.build(\n onnx_opt_path,\n fp16=True,\n input_profile=model_obj.get_input_profile(\n opt_batch_size,\n opt_image_height,\n opt_image_width,\n static_batch=static_batch,\n static_shape=static_shape,\n ),\n enable_preview=enable_preview,\n timing_cache=timing_cache,\n workspace_size=max_workspace_size,\n )\n built_engines[model_name] = engine\n\n # Load and activate TensorRT engines\n for model_name, model_obj in models.items():\n engine = built_engines[model_name]\n engine.load()\n engine.activate()\n\n return built_engines\n\n\ndef runEngine(engine, feed_dict, stream):\n return engine.infer(feed_dict, stream)\n\n\nclass CLIP(BaseModel):\n def __init__(self, model, device, max_batch_size, embedding_dim):\n super(CLIP, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"CLIP\"\n\n def get_input_names(self):\n return [\"input_ids\"]\n\n def get_output_names(self):\n return [\"text_embeddings\", \"pooler_output\"]\n\n def get_dynamic_axes(self):\n return {\"input_ids\": {0: \"B\"}, \"text_embeddings\": {0: \"B\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n self.check_dims(batch_size, image_height, image_width)\n min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(\n batch_size, image_height, image_width, static_batch, static_shape\n )\n return {\n \"input_ids\": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return {\n \"input_ids\": (batch_size, self.text_maxlen),\n \"text_embeddings\": (batch_size, self.text_maxlen, 
self.embedding_dim),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.select_outputs([0]) # delete graph output#1\n opt.cleanup()\n opt.fold_constants()\n opt.infer_shapes()\n opt.select_outputs([0], names=[\"text_embeddings\"]) # rename network output\n opt_onnx_graph = opt.cleanup(return_onnx=True)\n return opt_onnx_graph\n\n\ndef make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):\n return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass UNet(BaseModel):\n def __init__(\n self, model, fp16=False, device=\"cuda\", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4\n ):\n super(UNet, self).__init__(\n model=model,\n fp16=fp16,\n device=device,\n max_batch_size=max_batch_size,\n embedding_dim=embedding_dim,\n text_maxlen=text_maxlen,\n )\n self.unet_dim = unet_dim\n self.name = \"UNet\"\n\n def get_input_names(self):\n return [\"sample\", \"timestep\", \"encoder_hidden_states\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\n \"sample\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n \"encoder_hidden_states\": {0: \"2B\"},\n \"latent\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n }\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"sample\": [\n (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),\n (2 * batch_size, self.unet_dim, latent_height, latent_width),\n (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),\n ],\n \"encoder_hidden_states\": [\n (2 * min_batch, self.text_maxlen, self.embedding_dim),\n (2 * batch_size, self.text_maxlen, self.embedding_dim),\n (2 * max_batch, self.text_maxlen, self.embedding_dim),\n ],\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"sample\": (2 * batch_size, self.unet_dim, latent_height, latent_width),\n \"encoder_hidden_states\": (2 * batch_size, self.text_maxlen, self.embedding_dim),\n \"latent\": (2 * batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n dtype = torch.float16 if self.fp16 else torch.float32\n return (\n torch.randn(\n 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device\n ),\n torch.tensor([1.0], dtype=torch.float32, device=self.device),\n torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),\n )\n\n\ndef make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):\n return UNet(\n model,\n fp16=True,\n device=device,\n max_batch_size=max_batch_size,\n embedding_dim=embedding_dim,\n unet_dim=(9 if inpaint else 4),\n )\n\n\nclass VAE(BaseModel):\n def __init__(self, model, device, max_batch_size, 
embedding_dim):\n super(VAE, self).__init__(\n model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim\n )\n self.name = \"VAE decoder\"\n\n def get_input_names(self):\n return [\"latent\"]\n\n def get_output_names(self):\n return [\"images\"]\n\n def get_dynamic_axes(self):\n return {\"latent\": {0: \"B\", 2: \"H\", 3: \"W\"}, \"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"latent\": [\n (min_batch, 4, min_latent_height, min_latent_width),\n (batch_size, 4, latent_height, latent_width),\n (max_batch, 4, max_latent_height, max_latent_width),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"latent\": (batch_size, 4, latent_height, latent_width),\n \"images\": (batch_size, 3, image_height, image_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)\n\n\ndef make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):\n return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)\n\n\nclass TensorRTStableDiffusionPipeline(StableDiffusionPipeline):\n r\"\"\"\n Pipeline for text-to-image generation using TensorRT accelerated Stable Diffusion.\n\n This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: DDIMScheduler,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n image_encoder: CLIPVisionModelWithProjection = None,\n requires_safety_checker: bool = True,\n stages=[\"clip\", \"unet\", \"vae\"],\n image_height: int = 768,\n image_width: int = 768,\n max_batch_size: int = 16,\n # ONNX export parameters\n onnx_opset: int = 17,\n onnx_dir: str = \"onnx\",\n # TensorRT engine build parameters\n engine_dir: str = \"engine\",\n build_preview_features: bool = True,\n force_engine_rebuild: bool = False,\n timing_cache: str = \"timing_cache\",\n ):\n super().__init__(\n vae,\n text_encoder,\n tokenizer,\n unet,\n scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n image_encoder=image_encoder,\n requires_safety_checker=requires_safety_checker,\n )\n\n self.vae.forward = self.vae.decode\n\n self.stages = stages\n self.image_height, self.image_width = image_height, image_width\n self.inpaint = False\n self.onnx_opset = onnx_opset\n self.onnx_dir = onnx_dir\n self.engine_dir = engine_dir\n self.force_engine_rebuild = force_engine_rebuild\n self.timing_cache = timing_cache\n self.build_static_batch = False\n self.build_dynamic_shape = False\n self.build_preview_features = build_preview_features\n\n self.max_batch_size = max_batch_size\n # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.\n if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:\n self.max_batch_size = 4\n\n self.stream = None # loaded in loadResources()\n self.models = {} # loaded in __loadModels()\n self.engine = {} # loaded in build_engines()\n\n def __loadModels(self):\n # Load pipeline models\n self.embedding_dim = self.text_encoder.config.hidden_size\n models_args = {\n \"device\": self.torch_device,\n \"max_batch_size\": self.max_batch_size,\n \"embedding_dim\": self.embedding_dim,\n \"inpaint\": self.inpaint,\n }\n if \"clip\" in self.stages:\n self.models[\"clip\"] = make_CLIP(self.text_encoder, **models_args)\n if \"unet\" in self.stages:\n self.models[\"unet\"] = make_UNet(self.unet, **models_args)\n if \"vae\" in self.stages:\n self.models[\"vae\"] = make_VAE(self.vae, **models_args)\n\n @classmethod\n @validate_hf_hub_args\n def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):\n cache_dir = kwargs.pop(\"cache_dir\", None)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n cls.cached_folder = (\n pretrained_model_name_or_path\n if os.path.isdir(pretrained_model_name_or_path)\n else snapshot_download(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n resume_download=resume_download,\n 
proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n )\n )\n\n def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):\n super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)\n\n self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)\n self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)\n self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)\n\n # set device\n self.torch_device = self._execution_device\n logger.warning(f\"Running inference on device: {self.torch_device}\")\n\n # load models\n self.__loadModels()\n\n # build engines\n self.engine = build_engines(\n self.models,\n self.engine_dir,\n self.onnx_dir,\n self.onnx_opset,\n opt_image_height=self.image_height,\n opt_image_width=self.image_width,\n force_engine_rebuild=self.force_engine_rebuild,\n static_batch=self.build_static_batch,\n static_shape=not self.build_dynamic_shape,\n enable_preview=self.build_preview_features,\n timing_cache=self.timing_cache,\n )\n\n return self\n\n def __encode_prompt(self, prompt, negative_prompt):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.\n Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n \"\"\"\n # Tokenize prompt\n text_input_ids = (\n self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n .input_ids.type(torch.int32)\n .to(self.torch_device)\n )\n\n text_input_ids_inp = device_view(text_input_ids)\n # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt\n text_embeddings = runEngine(self.engine[\"clip\"], {\"input_ids\": text_input_ids_inp}, self.stream)[\n \"text_embeddings\"\n ].clone()\n\n # Tokenize negative prompt\n uncond_input_ids = (\n self.tokenizer(\n negative_prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n .input_ids.type(torch.int32)\n .to(self.torch_device)\n )\n uncond_input_ids_inp = device_view(uncond_input_ids)\n uncond_embeddings = runEngine(self.engine[\"clip\"], {\"input_ids\": uncond_input_ids_inp}, self.stream)[\n \"text_embeddings\"\n ]\n\n # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)\n\n return text_embeddings\n\n def __denoise_latent(\n self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None\n ):\n if not isinstance(timesteps, torch.Tensor):\n timesteps = self.scheduler.timesteps\n for step_index, timestep in enumerate(timesteps):\n # Expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2)\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)\n if isinstance(mask, torch.Tensor):\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # Predict the noise residual\n 
timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep\n\n sample_inp = device_view(latent_model_input)\n timestep_inp = device_view(timestep_float)\n embeddings_inp = device_view(text_embeddings)\n noise_pred = runEngine(\n self.engine[\"unet\"],\n {\"sample\": sample_inp, \"timestep\": timestep_inp, \"encoder_hidden_states\": embeddings_inp},\n self.stream,\n )[\"latent\"]\n\n # Perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample\n\n latents = 1.0 / 0.18215 * latents\n return latents\n\n def __decode_latent(self, latents):\n images = runEngine(self.engine[\"vae\"], {\"latent\": device_view(latents)}, self.stream)[\"images\"]\n images = (images / 2 + 0.5).clamp(0, 1)\n return images.cpu().permute(0, 2, 3, 1).float().numpy()\n\n def __loadResources(self, image_height, image_width, batch_size):\n self.stream = cuda.Stream()\n\n # Allocate buffers for TensorRT engine bindings\n for model_name, obj in self.models.items():\n self.engine[model_name].allocate_buffers(\n shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. 
instead.\n Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n\n \"\"\"\n self.generator = generator\n self.denoising_steps = num_inference_steps\n self.guidance_scale = guidance_scale\n\n # Pre-compute latent input scales and linear multistep coefficients\n self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)\n\n # Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n prompt = [prompt]\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"Expected prompt to be of type list or str but got {type(prompt)}\")\n\n if negative_prompt is None:\n negative_prompt = [\"\"] * batch_size\n\n if negative_prompt is not None and isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt]\n\n assert len(prompt) == len(negative_prompt)\n\n if batch_size > self.max_batch_size:\n raise ValueError(\n f\"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4\"\n )\n\n # load resources\n self.__loadResources(self.image_height, self.image_width, batch_size)\n\n with torch.inference_mode(), torch.autocast(\"cuda\"), trt.Runtime(TRT_LOGGER):\n # CLIP text encoder\n text_embeddings = self.__encode_prompt(prompt, negative_prompt)\n\n # Pre-initialize latents\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size,\n num_channels_latents,\n self.image_height,\n self.image_width,\n torch.float32,\n self.torch_device,\n generator,\n )\n\n # UNet denoiser\n latents = self.__denoise_latent(latents, text_embeddings)\n\n # VAE decode latent\n images = self.__decode_latent(latents)\n\n images, has_nsfw_concept = self.run_safety_checker(images, self.torch_device, text_embeddings.dtype)\n images = self.numpy_to_pil(images)\n return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)\n", "output": ["device_view", "build_engines", "make_VAE", "make_CLIP", "getEnginePath", "make_UNet", "runEngine", "getOnnxPath", "Engine", "Optimizer", "BaseModel", "CLIP", "VAE", "UNet", "TensorRTStableDiffusionPipeline"], "metadata": {"file_path": "diffusers-main/examples/community/stable_diffusion_tensorrt_txt2img.py", "file_length": 11943, "symbol_dict": [{"symbol": "make_CLIP", "type": "mannual_defined_function", "byte_location": 16865, "location": 5439}, {"symbol": "build_engines", "type": "mannual_defined_function", "byte_location": 11138, "location": 3675}, {"symbol": "make_UNet", "type": "mannual_defined_function", "byte_location": 20015, "location": 6496}, {"symbol": "make_VAE", "type": "mannual_defined_function", "byte_location": 22199, "location": 7230}, {"symbol": "device_view", "type": "mannual_defined_function", "byte_location": 2910, "location": 1017}, {"symbol": "getOnnxPath", "type": "mannual_defined_function", "byte_location": 10902, "location": 3586}, {"symbol": "getEnginePath", "type": "mannual_defined_function", "byte_location": 11035, "location": 3639}, {"symbol": "runEngine", "type": "mannual_defined_function", "byte_location": 14880, "location": 4774}, {"symbol": "Engine", "type": "mannual_defined_class", "byte_location": 3034, "location": 1069}, {"symbol": "UNet", "type": "mannual_defined_class", 
"byte_location": 17041, "location": 5501}, {"symbol": "BaseModel", "type": "mannual_defined_class", "byte_location": 7714, "location": 2561}, {"symbol": "TensorRTStableDiffusionPipeline", "type": "mannual_defined_class", "byte_location": 22373, "location": 7292}, {"symbol": "VAE", "type": "mannual_defined_class", "byte_location": 20289, "location": 6594}, {"symbol": "CLIP", "type": "mannual_defined_class", "byte_location": 14967, "location": 4803}, {"symbol": "Optimizer", "type": "mannual_defined_class", "byte_location": 6570, "location": 2168}]}} {"input": "import copy\nimport inspect\nfrom dataclasses import dataclass\nfrom typing import Callable, List, Optional, Union\n\nimport numpy as np\nimport PIL.Image\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.functional import grid_sample\nfrom transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer\n\nfrom ...image_processor import VaeImageProcessor\nfrom ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin\nfrom ...models import AutoencoderKL, UNet2DConditionModel\nfrom ...models.lora import adjust_lora_scale_text_encoder\nfrom ...schedulers import KarrasDiffusionSchedulers\nfrom ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers\nfrom ...utils.torch_utils import randn_tensor\nfrom ..pipeline_utils import DiffusionPipeline\nfrom ..stable_diffusion import StableDiffusionSafetyChecker\n\n\nlogger = logging.get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef rearrange_0(tensor, f):\n F, C, H, W = tensor.size()\n tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4))\n return tensor\n\n\ndef rearrange_1(tensor):\n B, C, F, H, W = tensor.size()\n return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W))\n\n\ndef rearrange_3(tensor, f):\n F, D, C = tensor.size()\n return torch.reshape(tensor, (F // f, f, D, C))\n\n\ndef rearrange_4(tensor):\n B, F, D, C = tensor.size()\n return torch.reshape(tensor, (B * F, D, C))\n\n\nclass CrossFrameAttnProcessor:\n \"\"\"\n Cross frame attention processor. 
Each frame attends the first frame.\n\n Args:\n batch_size: The number that represents actual batch size, other than the frames.\n For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to\n 2, due to classifier-free guidance.\n \"\"\"\n\n def __init__(self, batch_size=2):\n self.batch_size = batch_size\n\n def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):\n batch_size, sequence_length, _ = hidden_states.shape\n attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n query = attn.to_q(hidden_states)\n\n is_cross_attention = encoder_hidden_states is not None\n if encoder_hidden_states is None:\n encoder_hidden_states = hidden_states\n elif attn.norm_cross:\n encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n key = attn.to_k(encoder_hidden_states)\n value = attn.to_v(encoder_hidden_states)\n\n # Cross Frame Attention\n if not is_cross_attention:\n video_length = key.size()[0] // self.batch_size\n first_frame_index = [0] * video_length\n\n # rearrange keys to have batch and frames in the 1st and 2nd dims respectively\n key = rearrange_3(key, video_length)\n key = key[:, first_frame_index]\n # rearrange values to have batch and frames in the 1st and 2nd dims respectively\n value = rearrange_3(value, video_length)\n value = value[:, first_frame_index]\n\n # rearrange back to original shape\n key = rearrange_4(key)\n value = rearrange_4(value)\n\n query = attn.head_to_batch_dim(query)\n key = attn.head_to_batch_dim(key)\n value = attn.head_to_batch_dim(value)\n\n attention_probs = attn.get_attention_scores(query, key, attention_mask)\n hidden_states = torch.bmm(attention_probs, value)\n hidden_states = attn.batch_to_head_dim(hidden_states)\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n\n return hidden_states\n\n\nclass CrossFrameAttnProcessor2_0:\n \"\"\"\n Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0.\n\n Args:\n batch_size: The number that represents actual batch size, other than the frames.\n For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to\n 2, due to classifier-free guidance.\n \"\"\"\n\n def __init__(self, batch_size=2):\n if not hasattr(F, \"scaled_dot_product_attention\"):\n raise ImportError(\"AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\")\n self.batch_size = batch_size\n\n def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):\n batch_size, sequence_length, _ = (\n hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape\n )\n inner_dim = hidden_states.shape[-1]\n\n if attention_mask is not None:\n attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n # scaled_dot_product_attention expects attention_mask shape to be\n # (batch, heads, source_length, target_length)\n attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])\n\n query = attn.to_q(hidden_states)\n\n is_cross_attention = encoder_hidden_states is not None\n if encoder_hidden_states is None:\n encoder_hidden_states = hidden_states\n elif attn.norm_cross:\n encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n key = attn.to_k(encoder_hidden_states)\n value = attn.to_v(encoder_hidden_states)\n\n # Cross 
Frame Attention\n if not is_cross_attention:\n video_length = max(1, key.size()[0] // self.batch_size)\n first_frame_index = [0] * video_length\n\n # rearrange keys to have batch and frames in the 1st and 2nd dims respectively\n key = rearrange_3(key, video_length)\n key = key[:, first_frame_index]\n # rearrange values to have batch and frames in the 1st and 2nd dims respectively\n value = rearrange_3(value, video_length)\n value = value[:, first_frame_index]\n\n # rearrange back to original shape\n key = rearrange_4(key)\n value = rearrange_4(value)\n\n head_dim = inner_dim // attn.heads\n query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n\n # the output of sdp = (batch, num_heads, seq_len, head_dim)\n # TODO: add support for attn.scale when we move to Torch 2.1\n hidden_states = F.scaled_dot_product_attention(\n query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False\n )\n\n hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)\n hidden_states = hidden_states.to(query.dtype)\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n return hidden_states\n\n\n@dataclass\nclass TextToVideoPipelineOutput(BaseOutput):\n r\"\"\"\n Output class for zero-shot text-to-video pipeline.\n\n Args:\n images (`[List[PIL.Image.Image]`, `np.ndarray`]):\n List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,\n num_channels)`.\n nsfw_content_detected (`[List[bool]]`):\n List indicating whether the corresponding generated image contains \"not-safe-for-work\" (nsfw) content or\n `None` if safety checking could not be performed.\n \"\"\"\n\n images: Union[List[PIL.Image.Image], np.ndarray]\n nsfw_content_detected: Optional[List[bool]]\n\n\ndef coords_grid(batch, ht, wd, device):\n # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py\n coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))\n coords = torch.stack(coords[::-1], dim=0).float()\n return coords[None].repeat(batch, 1, 1, 1)\n\n\ndef warp_single_latent(latent, reference_flow):\n \"\"\"\n Warp latent of a single frame with given flow\n\n Args:\n latent: latent code of a single frame\n reference_flow: flow which to warp the latent with\n\n Returns:\n warped: warped latent\n \"\"\"\n _, _, H, W = reference_flow.size()\n _, _, h, w = latent.size()\n coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype)\n\n coords_t0 = coords0 + reference_flow\n coords_t0[:, 0] /= W\n coords_t0[:, 1] /= H\n\n coords_t0 = coords_t0 * 2.0 - 1.0\n coords_t0 = F.interpolate(coords_t0, size=(h, w), mode=\"bilinear\")\n coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1))\n\n warped = grid_sample(latent, coords_t0, mode=\"nearest\", padding_mode=\"reflection\")\n return warped\n\n\ndef create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype):\n \"\"\"\n Create translation motion field\n\n Args:\n motion_field_strength_x: motion strength along x-axis\n motion_field_strength_y: motion strength along y-axis\n frame_ids: indexes of the frames the latents of which are being processed.\n This is needed when we perform chunk-by-chunk inference\n device: device\n dtype: dtype\n\n Returns:\n\n \"\"\"\n seq_length = 
len(frame_ids)\n reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype)\n for fr_idx in range(seq_length):\n reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx])\n reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx])\n return reference_flow\n\n\ndef create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents):\n \"\"\"\n Creates translation motion and warps the latents accordingly\n\n Args:\n motion_field_strength_x: motion strength along x-axis\n motion_field_strength_y: motion strength along y-axis\n frame_ids: indexes of the frames the latents of which are being processed.\n This is needed when we perform chunk-by-chunk inference\n latents: latent codes of frames\n\n Returns:\n warped_latents: warped latents\n \"\"\"\n motion_field = create_motion_field(\n motion_field_strength_x=motion_field_strength_x,\n motion_field_strength_y=motion_field_strength_y,\n frame_ids=frame_ids,\n device=latents.device,\n dtype=latents.dtype,\n )\n warped_latents = latents.clone().detach()\n for i in range(len(warped_latents)):\n warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None])\n return warped_latents\n\n\nclass TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):\n r\"\"\"\n Pipeline for zero-shot text-to-video generation using Stable Diffusion.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).\n tokenizer (`CLIPTokenizer`):\n A [`~transformers.CLIPTokenizer`] to tokenize text.\n unet ([`UNet2DConditionModel`]):\n A [`UNet3DConditionModel`] to denoise the encoded video latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details\n about a model's potential harms.\n feature_extractor ([`CLIPImageProcessor`]):\n A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__()\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n processor = (\n CrossFrameAttnProcessor2_0(batch_size=2)\n if hasattr(F, \"scaled_dot_product_attention\")\n else CrossFrameAttnProcessor(batch_size=2)\n )\n self.unet.set_attn_processor(processor)\n\n if safety_checker is None and requires_safety_checker:\n logger.warning(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n\n def forward_loop(self, x_t0, t0, t1, generator):\n \"\"\"\n Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance.\n\n Args:\n x_t0:\n Latent code at time t0.\n t0:\n Timestep at t0.\n t1:\n Timestamp at t1.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n\n Returns:\n x_t1:\n Forward process applied to x_t0 from time t0 to t1.\n \"\"\"\n eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device)\n alpha_vec = torch.prod(self.scheduler.alphas[t0:t1])\n x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps\n return x_t1\n\n def backward_loop(\n self,\n latents,\n timesteps,\n prompt_embeds,\n guidance_scale,\n callback,\n callback_steps,\n num_warmup_steps,\n extra_step_kwargs,\n cross_attention_kwargs=None,\n ):\n \"\"\"\n Perform backward process given list of time steps.\n\n Args:\n latents:\n Latents at time timesteps[0].\n timesteps:\n Time steps along which to perform backward process.\n prompt_embeds:\n Pre-generated text embeddings.\n guidance_scale:\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. 
Guidance scale is enabled when `guidance_scale > 1`.\n callback (`Callable`, *optional*):\n A function that calls every `callback_steps` steps during inference. The function is called with the\n following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function is called. If not specified, the callback is called at\n every step.\n extra_step_kwargs:\n Extra_step_kwargs.\n cross_attention_kwargs:\n A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in\n [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n num_warmup_steps:\n number of warmup steps.\n\n Returns:\n latents:\n Latents of backward process output at time timesteps[-1].\n \"\"\"\n do_classifier_free_guidance = guidance_scale > 1.0\n num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order\n with self.progress_bar(total=num_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n ).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n step_idx = i // getattr(self.scheduler, \"order\", 1)\n callback(step_idx, t, latents)\n return latents.clone().detach()\n\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n callback_on_step_end_tensor_inputs=None,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n if callback_on_step_end_tensor_inputs is not None and not all(\n k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs\n ):\n raise ValueError(\n f\"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. 
Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int] = 8,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n motion_field_strength_x: float = 12,\n motion_field_strength_y: float = 12,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n t0: int = 44,\n t1: int = 47,\n frame_ids: Optional[List[int]] = None,\n ):\n \"\"\"\n The call function to the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.\n video_length (`int`, *optional*, defaults to 8):\n The number of generated video frames.\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide what to not include in video generation. If not defined, you need to\n pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).\n num_videos_per_prompt (`int`, *optional*, defaults to 1):\n The number of videos to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (\u03b7) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies\n to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"numpy\"`):\n The output format of the generated video. Choose between `\"latent\"` and `\"numpy\"`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a\n [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of\n a plain tuple.\n callback (`Callable`, *optional*):\n A function that calls every `callback_steps` steps during inference. The function is called with the\n following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function is called. If not specified, the callback is called at\n every step.\n motion_field_strength_x (`float`, *optional*, defaults to 12):\n Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439),\n Sect. 3.3.1.\n motion_field_strength_y (`float`, *optional*, defaults to 12):\n Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439),\n Sect. 3.3.1.\n t0 (`int`, *optional*, defaults to 44):\n Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the\n [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1.\n t1 (`int`, *optional*, defaults to 47):\n Timestep t0. Should be in the range [t0 + 1, num_inference_steps - 1]. See the\n [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1.\n frame_ids (`List[int]`, *optional*):\n Indexes of the frames that are being generated. 
This is used when generating longer videos\n chunk-by-chunk.\n\n Returns:\n [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]:\n The output contains a `ndarray` of the generated video, when `output_type` != `\"latent\"`, otherwise a\n latent code of generated videos and a list of `bool`s indicating whether the corresponding generated\n video contains \"not-safe-for-work\" (nsfw) content..\n \"\"\"\n assert video_length > 0\n if frame_ids is None:\n frame_ids = list(range(video_length))\n assert len(frame_ids) == video_length\n\n assert num_videos_per_prompt == 1\n\n if isinstance(prompt, str):\n prompt = [prompt]\n if isinstance(negative_prompt, str):\n negative_prompt = [negative_prompt]\n\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n batch_size = 1 if isinstance(prompt, str) else len(prompt)\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt_embeds_tuple = self.encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n\n # Perform the first backward process up to time T_1\n x_1_t1 = self.backward_loop(\n timesteps=timesteps[: -t1 - 1],\n prompt_embeds=prompt_embeds,\n latents=latents,\n guidance_scale=guidance_scale,\n callback=callback,\n callback_steps=callback_steps,\n extra_step_kwargs=extra_step_kwargs,\n num_warmup_steps=num_warmup_steps,\n )\n scheduler_copy = copy.deepcopy(self.scheduler)\n\n # Perform the second backward process up to time T_0\n x_1_t0 = self.backward_loop(\n timesteps=timesteps[-t1 - 1 : -t0 - 1],\n prompt_embeds=prompt_embeds,\n latents=x_1_t1,\n guidance_scale=guidance_scale,\n callback=callback,\n callback_steps=callback_steps,\n extra_step_kwargs=extra_step_kwargs,\n num_warmup_steps=0,\n )\n\n # Propagate first frame latents at time T_0 to remaining frames\n x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1)\n\n # Add motion in latents at time T_0\n x_2k_t0 = create_motion_field_and_warp_latents(\n motion_field_strength_x=motion_field_strength_x,\n motion_field_strength_y=motion_field_strength_y,\n latents=x_2k_t0,\n frame_ids=frame_ids[1:],\n )\n\n # Perform forward process up to time T_1\n x_2k_t1 = self.forward_loop(\n x_t0=x_2k_t0,\n t0=timesteps[-t0 - 1].item(),\n t1=timesteps[-t1 - 1].item(),\n generator=generator,\n )\n\n # Perform backward process from time T_1 
to 0\n x_1k_t1 = torch.cat([x_1_t1, x_2k_t1])\n b, l, d = prompt_embeds.size()\n prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d)\n\n self.scheduler = scheduler_copy\n x_1k_0 = self.backward_loop(\n timesteps=timesteps[-t1 - 1 :],\n prompt_embeds=prompt_embeds,\n latents=x_1k_t1,\n guidance_scale=guidance_scale,\n callback=callback,\n callback_steps=callback_steps,\n extra_step_kwargs=extra_step_kwargs,\n num_warmup_steps=0,\n )\n latents = x_1k_0\n\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if output_type == \"latent\":\n image = latents\n has_nsfw_concept = None\n else:\n image = self.decode_latents(latents)\n # Run safety checker\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker\n def run_safety_checker(self, image, device, dtype):\n if self.safety_checker is None:\n has_nsfw_concept = None\n else:\n if torch.is_tensor(image):\n feature_extractor_input = self.image_processor.postprocess(image, output_type=\"pil\")\n else:\n feature_extractor_input = self.image_processor.numpy_to_pil(image)\n safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors=\"pt\").to(device)\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(dtype)\n )\n return image, has_nsfw_concept\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (\u03b7) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to \u03b7 in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt\n def encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n clip_skip: Optional[int] = None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt 
(`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n lora_scale (`float`, *optional*):\n A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.\n clip_skip (`int`, *optional*):\n Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that\n the output of the pre-final layer will be used for computing the prompt embeddings.\n \"\"\"\n # set lora scale so that monkey patched LoRA\n # function of text encoder can correctly access it\n if lora_scale is not None and isinstance(self, LoraLoaderMixin):\n self._lora_scale = lora_scale\n\n # dynamically adjust the LoRA scale\n if not USE_PEFT_BACKEND:\n adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)\n else:\n scale_lora_layers(self.text_encoder, lora_scale)\n\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n prompt = self.maybe_convert_prompt(prompt, self.tokenizer)\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n if clip_skip is None:\n prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)\n prompt_embeds = prompt_embeds[0]\n else:\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True\n )\n # Access the `hidden_states` first, that contains a tuple of\n # all the hidden states from the encoder layers. Then index into\n # the tuple to access the hidden states from the desired layer.\n prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]\n # We also need to apply the final LayerNorm here to not mess with the\n # representations. 
The `last_hidden_states` that we typically use for\n # obtaining the final prompt representations passes through the LayerNorm\n # layer.\n prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance and negative_prompt_embeds is None:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif prompt is not None and type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)\n\n max_length = prompt_embeds.shape[1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n negative_prompt_embeds = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n negative_prompt_embeds = negative_prompt_embeds[0]\n\n if do_classifier_free_guidance:\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:\n # Retrieve the original scale by scaling back the LoRA layers\n unscale_lora_layers(self.text_encoder, lora_scale)\n\n return prompt_embeds, negative_prompt_embeds\n\n def decode_latents(self, latents):\n latents = 1 / self.vae.config.scaling_factor * latents\n image = self.vae.decode(latents, return_dict=False)[0]\n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n return image\n", "output": ["create_motion_field_and_warp_latents", "coords_grid", 
"rearrange_1", "rearrange_4", "rearrange_3", "create_motion_field", "warp_single_latent", "rearrange_0", "CrossFrameAttnProcessor2_0", "TextToVideoZeroPipeline", "TextToVideoPipelineOutput", "CrossFrameAttnProcessor"], "metadata": {"file_path": "diffusers-main/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py", "file_length": 13269, "symbol_dict": [{"symbol": "create_motion_field", "type": "mannual_defined_function", "byte_location": 8898, "location": 2908}, {"symbol": "rearrange_3", "type": "mannual_defined_function", "byte_location": 1236, "location": 423}, {"symbol": "warp_single_latent", "type": "mannual_defined_function", "byte_location": 8115, "location": 2594}, {"symbol": "create_motion_field_and_warp_latents", "type": "mannual_defined_function", "byte_location": 9757, "location": 3188}, {"symbol": "rearrange_1", "type": "mannual_defined_function", "byte_location": 1092, "location": 353}, {"symbol": "coords_grid", "type": "mannual_defined_function", "byte_location": 7790, "location": 2462}, {"symbol": "rearrange_0", "type": "mannual_defined_function", "byte_location": 924, "location": 276}, {"symbol": "rearrange_4", "type": "mannual_defined_function", "byte_location": 1346, "location": 470}, {"symbol": "TextToVideoPipelineOutput", "type": "mannual_defined_class", "byte_location": 7129, "location": 2263}, {"symbol": "TextToVideoZeroPipeline", "type": "mannual_defined_class", "byte_location": 10781, "location": 3512}, {"symbol": "CrossFrameAttnProcessor", "type": "mannual_defined_class", "byte_location": 1452, "location": 515}, {"symbol": "CrossFrameAttnProcessor2_0", "type": "mannual_defined_class", "byte_location": 3780, "location": 1215}]}} {"input": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Configuration classes for graph optimization and quantization with ONNX Runtime.\"\"\"\n\nimport os\nimport warnings\nfrom dataclasses import asdict, dataclass, field\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom datasets import Dataset\nfrom packaging.version import Version, parse\n\nfrom onnxruntime import __version__ as ort_version\nfrom onnxruntime.quantization import CalibraterBase, CalibrationMethod, QuantFormat, QuantizationMode, QuantType\nfrom onnxruntime.quantization.calibrate import create_calibrator\nfrom onnxruntime.quantization.registry import IntegerOpsRegistry, QDQRegistry, QLinearOpsRegistry\nfrom onnxruntime.transformers.fusion_options import FusionOptions\n\nfrom ..configuration_utils import BaseConfig\nfrom ..utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n# This value is used to indicate ORT which axis it should use to quantize an operator \"per-channel\"\nORT_DEFAULT_CHANNEL_FOR_OPERATORS = {\"MatMul\": 1}\n\n# Reference: https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py\nORT_DEFAULT_OPS_DYNAMIC_QUANTIZATION = 
list(IntegerOpsRegistry.keys())\nORT_DEFAULT_OPS_STATIC_QUANTIZATION_QDQ = list(QDQRegistry.keys())\nORT_DEFAULT_OPS_STATIC_QUANTIZATION_QOPS = list(QLinearOpsRegistry.keys())\n\n\n@dataclass\nclass CalibrationConfig:\n \"\"\"\n CalibrationConfig is the configuration class handling all the ONNX Runtime parameters related to the calibration\n step of static quantization.\n\n Args:\n dataset_name (`str`):\n The name of the calibration dataset.\n dataset_config_name (`str`):\n The name of the calibration dataset configuration.\n dataset_split (`str`):\n Which split of the dataset is used to perform the calibration step.\n dataset_num_samples (`int`):\n The number of samples composing the calibration dataset.\n method (`CalibrationMethod`):\n The method chosen to calculate the activations quantization parameters using the calibration dataset.\n num_bins (`Optional[int]`, defaults to `None`):\n The number of bins to use when creating the histogram when performing the calibration step using the\n Percentile or Entropy method.\n num_quantized_bins (`Optional[int]`, defaults to `None`):\n The number of quantized bins to use when performing the calibration step using the Entropy method.\n percentile (`Optional[float]`, defaults to `None`):\n The percentile to use when computing the activations quantization ranges when performing the calibration\n step using the Percentile method.\n moving_average (`Optional[bool]`, defaults to `None`):\n Whether to compute the moving average of the minimum and maximum values when performing the calibration step\n using the MinMax method.\n averaging_constant (`Optional[float]`, defaults to `None`):\n The constant smoothing factor to use when computing the moving average of the minimum and maximum values.\n Effective only when the MinMax calibration method is selected and `moving_average` is set to True.\n \"\"\"\n\n dataset_name: str\n dataset_config_name: str\n dataset_split: str\n dataset_num_samples: int\n method: CalibrationMethod\n num_bins: Optional[int] = None\n num_quantized_bins: Optional[int] = None\n percentile: Optional[float] = None\n moving_average: Optional[bool] = None\n averaging_constant: Optional[float] = None\n\n def create_calibrator(\n self,\n onnx_model_path: Union[str, os.PathLike, Path],\n operators_to_quantize: Optional[List[str]],\n use_external_data_format: bool = False,\n force_symmetric_range: bool = False,\n augmented_model_name: str = \"augmented_model.onnx\",\n ) -> CalibraterBase:\n kwargs = {\n \"model\": onnx_model_path,\n \"op_types_to_calibrate\": operators_to_quantize or [],\n \"calibrate_method\": self.method,\n \"augmented_model_path\": augmented_model_name,\n }\n if parse(ort_version) > Version(\"1.10.0\"):\n kwargs[\"use_external_data_format\"] = use_external_data_format\n kwargs[\"extra_options\"] = {\n \"symmetric\": force_symmetric_range,\n \"num_bins\": self.num_bins,\n \"num_quantized_bins\": self.num_quantized_bins,\n \"percentile\": self.percentile,\n \"moving_average\": self.moving_average,\n \"averaging_constant\": self.averaging_constant,\n }\n return create_calibrator(**kwargs)\n\n\nclass AutoCalibrationConfig:\n @staticmethod\n def minmax(dataset: Dataset, moving_average: bool = False, averaging_constant: float = 0.01) -> CalibrationConfig:\n \"\"\"\n Args:\n dataset (`Dataset`):\n The dataset to use when performing the calibration step.\n moving_average (`bool`):\n Whether to compute the moving average of the minimum and maximum values.\n averaging_constant (`float`):\n The constant smoothing factor to use 
when computing the moving average of the minimum and maximum\n values.\n\n Returns:\n The calibration configuration.\n \"\"\"\n if moving_average and parse(ort_version) < Version(\"1.11.0\"):\n raise NotImplementedError(\n \"MinMax calibration using the moving average method is only implemented for onnxruntime >= 1.11.0\"\n )\n\n if moving_average and not 0 <= averaging_constant <= 1:\n raise ValueError(f\"Invalid averaging constant value ({averaging_constant}) should be within [0, 1]\")\n\n return CalibrationConfig(\n dataset_name=dataset.info.builder_name,\n dataset_config_name=dataset.info.config_name,\n dataset_split=str(dataset.split),\n dataset_num_samples=dataset.num_rows,\n method=CalibrationMethod.MinMax,\n moving_average=moving_average,\n averaging_constant=averaging_constant,\n )\n\n @staticmethod\n def entropy(\n dataset: Dataset,\n num_bins: int = 128,\n num_quantized_bins: int = 128,\n ) -> CalibrationConfig:\n \"\"\"\n Args:\n dataset (`Dataset`):\n The dataset to use when performing the calibration step.\n num_bins (`int`):\n The number of bins to use when creating the histogram.\n num_quantized_bins (`int`):\n The number of quantized bins used to find the optimal threshold when computing the activations\n quantization ranges.\n\n Returns:\n The calibration configuration.\n \"\"\"\n if parse(ort_version) < Version(\"1.11.0\"):\n raise NotImplementedError(\"Entropy calibration method is only implemented for onnxruntime >= 1.11.0\")\n\n if num_bins <= 0:\n raise ValueError(f\"Invalid value num_bins ({num_bins}) should be >= 1\")\n\n if num_quantized_bins <= 0:\n raise ValueError(f\"Invalid value num_quantized_bins ({num_quantized_bins}) should be >= 1\")\n\n return CalibrationConfig(\n dataset_name=dataset.info.builder_name,\n dataset_config_name=dataset.info.config_name,\n dataset_split=str(dataset.split),\n dataset_num_samples=dataset.num_rows,\n method=CalibrationMethod.Entropy,\n num_bins=num_bins,\n num_quantized_bins=num_quantized_bins,\n )\n\n @staticmethod\n def percentiles(dataset: Dataset, num_bins: int = 2048, percentile: float = 99.999) -> CalibrationConfig:\n \"\"\"\n Args:\n dataset (`Dataset`):\n The dataset to use when performing the calibration step.\n num_bins (`int`):\n The number of bins to use when creating the histogram.\n percentile (`float`):\n The percentile to use when computing the activations quantization ranges.\n\n Returns:\n The calibration configuration.\n \"\"\"\n if parse(ort_version) < Version(\"1.11.0\"):\n raise NotImplementedError(\"Percentile calibration method is only implemented for onnxruntime >= 1.11.0\")\n\n if num_bins <= 0:\n raise ValueError(f\"Invalid value num_bins ({num_bins}) should be >= 1\")\n\n if not 0 <= percentile <= 100:\n raise ValueError(f\"Invalid value percentile ({percentile}) should be within [0, 100]\")\n\n return CalibrationConfig(\n dataset_name=dataset.info.builder_name,\n dataset_config_name=dataset.info.config_name,\n dataset_split=str(dataset.split),\n dataset_num_samples=dataset.num_rows,\n method=CalibrationMethod.Percentile,\n num_bins=num_bins,\n percentile=percentile,\n )\n\n\n@dataclass\nclass QuantizationConfig:\n \"\"\"\n QuantizationConfig is the configuration class handling all the ONNX Runtime quantization parameters.\n\n Args:\n is_static (`bool`):\n Whether to apply static quantization or dynamic quantization.\n format (`QuantFormat`):\n Targeted ONNX Runtime quantization representation format.\n For the Operator Oriented (QOperator) format, all the quantized operators have their own ONNX 
definitions.\n For the Tensor Oriented (QDQ) format, the model is quantized by inserting QuantizeLinear / DeQuantizeLinear\n operators.\n mode (`QuantizationMode`, defaults to `QuantizationMode.QLinearOps`):\n Targeted ONNX Runtime quantization mode, default is QLinearOps to match QDQ format.\n When targeting dynamic quantization mode, the default value is `QuantizationMode.IntegerOps` whereas the\n default value for static quantization mode is `QuantizationMode.QLinearOps`.\n activations_dtype (`QuantType`, defaults to `QuantType.QUInt8`):\n The quantization data types to use for the activations.\n activations_symmetric (`bool`, defaults to `False`):\n Whether to apply symmetric quantization on the activations.\n weights_dtype (`QuantType`, defaults to `QuantType.QInt8`):\n The quantization data types to use for the weights.\n weights_symmetric (`bool`, defaults to `True`):\n Whether to apply symmetric quantization on the weights.\n per_channel (`bool`, defaults to `False`):\n Whether we should quantize per-channel (also known as \"per-row\"). Enabling this can increase overall\n accuracy while making the quantized model heavier.\n reduce_range (`bool`, defaults to `False`):\n Whether to use reduce-range 7-bits integers instead of 8-bits integers.\n nodes_to_quantize (`List[str]`, defaults to `[]`):\n List of the nodes names to quantize. When unspecified, all nodes will be quantized. If empty, all nodes being operators from `operators_to_quantize` will be quantized.\n nodes_to_exclude (`List[str]`, defaults to `[]`):\n List of the nodes names to exclude when applying quantization. The list of nodes in a model can be found loading the ONNX model through onnx.load, or through visual inspection with [netron](https://github.com/lutzroeder/netron).\n operators_to_quantize (`List[str]`):\n List of the operators types to quantize. Defaults to all quantizable operators for the given quantization mode and format. Quantizable operators can be found at https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py.\n qdq_add_pair_to_weight (`bool`, defaults to `False`):\n By default, floating-point weights are quantized and feed to solely inserted DeQuantizeLinear node.\n If set to True, the floating-point weights will remain and both QuantizeLinear / DeQuantizeLinear nodes\n will be inserted.\n qdq_dedicated_pair (`bool`, defaults to `False`):\n When inserting QDQ pair, multiple nodes can share a single QDQ pair as their inputs. If True, it will\n create an identical and dedicated QDQ pair for each node.\n qdq_op_type_per_channel_support_to_axis (`Dict[str, int]`):\n Set the channel axis for a specific operator type. 
Effective only when per channel quantization is\n supported and `per_channel` is set to True.\n \"\"\"\n\n is_static: bool\n format: QuantFormat\n mode: QuantizationMode = QuantizationMode.QLinearOps\n activations_dtype: QuantType = QuantType.QUInt8\n activations_symmetric: bool = False\n weights_dtype: QuantType = QuantType.QInt8\n weights_symmetric: bool = True\n per_channel: bool = False\n reduce_range: bool = False\n nodes_to_quantize: List[str] = field(default_factory=list)\n nodes_to_exclude: List[str] = field(default_factory=list)\n operators_to_quantize: List[str] = field(default_factory=list)\n qdq_add_pair_to_weight: bool = False\n qdq_dedicated_pair: bool = False\n qdq_op_type_per_channel_support_to_axis: Dict[str, int] = field(\n default_factory=lambda: ORT_DEFAULT_CHANNEL_FOR_OPERATORS\n )\n\n def __post_init__(self):\n ensure_valid_mode_or_raise(self.is_static, self.mode)\n ensure_valid_data_type_or_raise(self.is_static, self.activations_dtype, self.weights_dtype)\n\n # If needed, dynamically set operators_to_quantize default.\n if len(self.operators_to_quantize) == 0:\n _, _, operators_to_quantize = default_quantization_parameters(\n self.is_static, self.format, self.mode, self.operators_to_quantize\n )\n self.operators_to_quantize = operators_to_quantize\n\n @staticmethod\n def quantization_type_str(activations_dtype: QuantType, weights_dtype: QuantType) -> str:\n return (\n f\"{'s8' if activations_dtype == QuantType.QInt8 else 'u8'}\"\n f\"/\"\n f\"{'s8' if weights_dtype == QuantType.QInt8 else 'u8'}\"\n )\n\n @property\n def use_symmetric_calibration(self) -> bool:\n return self.activations_symmetric and self.weights_symmetric\n\n def __str__(self):\n return (\n f\"{self.format} (\"\n f\"mode: {self.mode}, \"\n f\"schema: {QuantizationConfig.quantization_type_str(self.activations_dtype, self.weights_dtype)}, \"\n f\"channel-wise: {self.per_channel})\"\n )\n\n\ndef ensure_valid_mode_or_raise(use_static_quantization: bool, mode: QuantizationMode):\n if not use_static_quantization and mode == QuantizationMode.QLinearOps:\n raise ValueError(\n \"Invalid combination of \"\n \"use_static_quantization = False \"\n \"and \"\n \"mode = QuantizationMode.QLinearOps. \"\n \"OnnxRuntime dynamic quantization requires mode = QuantizationMode.IntegerOps\"\n )\n\n\ndef ensure_valid_data_type_or_raise(\n use_static_quantization: bool, activations_dtype: QuantType, weights_dtype: QuantType\n):\n if not use_static_quantization and activations_dtype == QuantType.QInt8:\n raise ValueError(\n \"Invalid combination of \"\n \"use_static_quantization = False \"\n \"and \"\n \"activations_dtype = QuantType.QInt8. 
\"\n \"OnnxRuntime dynamic quantization requires activations_dtype = QuantType.QUInt8\"\n )\n\n if use_static_quantization and activations_dtype == QuantType.QInt8 and weights_dtype == QuantType.QUInt8:\n raise ValueError(\n \"Invalid combination of \"\n \"use_static_quantization = True, \"\n \"activations_dtype = QuantType.QInt8 \"\n \"and \"\n \"weights_dtype = QuantType.QUInt8.\"\n \"OnnxRuntime static quantization does not support \"\n \"activations_dtype = QuantType.QInt8 with weights_dtype = QuantType.QUInt8.\"\n )\n\n\ndef default_quantization_parameters(\n is_static: bool,\n format: Optional[QuantFormat] = None,\n mode: Optional[QuantizationMode] = None,\n operators_to_quantize: Optional[List[str]] = None,\n) -> Tuple[QuantFormat, QuantizationMode, List[str]]:\n if format is None:\n format = QuantFormat.QDQ if is_static else QuantFormat.QOperator\n\n if mode is None:\n mode = QuantizationMode.QLinearOps if is_static else QuantizationMode.IntegerOps\n\n if operators_to_quantize is None or len(operators_to_quantize) == 0:\n if is_static and format == QuantFormat.QDQ:\n operators_to_quantize = ORT_DEFAULT_OPS_STATIC_QUANTIZATION_QDQ\n elif is_static and mode == QuantizationMode.QLinearOps:\n operators_to_quantize = ORT_DEFAULT_OPS_STATIC_QUANTIZATION_QOPS\n elif not is_static and mode == QuantizationMode.IntegerOps:\n operators_to_quantize = ORT_DEFAULT_OPS_DYNAMIC_QUANTIZATION\n\n return format, mode, operators_to_quantize\n\n\nclass AutoQuantizationConfig:\n @staticmethod\n def arm64(\n is_static: bool,\n use_symmetric_activations: bool = False,\n use_symmetric_weights: bool = True,\n per_channel: bool = True,\n nodes_to_quantize: Optional[List[str]] = None,\n nodes_to_exclude: Optional[List[str]] = None,\n operators_to_quantize: Optional[List[str]] = None,\n ):\n \"\"\"\n Creates a [`~onnxruntime.QuantizationConfig`] fit for ARM64.\n\n Args:\n is_static (`bool`):\n Boolean flag to indicate whether we target static or dynamic quantization.\n use_symmetric_activations (`bool`, defaults to `False`):\n Whether to use symmetric quantization for activations.\n use_symmetric_weights (`bool`, defaults to `True`):\n Whether to use symmetric quantization for weights.\n per_channel (`bool`, defaults to `True`):\n Whether we should quantize per-channel (also known as \"per-row\"). Enabling this can\n increase overall accuracy while making the quantized model heavier.\n nodes_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to quantize. If `None`, all nodes being operators from `operators_to_quantize` will be quantized.\n nodes_to_exclude (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to exclude from quantization. The list of nodes in a model can be found loading the ONNX model through onnx.load, or through visual inspection with [netron](https://github.com/lutzroeder/netron).\n operators_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Type of nodes to perform quantization on. By default, all the quantizable operators will be quantized. 
Quantizable operators can be found at https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py.\n \"\"\"\n format, mode, operators_to_quantize = default_quantization_parameters(\n is_static, operators_to_quantize=operators_to_quantize\n )\n\n # u8/s8 is faster (than u8/u8) on lower-end ARM64 and identical on higher-end ARM64,\n # so let's use u8/s8 by default\n return QuantizationConfig(\n is_static=is_static,\n format=format,\n mode=mode,\n activations_dtype=QuantType.QUInt8,\n activations_symmetric=use_symmetric_activations,\n weights_dtype=QuantType.QInt8,\n weights_symmetric=use_symmetric_weights,\n per_channel=per_channel,\n reduce_range=False,\n nodes_to_quantize=nodes_to_quantize or [],\n nodes_to_exclude=nodes_to_exclude or [],\n operators_to_quantize=operators_to_quantize,\n )\n\n @staticmethod\n def avx2(\n is_static: bool,\n use_symmetric_activations: bool = False,\n use_symmetric_weights: bool = True,\n per_channel: bool = True,\n reduce_range: bool = False,\n nodes_to_quantize: Optional[List[str]] = None,\n nodes_to_exclude: Optional[List[str]] = None,\n operators_to_quantize: Optional[List[str]] = None,\n ) -> QuantizationConfig:\n \"\"\"\n Creates a [`~onnxruntime.QuantizationConfig`] fit for CPU with AVX2 instruction set.\n\n Args:\n is_static (`bool`):\n Boolean flag to indicate whether we target static or dynamic quantization.\n use_symmetric_activations (`bool`, defaults to `False`):\n Whether to use symmetric quantization for activations.\n use_symmetric_weights (`bool`, defaults to `True`):\n Whether to use symmetric quantization for weights.\n per_channel (`bool`, defaults to `True`):\n Whether we should quantize per-channel (also known as \"per-row\"). Enabling this can\n increase overall accuracy while making the quantized model heavier.\n reduce_range (`bool`, defaults to `False`):\n Indicate whether to use 8-bits integers (False) or reduce-range 7-bits integers (True).\n As a baseline, it is always recommended testing with full range (reduce_range = False) and then, if\n accuracy drop is significant, to try with reduced range (reduce_range = True).\n Intel's CPUs using AVX512 (non VNNI) can suffer from saturation issue when invoking\n the VPMADDUBSW instruction. To counter this, one should use 7-bits rather than 8-bits integers.\n nodes_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to quantize. If `None`, all nodes being operators from `operators_to_quantize` will be quantized.\n nodes_to_exclude (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to exclude from quantization. The list of nodes in a model can be found loading the ONNX model through onnx.load, or through visual inspection with [netron](https://github.com/lutzroeder/netron).\n operators_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Type of nodes to perform quantization on. By default, all the quantizable operators will be quantized. 
Quantizable operators can be found at https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py.\n \"\"\"\n format, mode, operators_to_quantize = default_quantization_parameters(\n is_static, operators_to_quantize=operators_to_quantize\n )\n\n return QuantizationConfig(\n is_static=is_static,\n format=format,\n mode=mode,\n activations_dtype=QuantType.QUInt8,\n activations_symmetric=use_symmetric_activations,\n weights_dtype=QuantType.QUInt8,\n weights_symmetric=use_symmetric_weights,\n per_channel=per_channel,\n reduce_range=reduce_range,\n nodes_to_quantize=nodes_to_quantize or [],\n nodes_to_exclude=nodes_to_exclude or [],\n operators_to_quantize=operators_to_quantize,\n )\n\n @staticmethod\n def avx512(\n is_static: bool,\n use_symmetric_activations: bool = False,\n use_symmetric_weights: bool = True,\n per_channel: bool = True,\n reduce_range: bool = False,\n nodes_to_quantize: Optional[List[str]] = None,\n nodes_to_exclude: Optional[List[str]] = None,\n operators_to_quantize: Optional[List[str]] = None,\n ) -> QuantizationConfig:\n \"\"\"\n Creates a [`~onnxruntime.QuantizationConfig`] fit for CPU with AVX512 instruction set.\n\n Args:\n is_static (`bool`):\n Boolean flag to indicate whether we target static or dynamic quantization.\n use_symmetric_activations (`bool`, defaults to `False`):\n Whether to use symmetric quantization for activations.\n use_symmetric_weights (`bool`, defaults to `True`):\n Whether to use symmetric quantization for weights.\n per_channel (`bool`, defaults to `True`):\n Whether we should quantize per-channel (also known as \"per-row\"). Enabling this can\n increase overall accuracy while making the quantized model heavier.\n reduce_range (`bool`, defaults to `False`):\n Indicate whether to use 8-bits integers (False) or reduce-range 7-bits integers (True).\n As a baseline, it is always recommended testing with full range (reduce_range = False) and then, if\n accuracy drop is significant, to try with reduced range (reduce_range = True).\n Intel's CPUs using AVX512 (non VNNI) can suffer from saturation issue when invoking\n the VPMADDUBSW instruction. To counter this, one should use 7-bits rather than 8-bits integers.\n nodes_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to quantize. If `None`, all nodes being operators from `operators_to_quantize` will be quantized.\n nodes_to_exclude (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to exclude from quantization. The list of nodes in a model can be found loading the ONNX model through onnx.load, or through visual inspection with [netron](https://github.com/lutzroeder/netron).\n operators_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Type of nodes to perform quantization on. By default, all the quantizable operators will be quantized. 
Quantizable operators can be found at https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py.\n \"\"\"\n format, mode, operators_to_quantize = default_quantization_parameters(\n is_static, operators_to_quantize=operators_to_quantize\n )\n\n return QuantizationConfig(\n is_static=is_static,\n format=format,\n mode=mode,\n activations_dtype=QuantType.QUInt8,\n activations_symmetric=use_symmetric_activations,\n weights_dtype=QuantType.QInt8,\n weights_symmetric=use_symmetric_weights,\n per_channel=per_channel,\n reduce_range=reduce_range,\n nodes_to_quantize=nodes_to_quantize or [],\n nodes_to_exclude=nodes_to_exclude or [],\n operators_to_quantize=operators_to_quantize,\n )\n\n @staticmethod\n def avx512_vnni(\n is_static: bool,\n use_symmetric_activations: bool = False,\n use_symmetric_weights: bool = True,\n per_channel: bool = True,\n nodes_to_quantize: Optional[List[str]] = None,\n nodes_to_exclude: Optional[List[str]] = None,\n operators_to_quantize: Optional[List[str]] = None,\n ) -> QuantizationConfig:\n \"\"\"\n Creates a [`~onnxruntime.QuantizationConfig`] fit for CPU with AVX512-VNNI instruction set.\n\n When targeting Intel AVX512-VNNI CPU underlying execution engine leverage the CPU instruction VPDPBUSD to\n compute \\\\i32 += i8(w) * u8(x)\\\\ within a single instruction.\n\n AVX512-VNNI (AVX512 Vector Neural Network Instruction)\n is an x86 extension Instruction set and is a part of the AVX-512 ISA.\n\n AVX512 VNNI is designed to accelerate convolutional neural network for INT8 inference.\n\n Args:\n is_static (`bool`):\n Boolean flag to indicate whether we target static or dynamic quantization.\n use_symmetric_activations (`bool`, defaults to `False`):\n Whether to use symmetric quantization for activations.\n use_symmetric_weights (`bool`, defaults to `True`):\n Whether to use symmetric quantization for weights.\n per_channel (`bool`, defaults to `True`):\n Whether we should quantize per-channel (also known as \"per-row\"). Enabling this can\n increase overall accuracy while making the quantized model heavier.\n nodes_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to quantize. If `None`, all nodes being operators from `operators_to_quantize` will be quantized.\n nodes_to_exclude (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to exclude from quantization. The list of nodes in a model can be found loading the ONNX model through onnx.load, or through visual inspection with [netron](https://github.com/lutzroeder/netron).\n operators_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Type of nodes to perform quantization on. By default, all the quantizable operators will be quantized. 
Quantizable operators can be found at https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py.\n \"\"\"\n format, mode, operators_to_quantize = default_quantization_parameters(\n is_static, operators_to_quantize=operators_to_quantize\n )\n\n return QuantizationConfig(\n is_static=is_static,\n format=format,\n mode=mode,\n activations_dtype=QuantType.QUInt8,\n activations_symmetric=use_symmetric_activations,\n weights_dtype=QuantType.QInt8,\n weights_symmetric=use_symmetric_weights,\n per_channel=per_channel,\n reduce_range=False,\n nodes_to_quantize=nodes_to_quantize or [],\n nodes_to_exclude=nodes_to_exclude or [],\n operators_to_quantize=operators_to_quantize,\n )\n\n @staticmethod\n def tensorrt(\n per_channel: bool = True,\n nodes_to_quantize: Optional[List[str]] = None,\n nodes_to_exclude: Optional[List[str]] = None,\n operators_to_quantize: Optional[List[str]] = None,\n ) -> QuantizationConfig:\n \"\"\"\n Creates a [`~onnxruntime.QuantizationConfig`] fit for TensorRT static quantization, targetting NVIDIA GPUs.\n\n Args:\n per_channel (`bool`, defaults to `True`):\n Whether we should quantize per-channel (also known as \"per-row\"). Enabling this can\n increase overall accuracy while making the quantized model heavier.\n nodes_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to quantize. If `None`, all nodes being operators from `operators_to_quantize` will be quantized.\n nodes_to_exclude (`Optional[List[str]]`, defaults to `None`):\n Specific nodes to exclude from quantization. The list of nodes in a model can be found loading the ONNX model through onnx.load, or through visual inspection with [netron](https://github.com/lutzroeder/netron).\n operators_to_quantize (`Optional[List[str]]`, defaults to `None`):\n Type of nodes to perform quantization on. By default, all the quantizable operators will be quantized. Quantizable operators can be found at https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/python/tools/quantization/registry.py.\n \"\"\"\n format, mode, operators_to_quantize = default_quantization_parameters(\n is_static=True, operators_to_quantize=operators_to_quantize\n )\n\n return QuantizationConfig(\n is_static=True,\n format=format,\n mode=mode,\n activations_dtype=QuantType.QInt8,\n activations_symmetric=True, # TRT only supports symmetric\n weights_dtype=QuantType.QInt8,\n weights_symmetric=True, # TRT only supports symmetric\n per_channel=per_channel,\n reduce_range=False,\n nodes_to_quantize=nodes_to_quantize or [],\n nodes_to_exclude=nodes_to_exclude or [],\n operators_to_quantize=operators_to_quantize,\n # `qdq_dedicated_pair=True` argument is required by TensorRT, since it expects a single node after each\n # `QuantizeLinear` + `DequantizeLinear` (QDQ) pair.\n qdq_add_pair_to_weight=True,\n # `qdq_dedicated_pair=True` is required because TensorRT expects QDQ pairs on weights, not only DequantizeLinear\n qdq_dedicated_pair=True,\n )\n\n\n@dataclass\nclass OptimizationConfig:\n \"\"\"\n OptimizationConfig is the configuration class handling all the ONNX Runtime optimization parameters.\n There are two stacks of optimizations:\n 1. The ONNX Runtime general-purpose optimization tool: it can work on any ONNX model.\n 2. 
The ONNX Runtime transformers optimization tool: it can only work on a subset of transformers models.\n\n Attributes:\n optimization_level (`int`, defaults to 1):\n Optimization level performed by ONNX Runtime of the loaded graph.\n Supported optimization level are 0, 1, 2 and 99.\n - 0: will disable all optimizations\n - 1: will enable basic optimizations\n - 2: will enable basic and extended optimizations, including complex node fusions applied to the nodes\n assigned to the CPU or CUDA execution provider, making the resulting optimized graph hardware dependent\n - 99: will enable all available optimizations including layout optimizations\n optimize_for_gpu (`bool`, defaults to `False`):\n Whether to optimize the model for GPU inference.\n The optimized graph might contain operators for GPU or CPU only when `optimization_level` > 1.\n fp16 (`bool`, defaults to `False`):\n Whether all weights and nodes should be converted from float32 to float16.\n enable_transformers_specific_optimizations (`bool`, defaults to `True`):\n Whether to only use `transformers` specific optimizations on top of ONNX Runtime general optimizations.\n disable_gelu_fusion (`bool`, defaults to `False`):\n Whether to disable the Gelu fusion.\n disable_layer_norm_fusion (`bool`, defaults to `False`):\n Whether to disable Layer Normalization fusion.\n disable_attention_fusion (`bool`, defaults to `False`):\n Whether to disable Attention fusion.\n disable_skip_layer_norm_fusion (`bool`, defaults to `False`):\n Whether to disable SkipLayerNormalization fusion.\n disable_bias_skip_layer_norm_fusion (`bool`, defaults to `False`):\n Whether to disable Add Bias and SkipLayerNormalization fusion.\n disable_bias_gelu_fusion (`bool`, defaults to `False`):\n Whether to disable Add Bias and Gelu / FastGelu fusion.\n disable_embed_layer_norm_fusion (`bool`, defaults to `True`):\n Whether to disable EmbedLayerNormalization fusion.\n The default value is set to `True` since this fusion is incompatible with ONNX Runtime quantization.\n enable_gelu_approximation (`bool`, defaults to `False`):\n Whether to enable Gelu / BiasGelu to FastGelu conversion.\n The default value is set to `False` since this approximation might slightly impact the model's accuracy.\n use_mask_index (`bool`, defaults to `False`):\n Whether to use mask index instead of raw attention mask in the attention operator.\n no_attention_mask (`bool`, defaults to `False`):\n Whether to not use attention masks. Only works for bert model type.\n disable_embed_layer_norm (`bool`, defaults to `True`):\n Whether to disable EmbedLayerNormalization fusion.\n The default value is set to `True` since this fusion is incompatible with ONNX Runtime quantization\n disable_shape_inference (`bool`, defaults to `False`):\n Whether to disable symbolic shape inference.\n The default value is set to `False` but symbolic shape inference might cause issues sometimes.\n use_multi_head_attention (`bool`, defaults to `False`):\n Experimental argument. Use MultiHeadAttention instead of Attention operator, which has merged weights for Q/K/V projection,\n which might be faster in some cases since 3 MatMul is merged into one.\"\n \"Note that MultiHeadAttention might be slower than Attention when qkv are not packed. \"\n enable_gemm_fast_gelu_fusion (`bool`, defaults to `False`):\n Enable GemmfastGelu fusion.\n use_raw_attention_mask (`bool`, defaults to `False`):\n Use raw attention mask. Use this option if your input is not right-side padding. 
This might deactivate fused attention and get worse performance.\n disable_group_norm_fusion (`bool`, defaults to `True`):\n Do not fuse GroupNorm. Only works for model_type=unet.\n disable_packed_kv (`bool`, defaults to `True`):\n Do not use packed kv in cross attention. Only works for model_type=unet.\n disable_rotary_embeddings (`bool`, defaults to `False`):\n Whether to disable Rotary Embedding fusion.\n \"\"\"\n\n optimization_level: int = 1\n optimize_for_gpu: bool = False\n\n fp16: bool = False\n\n optimize_with_onnxruntime_only: Optional[bool] = None\n enable_transformers_specific_optimizations: bool = True\n\n disable_gelu: Optional[bool] = None\n disable_gelu_fusion: bool = False\n\n disable_layer_norm: Optional[bool] = None\n disable_layer_norm_fusion: bool = False\n\n disable_attention: Optional[bool] = None\n disable_attention_fusion: bool = False\n\n disable_skip_layer_norm: Optional[bool] = None\n disable_skip_layer_norm_fusion: bool = False\n\n disable_bias_skip_layer_norm: Optional[bool] = None\n disable_bias_skip_layer_norm_fusion: bool = False\n\n disable_bias_gelu: Optional[bool] = None\n disable_bias_gelu_fusion: bool = False\n\n disable_embed_layer_norm: Optional[bool] = None\n disable_embed_layer_norm_fusion: bool = True\n\n enable_gelu_approximation: bool = False\n use_mask_index: bool = False\n no_attention_mask: bool = False\n disable_embed_layer_norm: bool = True\n disable_shape_inference: bool = False\n\n # ONNX Runtime 1.14.0 arguments\n use_multi_head_attention: bool = False\n enable_gemm_fast_gelu_fusion: bool = False\n use_raw_attention_mask: bool = False\n disable_group_norm_fusion: bool = True\n disable_packed_kv: bool = True\n\n # ONNX Runtime 1.16.2 arguments\n disable_rotary_embeddings: bool = False\n\n def __post_init__(self):\n def deprecate_renamed_attribute(old_name, new_name, mapping_func=None):\n if getattr(self, old_name, None) is not None:\n if mapping_func is None:\n\n def identity(x):\n return x\n\n mapping_func = identity\n setattr(self, new_name, mapping_func(getattr(self, old_name)))\n warnings.warn(\n f\"{old_name} will be deprecated soon, use {new_name} instead, {new_name} is set to \"\n f\"{getattr(self, new_name)}.\",\n FutureWarning,\n )\n\n deprecate_renamed_attribute(\n \"optimize_with_onnxruntime_only\",\n \"enable_transformers_specific_optimizations\",\n mapping_func=lambda x: not x,\n )\n\n deprecate_renamed_attribute(\"disable_gelu\", \"disable_bias_gelu_fusion\")\n deprecate_renamed_attribute(\"disable_layer_norm\", \"disable_layer_norm_fusion\")\n deprecate_renamed_attribute(\"disable_attention\", \"disable_attention_fusion\")\n deprecate_renamed_attribute(\"disable_skip_layer_norm\", \"disable_skip_layer_norm_fusion\")\n deprecate_renamed_attribute(\"disable_bias_skip_layer_norm\", \"disable_bias_skip_layer_norm_fusion\")\n deprecate_renamed_attribute(\"disable_bias_gelu\", \"disable_bias_gelu_fusion\")\n deprecate_renamed_attribute(\"disable_embed_layer_norm\", \"disable_embed_layer_norm_fusion\")\n\n def create_fusion_options(self, model_type: str) -> FusionOptions:\n class Box:\n pass\n\n args = Box()\n args.model_type = model_type\n attribute_map = {\n \"disable_gelu_fusion\": \"disable_gelu\",\n \"disable_layer_norm_fusion\": \"disable_layer_norm\",\n \"disable_attention_fusion\": \"disable_attention\",\n \"disable_skip_layer_norm_fusion\": \"disable_skip_layer_norm\",\n \"disable_bias_skip_layer_norm_fusion\": \"disable_bias_skip_layer_norm\",\n \"disable_bias_gelu_fusion\": \"disable_bias_gelu\",\n 
\"disable_embed_layer_norm_fusion\": \"disable_embed_layer_norm\",\n \"disable_group_norm_fusion\": \"disable_group_norm\",\n \"disable_packed_kv\": \"disable_packed_kv\",\n \"use_raw_attention_mask\": \"use_raw_attention_mask\",\n \"enable_gemm_fast_gelu_fusion\": \"enable_gemm_fast_gelu\",\n \"use_multi_head_attention\": \"use_multi_head_attention\",\n \"disable_rotary_embeddings\": \"disable_rotary_embeddings\",\n }\n for attr_name, fusion_attr_name in attribute_map.items():\n setattr(args, fusion_attr_name, getattr(self, attr_name))\n\n for attr, value in self.__dict__.items():\n if hasattr(args, attr):\n continue\n setattr(args, attr, value)\n\n return FusionOptions.parse(args)\n\n\nclass AutoOptimizationConfig:\n \"\"\"\n Factory to create common `OptimizationConfig`.\n \"\"\"\n\n _LEVELS = {\n \"O1\": {\n \"optimization_level\": 1,\n \"enable_transformers_specific_optimizations\": False,\n },\n \"O2\": {\n \"optimization_level\": 2,\n \"enable_transformers_specific_optimizations\": True,\n },\n \"O3\": {\n \"optimization_level\": 2,\n \"enable_transformers_specific_optimizations\": True,\n \"enable_gelu_approximation\": True,\n },\n \"O4\": {\n \"optimization_level\": 2,\n \"enable_transformers_specific_optimizations\": True,\n \"enable_gelu_approximation\": True,\n \"fp16\": True,\n },\n }\n\n @classmethod\n def with_optimization_level(cls, optimization_level: str, for_gpu: bool = False, **kwargs) -> OptimizationConfig:\n \"\"\"\n Creates an [`~OptimizationConfig`] with pre-defined arguments according to an optimization level.\n\n Args:\n optimization_level (`str`):\n The optimization level, the following values are allowed:\n - O1: Basic general optimizations\n - O2: Basic and extended general optimizations, transformers-specific fusions.\n - O3: Same as O2 with Fast Gelu approximation.\n - O4: Same as O3 with mixed precision.\n for_gpu (`bool`, defaults to `False`):\n Whether the model to optimize will run on GPU, some optimizations depends on the hardware the model\n will run on. Only needed for optimization_level > 1.\n kwargs (`Dict[str, Any]`):\n Arguments to provide to the [`~OptimizationConfig`] constructor.\n\n Returns:\n `OptimizationConfig`: The `OptimizationConfig` corresponding to the requested optimization level.\n \"\"\"\n if optimization_level not in cls._LEVELS:\n raise ValueError(\n f\"optimization_level must be in {', '.join(cls._LEVELS.keys())}, got {optimization_level}\"\n )\n\n if optimization_level == \"O4\":\n if for_gpu is False:\n logger.warning(\"Overridding for_gpu=False to for_gpu=True as half precision is available only on GPU.\")\n for_gpu = True\n\n return OptimizationConfig(optimize_for_gpu=for_gpu, **cls._LEVELS[optimization_level], **kwargs)\n\n @classmethod\n def O1(cls, for_gpu: bool = False, **kwargs) -> OptimizationConfig:\n \"\"\"\n Creates an O1 [`~OptimizationConfig`].\n\n Args:\n for_gpu (`bool`, defaults to `False`):\n Whether the model to optimize will run on GPU, some optimizations depends on the hardware the model\n will run on. 
Only needed for optimization_level > 1.\n kwargs (`Dict[str, Any]`):\n Arguments to provide to the [`~OptimizationConfig`] constructor.\n\n Returns:\n `OptimizationConfig`: The `OptimizationConfig` corresponding to the O1 optimization level.\n \"\"\"\n return cls.with_optimization_level(\"O1\", for_gpu=for_gpu, **kwargs)\n\n @classmethod\n def O2(cls, for_gpu: bool = False, **kwargs) -> OptimizationConfig:\n \"\"\"\n Creates an O2 [`~OptimizationConfig`].\n\n Args:\n for_gpu (`bool`, defaults to `False`):\n Whether the model to optimize will run on GPU, some optimizations depends on the hardware the model\n will run on. Only needed for optimization_level > 1.\n kwargs (`Dict[str, Any]`):\n Arguments to provide to the [`~OptimizationConfig`] constructor.\n\n Returns:\n `OptimizationConfig`: The `OptimizationConfig` corresponding to the O2 optimization level.\n \"\"\"\n return cls.with_optimization_level(\"O2\", for_gpu=for_gpu, **kwargs)\n\n @classmethod\n def O3(cls, for_gpu: bool = False, **kwargs) -> OptimizationConfig:\n \"\"\"\n Creates an O3 [`~OptimizationConfig`].\n\n Args:\n for_gpu (`bool`, defaults to `False`):\n Whether the model to optimize will run on GPU, some optimizations depends on the hardware the model\n will run on. Only needed for optimization_level > 1.\n kwargs (`Dict[str, Any]`):\n Arguments to provide to the [`~OptimizationConfig`] constructor.\n\n Returns:\n `OptimizationConfig`: The `OptimizationConfig` corresponding to the O3 optimization level.\n \"\"\"\n return cls.with_optimization_level(\"O3\", for_gpu=for_gpu, **kwargs)\n\n @classmethod\n def O4(cls, for_gpu: bool = True, **kwargs) -> OptimizationConfig:\n \"\"\"\n Creates an O4 [`~OptimizationConfig`].\n\n Args:\n for_gpu (`bool`, defaults to `False`):\n Whether the model to optimize will run on GPU, some optimizations depends on the hardware the model\n will run on. 
Only needed for optimization_level > 1.\n kwargs (`Dict[str, Any]`):\n Arguments to provide to the [`~OptimizationConfig`] constructor.\n\n Returns:\n `OptimizationConfig`: The `OptimizationConfig` corresponding to the O4 optimization level.\n \"\"\"\n return cls.with_optimization_level(\"O4\", for_gpu=for_gpu, **kwargs)\n\n\nclass ORTConfig(BaseConfig):\n \"\"\"\n ORTConfig is the configuration class handling all the ONNX Runtime parameters related to the ONNX IR model export,\n optimization and quantization parameters.\n\n Attributes:\n opset (`Optional[int]`, defaults to `None`):\n ONNX opset version to export the model with.\n use_external_data_format (`bool`, defaults to `False`):\n Allow exporting model >= than 2Gb.\n one_external_file (`bool`, defaults to `True`):\n When `use_external_data_format=True`, whether to save all tensors to one external file.\n If false, save each tensor to a file named with the tensor name.\n (Can not be set to `False` for the quantization)\n optimization (`Optional[OptimizationConfig]`, defaults to `None`):\n Specify a configuration to optimize ONNX Runtime model\n quantization (`Optional[QuantizationConfig]`, defaults to `None`):\n Specify a configuration to quantize ONNX Runtime model\n \"\"\"\n\n CONFIG_NAME = \"ort_config.json\"\n FULL_CONFIGURATION_FILE = \"ort_config.json\"\n\n def __init__(\n self,\n opset: Optional[int] = None,\n use_external_data_format: bool = False,\n one_external_file: bool = True,\n optimization: Optional[OptimizationConfig] = None,\n quantization: Optional[QuantizationConfig] = None,\n **kwargs,\n ):\n super().__init__()\n self.opset = opset\n self.use_external_data_format = use_external_data_format\n self.one_external_file = one_external_file\n self.optimization = self.dataclass_to_dict(optimization)\n self.quantization = self.dataclass_to_dict(quantization)\n self.optimum_version = kwargs.pop(\"optimum_version\", None)\n\n @staticmethod\n def dataclass_to_dict(config) -> dict:\n new_config = {}\n if config is None:\n return new_config\n if isinstance(config, dict):\n return config\n for k, v in asdict(config).items():\n if isinstance(v, Enum):\n v = v.name\n elif isinstance(v, list):\n v = [elem.name if isinstance(elem, Enum) else elem for elem in v]\n new_config[k] = v\n return new_config\n", "output": ["ensure_valid_data_type_or_raise", "ensure_valid_mode_or_raise", "default_quantization_parameters", "CalibrationConfig", "AutoCalibrationConfig", "ORTConfig", "QuantizationConfig", "AutoQuantizationConfig", "AutoOptimizationConfig", "OptimizationConfig", "Box"], "metadata": {"file_path": "optimum-main/optimum/onnxruntime/configuration.py", "file_length": 13491, "symbol_dict": [{"symbol": "ensure_valid_mode_or_raise", "type": "mannual_defined_function", "byte_location": 15130, "location": 4134}, {"symbol": "ensure_valid_data_type_or_raise", "type": "mannual_defined_function", "byte_location": 15577, "location": 4255}, {"symbol": "default_quantization_parameters", "type": "mannual_defined_function", "byte_location": 16575, "location": 4540}, {"symbol": "ORTConfig", "type": "mannual_defined_class", "byte_location": 47191, "location": 12868}, {"symbol": "AutoQuantizationConfig", "type": "mannual_defined_class", "byte_location": 17570, "location": 4857}, {"symbol": "OptimizationConfig", "type": "mannual_defined_class", "byte_location": 32881, "location": 8955}, {"symbol": "AutoOptimizationConfig", "type": "mannual_defined_class", "byte_location": 41852, "location": 11428}, {"symbol": "QuantizationConfig", "type": 
"mannual_defined_class", "byte_location": 9575, "location": 2602}, {"symbol": "Box", "type": "mannual_defined_class", "byte_location": 40543, "location": 11019}, {"symbol": "AutoCalibrationConfig", "type": "mannual_defined_class", "byte_location": 5287, "location": 1445}, {"symbol": "CalibrationConfig", "type": "mannual_defined_class", "byte_location": 1955, "location": 580}]}} {"input": "import argparse\nimport gc\nimport hashlib\nimport itertools\nimport logging\nimport math\nimport os\nimport threading\nimport warnings\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport datasets\nimport diffusers\nimport numpy as np\nimport psutil\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nimport transformers\nfrom accelerate import Accelerator\nfrom accelerate.logging import get_logger\nfrom accelerate.utils import set_seed\nfrom diffusers import (\n AutoencoderKL,\n DDPMScheduler,\n DiffusionPipeline,\n DPMSolverMultistepScheduler,\n UNet2DConditionModel,\n)\nfrom diffusers.optimization import get_scheduler\nfrom diffusers.utils import check_min_version\nfrom diffusers.utils.import_utils import is_xformers_available\nfrom huggingface_hub import HfFolder, Repository, whoami\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom tqdm.auto import tqdm\nfrom transformers import AutoTokenizer, PretrainedConfig\n\nfrom peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model\n\n\n# Will error if the minimal version of diffusers is not installed. Remove at your own risks.\ncheck_min_version(\"0.10.0.dev0\")\n\nlogger = get_logger(__name__)\n\nUNET_TARGET_MODULES = [\n \"to_q\",\n \"to_k\",\n \"to_v\",\n \"proj\",\n \"proj_in\",\n \"proj_out\",\n \"conv\",\n \"conv1\",\n \"conv2\",\n \"conv_shortcut\",\n \"to_out.0\",\n \"time_emb_proj\",\n \"ff.net.2\",\n]\n\nTEXT_ENCODER_TARGET_MODULES = [\"fc1\", \"fc2\", \"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\"]\n\n\ndef import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):\n text_encoder_config = PretrainedConfig.from_pretrained(\n pretrained_model_name_or_path,\n subfolder=\"text_encoder\",\n revision=revision,\n )\n model_class = text_encoder_config.architectures[0]\n\n if model_class == \"CLIPTextModel\":\n from transformers import CLIPTextModel\n\n return CLIPTextModel\n elif model_class == \"RobertaSeriesModelWithTransformation\":\n from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation\n\n return RobertaSeriesModelWithTransformation\n else:\n raise ValueError(f\"{model_class} is not supported.\")\n\n\ndef create_unet_adapter_config(args: argparse.Namespace) -> Union[LoraConfig, LoHaConfig, LoKrConfig]:\n if args.adapter == \"full\":\n raise ValueError(\"Cannot create unet adapter config for full parameter\")\n\n if args.adapter == \"lora\":\n config = LoraConfig(\n r=args.unet_r,\n lora_alpha=args.unet_alpha,\n target_modules=UNET_TARGET_MODULES,\n lora_dropout=args.unet_dropout,\n bias=args.unet_bias,\n init_lora_weights=True,\n )\n elif args.adapter == \"loha\":\n config = LoHaConfig(\n r=args.unet_r,\n alpha=args.unet_alpha,\n target_modules=UNET_TARGET_MODULES,\n rank_dropout=args.unet_rank_dropout,\n module_dropout=args.unet_module_dropout,\n use_effective_conv2d=args.unet_use_effective_conv2d,\n init_weights=True,\n )\n elif args.adapter == \"lokr\":\n config = LoKrConfig(\n r=args.unet_r,\n alpha=args.unet_alpha,\n 
target_modules=UNET_TARGET_MODULES,\n rank_dropout=args.unet_rank_dropout,\n module_dropout=args.unet_module_dropout,\n use_effective_conv2d=args.unet_use_effective_conv2d,\n decompose_both=args.unet_decompose_both,\n decompose_factor=args.unet_decompose_factor,\n init_weights=True,\n )\n else:\n raise ValueError(f\"Unknown adapter type {args.adapter}\")\n\n return config\n\n\ndef create_text_encoder_adapter_config(args: argparse.Namespace) -> Union[LoraConfig, LoHaConfig, LoKrConfig]:\n if args.adapter == \"full\":\n raise ValueError(\"Cannot create text_encoder adapter config for full parameter\")\n\n if args.adapter == \"lora\":\n config = LoraConfig(\n r=args.te_r,\n lora_alpha=args.te_alpha,\n target_modules=TEXT_ENCODER_TARGET_MODULES,\n lora_dropout=args.te_dropout,\n bias=args.te_bias,\n init_lora_weights=True,\n )\n elif args.adapter == \"loha\":\n config = LoHaConfig(\n r=args.te_r,\n alpha=args.te_alpha,\n target_modules=TEXT_ENCODER_TARGET_MODULES,\n rank_dropout=args.te_rank_dropout,\n module_dropout=args.te_module_dropout,\n init_weights=True,\n )\n elif args.adapter == \"lokr\":\n config = LoKrConfig(\n r=args.te_r,\n alpha=args.te_alpha,\n target_modules=TEXT_ENCODER_TARGET_MODULES,\n rank_dropout=args.te_rank_dropout,\n module_dropout=args.te_module_dropout,\n decompose_both=args.te_decompose_both,\n decompose_factor=args.te_decompose_factor,\n init_weights=True,\n )\n else:\n raise ValueError(f\"Unknown adapter type {args.adapter}\")\n\n return config\n\n\ndef parse_args(input_args=None):\n parser = argparse.ArgumentParser(description=\"Simple example of a training script.\")\n parser.add_argument(\n \"--pretrained_model_name_or_path\",\n type=str,\n default=None,\n required=True,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n )\n parser.add_argument(\n \"--revision\",\n type=str,\n default=None,\n required=False,\n help=\"Revision of pretrained model identifier from huggingface.co/models.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n type=str,\n default=None,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--instance_data_dir\",\n type=str,\n default=None,\n required=True,\n help=\"A folder containing the training data of instance images.\",\n )\n parser.add_argument(\n \"--class_data_dir\",\n type=str,\n default=None,\n required=False,\n help=\"A folder containing the training data of class images.\",\n )\n parser.add_argument(\n \"--instance_prompt\",\n type=str,\n default=None,\n required=True,\n help=\"The prompt with identifier specifying the instance\",\n )\n parser.add_argument(\n \"--class_prompt\",\n type=str,\n default=None,\n help=\"The prompt to specify images in the same class as provided instance images.\",\n )\n parser.add_argument(\n \"--with_prior_preservation\",\n default=False,\n action=\"store_true\",\n help=\"Flag to add prior preservation loss.\",\n )\n parser.add_argument(\"--prior_loss_weight\", type=float, default=1.0, help=\"The weight of prior preservation loss.\")\n parser.add_argument(\n \"--num_class_images\",\n type=int,\n default=100,\n help=(\n \"Minimal class images for prior preservation loss. 
If there are not enough images already present in\"\n \" class_data_dir, additional images will be sampled with class_prompt.\"\n ),\n )\n parser.add_argument(\n \"--validation_prompt\",\n type=str,\n default=None,\n help=\"A prompt that is used during validation to verify that the model is learning.\",\n )\n parser.add_argument(\n \"--num_validation_images\",\n type=int,\n default=4,\n help=\"Number of images that should be generated during validation with `validation_prompt`.\",\n )\n parser.add_argument(\n \"--validation_steps\",\n type=int,\n default=100,\n help=(\n \"Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt\"\n \" `args.validation_prompt` multiple times: `args.num_validation_images`.\"\n ),\n )\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"text-inversion-model\",\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument(\n \"--resolution\",\n type=int,\n default=512,\n help=(\n \"The resolution for input images, all the images in the train/validation dataset will be resized to this\"\n \" resolution\"\n ),\n )\n parser.add_argument(\n \"--center_crop\", action=\"store_true\", help=\"Whether to center crop images before resizing to resolution\"\n )\n parser.add_argument(\"--train_text_encoder\", action=\"store_true\", help=\"Whether to train the text encoder\")\n\n parser.add_argument(\n \"--train_batch_size\", type=int, default=4, help=\"Batch size (per device) for the training dataloader.\"\n )\n parser.add_argument(\n \"--sample_batch_size\", type=int, default=4, help=\"Batch size (per device) for sampling images.\"\n )\n parser.add_argument(\"--num_train_epochs\", type=int, default=1)\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--checkpointing_steps\",\n type=int,\n default=500,\n help=(\n \"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final\"\n \" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming\"\n \" training using `--resume_from_checkpoint`.\"\n ),\n )\n parser.add_argument(\n \"--resume_from_checkpoint\",\n type=str,\n default=None,\n help=(\n \"Whether training should be resumed from a previous checkpoint. Use a path saved by\"\n ' `--checkpointing_steps`, or `\"latest\"` to automatically select the last available checkpoint.'\n ),\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--gradient_checkpointing\",\n action=\"store_true\",\n help=\"Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-6,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\n \"--scale_lr\",\n action=\"store_true\",\n default=False,\n help=\"Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.\",\n )\n parser.add_argument(\n \"--lr_scheduler\",\n type=str,\n default=\"constant\",\n help=(\n 'The scheduler type to use. 
Choose between [\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\",'\n ' \"constant\", \"constant_with_warmup\"]'\n ),\n )\n parser.add_argument(\n \"--lr_warmup_steps\", type=int, default=500, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument(\n \"--lr_num_cycles\",\n type=int,\n default=1,\n help=\"Number of hard resets of the lr in cosine_with_restarts scheduler.\",\n )\n parser.add_argument(\"--lr_power\", type=float, default=1.0, help=\"Power factor of the polynomial scheduler.\")\n parser.add_argument(\n \"--use_8bit_adam\", action=\"store_true\", help=\"Whether or not to use 8-bit Adam from bitsandbytes.\"\n )\n parser.add_argument(\"--adam_beta1\", type=float, default=0.9, help=\"The beta1 parameter for the Adam optimizer.\")\n parser.add_argument(\"--adam_beta2\", type=float, default=0.999, help=\"The beta2 parameter for the Adam optimizer.\")\n parser.add_argument(\"--adam_weight_decay\", type=float, default=1e-2, help=\"Weight decay to use.\")\n parser.add_argument(\"--adam_epsilon\", type=float, default=1e-08, help=\"Epsilon value for the Adam optimizer\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n parser.add_argument(\"--hub_token\", type=str, default=None, help=\"The token to use to push to the Model Hub.\")\n parser.add_argument(\n \"--hub_model_id\",\n type=str,\n default=None,\n help=\"The name of the repository to keep in sync with the local `output_dir`.\",\n )\n parser.add_argument(\n \"--logging_dir\",\n type=str,\n default=\"logs\",\n help=(\n \"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to\"\n \" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***.\"\n ),\n )\n parser.add_argument(\n \"--allow_tf32\",\n action=\"store_true\",\n help=(\n \"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see\"\n \" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices\"\n ),\n )\n parser.add_argument(\n \"--report_to\",\n type=str,\n default=\"tensorboard\",\n help=(\n 'The integration to report the results and logs to. Supported platforms are `\"tensorboard\"`'\n ' (default), `\"wandb\"` and `\"comet_ml\"`. Use `\"all\"` to report to all integrations.'\n ),\n )\n parser.add_argument(\n \"--wandb_key\",\n type=str,\n default=None,\n help=(\"If report to option is set to wandb, api-key for wandb used for login to wandb \"),\n )\n parser.add_argument(\n \"--wandb_project_name\",\n type=str,\n default=None,\n help=(\"If report to option is set to wandb, project name in wandb for log tracking \"),\n )\n parser.add_argument(\n \"--mixed_precision\",\n type=str,\n default=None,\n choices=[\"no\", \"fp16\", \"bf16\"],\n help=(\n \"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=\"\n \" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the\"\n \" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config.\"\n ),\n )\n parser.add_argument(\n \"--prior_generation_precision\",\n type=str,\n default=None,\n choices=[\"no\", \"fp32\", \"fp16\", \"bf16\"],\n help=(\n \"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=\"\n \" 1.10.and an Nvidia Ampere GPU. 
Default to fp16 if a GPU is available else fp32.\"\n ),\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\n \"--enable_xformers_memory_efficient_attention\", action=\"store_true\", help=\"Whether or not to use xformers.\"\n )\n\n # Adapter arguments\n subparsers = parser.add_subparsers(dest=\"adapter\")\n\n # Dummy subparser to train whole model\n subparsers.add_parser(\"full\", help=\"Train full model without adapters\")\n\n # LoRA adapter\n lora = subparsers.add_parser(\"lora\", help=\"Use LoRA adapter\")\n lora.add_argument(\"--unet_r\", type=int, default=8, help=\"LoRA rank for unet\")\n lora.add_argument(\"--unet_alpha\", type=int, default=8, help=\"LoRA alpha for unet\")\n lora.add_argument(\"--unet_dropout\", type=float, default=0.0, help=\"LoRA dropout probability for unet\")\n lora.add_argument(\n \"--unet_bias\",\n type=str,\n default=\"none\",\n help=\"Bias type for LoRA. Can be 'none', 'all' or 'lora_only'\",\n )\n lora.add_argument(\n \"--te_r\", type=int, default=8, help=\"LoRA rank for text_encoder, only used if `train_text_encoder` is True\"\n )\n lora.add_argument(\n \"--te_alpha\",\n type=int,\n default=8,\n help=\"LoRA alpha for text_encoder, only used if `train_text_encoder` is True\",\n )\n lora.add_argument(\n \"--te_dropout\",\n type=float,\n default=0.0,\n help=\"LoRA dropout probability for text_encoder, only used if `train_text_encoder` is True\",\n )\n lora.add_argument(\n \"--te_bias\",\n type=str,\n default=\"none\",\n help=\"Bias type for LoRA. Can be 'none', 'all' or 'lora_only', only used if `train_text_encoder` is True\",\n )\n\n # LoHa adapter\n loha = subparsers.add_parser(\"loha\", help=\"Use LoHa adapter\")\n loha.add_argument(\"--unet_r\", type=int, default=8, help=\"LoHa rank for unet\")\n loha.add_argument(\"--unet_alpha\", type=int, default=8, help=\"LoHa alpha for unet\")\n loha.add_argument(\"--unet_rank_dropout\", type=float, default=0.0, help=\"LoHa rank_dropout probability for unet\")\n loha.add_argument(\n \"--unet_module_dropout\", type=float, default=0.0, help=\"LoHa module_dropout probability for unet\"\n )\n loha.add_argument(\n \"--unet_use_effective_conv2d\",\n action=\"store_true\",\n help=\"Use parameter effective decomposition in unet for Conv2d 3x3 with ksize > 1\",\n )\n loha.add_argument(\n \"--te_r\", type=int, default=8, help=\"LoHa rank for text_encoder, only used if `train_text_encoder` is True\"\n )\n loha.add_argument(\n \"--te_alpha\",\n type=int,\n default=8,\n help=\"LoHa alpha for text_encoder, only used if `train_text_encoder` is True\",\n )\n loha.add_argument(\n \"--te_rank_dropout\",\n type=float,\n default=0.0,\n help=\"LoHa rank_dropout probability for text_encoder, only used if `train_text_encoder` is True\",\n )\n loha.add_argument(\n \"--te_module_dropout\",\n type=float,\n default=0.0,\n help=\"LoHa module_dropout probability for text_encoder, only used if `train_text_encoder` is True\",\n )\n\n # LoKr adapter\n lokr = subparsers.add_parser(\"lokr\", help=\"Use LoKr adapter\")\n lokr.add_argument(\"--unet_r\", type=int, default=8, help=\"LoKr rank for unet\")\n lokr.add_argument(\"--unet_alpha\", type=int, default=8, help=\"LoKr alpha for unet\")\n lokr.add_argument(\"--unet_rank_dropout\", type=float, default=0.0, help=\"LoKr rank_dropout probability for unet\")\n lokr.add_argument(\n \"--unet_module_dropout\", type=float, default=0.0, help=\"LoKr module_dropout probability for unet\"\n )\n lokr.add_argument(\n 
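# Like LoHa above, LoKr can apply the parameter-effective decomposition to 3x3 convolutions in the UNet,\n # and it additionally exposes the Kronecker-product options (decompose_both, decompose_factor) below.\n 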
\"--unet_use_effective_conv2d\",\n action=\"store_true\",\n help=\"Use parameter effective decomposition in unet for Conv2d 3x3 with ksize > 1\",\n )\n lokr.add_argument(\n \"--unet_decompose_both\", action=\"store_true\", help=\"Decompose left matrix in kronecker product for unet\"\n )\n lokr.add_argument(\n \"--unet_decompose_factor\", type=int, default=-1, help=\"Decompose factor in kronecker product for unet\"\n )\n lokr.add_argument(\n \"--te_r\", type=int, default=8, help=\"LoKr rank for text_encoder, only used if `train_text_encoder` is True\"\n )\n lokr.add_argument(\n \"--te_alpha\",\n type=int,\n default=8,\n help=\"LoKr alpha for text_encoder, only used if `train_text_encoder` is True\",\n )\n lokr.add_argument(\n \"--te_rank_dropout\",\n type=float,\n default=0.0,\n help=\"LoKr rank_dropout probability for text_encoder, only used if `train_text_encoder` is True\",\n )\n lokr.add_argument(\n \"--te_module_dropout\",\n type=float,\n default=0.0,\n help=\"LoKr module_dropout probability for text_encoder, only used if `train_text_encoder` is True\",\n )\n lokr.add_argument(\n \"--te_decompose_both\",\n action=\"store_true\",\n help=\"Decompose left matrix in kronecker product for text_encoder, only used if `train_text_encoder` is True\",\n )\n lokr.add_argument(\n \"--te_decompose_factor\",\n type=int,\n default=-1,\n help=\"Decompose factor in kronecker product for text_encoder, only used if `train_text_encoder` is True\",\n )\n\n if input_args is not None:\n args = parser.parse_args(input_args)\n else:\n args = parser.parse_args()\n\n env_local_rank = int(os.environ.get(\"LOCAL_RANK\", -1))\n if env_local_rank != -1 and env_local_rank != args.local_rank:\n args.local_rank = env_local_rank\n\n if args.with_prior_preservation:\n if args.class_data_dir is None:\n raise ValueError(\"You must specify a data directory for class images.\")\n if args.class_prompt is None:\n raise ValueError(\"You must specify prompt for class images.\")\n else:\n # logger is not available yet\n if args.class_data_dir is not None:\n warnings.warn(\"You need not use --class_data_dir without --with_prior_preservation.\")\n if args.class_prompt is not None:\n warnings.warn(\"You need not use --class_prompt without --with_prior_preservation.\")\n\n return args\n\n\n# Converting Bytes to Megabytes\ndef b2mb(x):\n return int(x / 2**20)\n\n\n# This context manager is used to track the peak memory usage of the process\nclass TorchTracemalloc:\n def __enter__(self):\n gc.collect()\n torch.cuda.empty_cache()\n torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero\n self.begin = torch.cuda.memory_allocated()\n self.process = psutil.Process()\n\n self.cpu_begin = self.cpu_mem_used()\n self.peak_monitoring = True\n peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)\n peak_monitor_thread.daemon = True\n peak_monitor_thread.start()\n return self\n\n def cpu_mem_used(self):\n \"\"\"get resident set size memory for the current process\"\"\"\n return self.process.memory_info().rss\n\n def peak_monitor_func(self):\n self.cpu_peak = -1\n\n while True:\n self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak)\n\n # can't sleep or will not catch the peak right (this comment is here on purpose)\n # time.sleep(0.001) # 1msec\n\n if not self.peak_monitoring:\n break\n\n def __exit__(self, *exc):\n self.peak_monitoring = False\n\n gc.collect()\n torch.cuda.empty_cache()\n self.end = torch.cuda.memory_allocated()\n self.peak = torch.cuda.max_memory_allocated()\n self.used = b2mb(self.end - 
self.begin)\n self.peaked = b2mb(self.peak - self.begin)\n\n self.cpu_end = self.cpu_mem_used()\n self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)\n self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)\n # print(f\"delta used/peak {self.used:4d}/{self.peaked:4d}\")\n\n\nclass DreamBoothDataset(Dataset):\n \"\"\"\n A dataset to prepare the instance and class images with the prompts for fine-tuning the model.\n It pre-processes the images and tokenizes the prompts.\n \"\"\"\n\n def __init__(\n self,\n instance_data_root,\n instance_prompt,\n tokenizer,\n class_data_root=None,\n class_prompt=None,\n size=512,\n center_crop=False,\n ):\n self.size = size\n self.center_crop = center_crop\n self.tokenizer = tokenizer\n\n self.instance_data_root = Path(instance_data_root)\n if not self.instance_data_root.exists():\n raise ValueError(\"Instance images root doesn't exist.\")\n\n self.instance_images_path = list(Path(instance_data_root).iterdir())\n self.num_instance_images = len(self.instance_images_path)\n self.instance_prompt = instance_prompt\n self._length = self.num_instance_images\n\n if class_data_root is not None:\n self.class_data_root = Path(class_data_root)\n self.class_data_root.mkdir(parents=True, exist_ok=True)\n self.class_images_path = list(self.class_data_root.iterdir())\n self.num_class_images = len(self.class_images_path)\n self._length = max(self.num_class_images, self.num_instance_images)\n self.class_prompt = class_prompt\n else:\n self.class_data_root = None\n\n self.image_transforms = transforms.Compose(\n [\n transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),\n transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n\n def __len__(self):\n return self._length\n\n def __getitem__(self, index):\n example = {}\n instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])\n if instance_image.mode != \"RGB\":\n instance_image = instance_image.convert(\"RGB\")\n example[\"instance_images\"] = self.image_transforms(instance_image)\n example[\"instance_prompt_ids\"] = self.tokenizer(\n self.instance_prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n if self.class_data_root:\n class_image = Image.open(self.class_images_path[index % self.num_class_images])\n if class_image.mode != \"RGB\":\n class_image = class_image.convert(\"RGB\")\n example[\"class_images\"] = self.image_transforms(class_image)\n example[\"class_prompt_ids\"] = self.tokenizer(\n self.class_prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n return example\n\n\ndef collate_fn(examples, with_prior_preservation=False):\n input_ids = [example[\"instance_prompt_ids\"] for example in examples]\n pixel_values = [example[\"instance_images\"] for example in examples]\n\n # Concat class and instance examples for prior preservation.\n # We do this to avoid doing two forward passes.\n if with_prior_preservation:\n input_ids += [example[\"class_prompt_ids\"] for example in examples]\n pixel_values += [example[\"class_images\"] for example in examples]\n\n pixel_values = torch.stack(pixel_values)\n pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()\n\n input_ids = torch.cat(input_ids, dim=0)\n\n batch = {\n \"input_ids\": input_ids,\n \"pixel_values\": 
pixel_values,\n }\n return batch\n\n\nclass PromptDataset(Dataset):\n \"A simple dataset to prepare the prompts to generate class images on multiple GPUs.\"\n\n def __init__(self, prompt, num_samples):\n self.prompt = prompt\n self.num_samples = num_samples\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, index):\n example = {}\n example[\"prompt\"] = self.prompt\n example[\"index\"] = index\n return example\n\n\ndef get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):\n if token is None:\n token = HfFolder.get_token()\n if organization is None:\n username = whoami(token)[\"name\"]\n return f\"{username}/{model_id}\"\n else:\n return f\"{organization}/{model_id}\"\n\n\ndef main(args):\n logging_dir = Path(args.output_dir, args.logging_dir)\n\n accelerator = Accelerator(\n gradient_accumulation_steps=args.gradient_accumulation_steps,\n mixed_precision=args.mixed_precision,\n log_with=args.report_to,\n project_dir=logging_dir,\n )\n if args.report_to == \"wandb\":\n import wandb\n\n wandb.login(key=args.wandb_key)\n wandb.init(project=args.wandb_project_name)\n # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate\n # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.\n # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.\n if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:\n raise ValueError(\n \"Gradient accumulation is not supported when training the text encoder in distributed training. \"\n \"Please set gradient_accumulation_steps to 1. This feature will be supported in the future.\"\n )\n\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n logger.info(accelerator.state, main_process_only=False)\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_warning()\n diffusers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n diffusers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Generate class images if prior preservation is enabled.\n if args.with_prior_preservation:\n class_images_dir = Path(args.class_data_dir)\n if not class_images_dir.exists():\n class_images_dir.mkdir(parents=True)\n cur_class_images = len(list(class_images_dir.iterdir()))\n\n if cur_class_images < args.num_class_images:\n torch_dtype = torch.float16 if accelerator.device.type == \"cuda\" else torch.float32\n if args.prior_generation_precision == \"fp32\":\n torch_dtype = torch.float32\n elif args.prior_generation_precision == \"fp16\":\n torch_dtype = torch.float16\n elif args.prior_generation_precision == \"bf16\":\n torch_dtype = torch.bfloat16\n pipeline = DiffusionPipeline.from_pretrained(\n args.pretrained_model_name_or_path,\n torch_dtype=torch_dtype,\n safety_checker=None,\n revision=args.revision,\n )\n pipeline.set_progress_bar_config(disable=True)\n\n num_new_images = args.num_class_images - cur_class_images\n logger.info(f\"Number of class images to sample: {num_new_images}.\")\n\n 
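# The class prompts are wrapped in a small dataset so accelerator.prepare can shard the dataloader\n # across processes; each process then generates its share of the missing class images and writes\n # them (running index plus SHA-1 content hash) into class_data_dir.\n 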
sample_dataset = PromptDataset(args.class_prompt, num_new_images)\n sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)\n\n sample_dataloader = accelerator.prepare(sample_dataloader)\n pipeline.to(accelerator.device)\n\n for example in tqdm(\n sample_dataloader, desc=\"Generating class images\", disable=not accelerator.is_local_main_process\n ):\n images = pipeline(example[\"prompt\"]).images\n\n for i, image in enumerate(images):\n hash_image = hashlib.sha1(image.tobytes()).hexdigest()\n image_filename = class_images_dir / f\"{example['index'][i] + cur_class_images}-{hash_image}.jpg\"\n image.save(image_filename)\n\n del pipeline\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n # Handle the repository creation\n if accelerator.is_main_process:\n if args.push_to_hub:\n if args.hub_model_id is None:\n repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\n else:\n repo_name = args.hub_model_id\n repo = Repository(args.output_dir, clone_from=repo_name) # noqa: F841\n\n with open(os.path.join(args.output_dir, \".gitignore\"), \"w+\") as gitignore:\n if \"step_*\" not in gitignore:\n gitignore.write(\"step_*\\n\")\n if \"epoch_*\" not in gitignore:\n gitignore.write(\"epoch_*\\n\")\n elif args.output_dir is not None:\n os.makedirs(args.output_dir, exist_ok=True)\n\n # Load the tokenizer\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)\n elif args.pretrained_model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(\n args.pretrained_model_name_or_path,\n subfolder=\"tokenizer\",\n revision=args.revision,\n use_fast=False,\n )\n\n # import correct text encoder class\n text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)\n\n # Load scheduler and models\n noise_scheduler = DDPMScheduler(\n beta_start=0.00085,\n beta_end=0.012,\n beta_schedule=\"scaled_linear\",\n num_train_timesteps=1000,\n ) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder=\"scheduler\")\n text_encoder = text_encoder_cls.from_pretrained(\n args.pretrained_model_name_or_path, subfolder=\"text_encoder\", revision=args.revision\n )\n vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder=\"vae\", revision=args.revision)\n unet = UNet2DConditionModel.from_pretrained(\n args.pretrained_model_name_or_path, subfolder=\"unet\", revision=args.revision\n )\n\n if args.adapter != \"full\":\n config = create_unet_adapter_config(args)\n unet = get_peft_model(unet, config)\n unet.print_trainable_parameters()\n print(unet)\n\n vae.requires_grad_(False)\n if not args.train_text_encoder:\n text_encoder.requires_grad_(False)\n elif args.train_text_encoder and args.adapter != \"full\":\n config = create_text_encoder_adapter_config(args)\n text_encoder = get_peft_model(text_encoder, config)\n text_encoder.print_trainable_parameters()\n print(text_encoder)\n\n if args.enable_xformers_memory_efficient_attention:\n if is_xformers_available():\n unet.enable_xformers_memory_efficient_attention()\n else:\n raise ValueError(\"xformers is not available. 
Make sure it is installed correctly\")\n\n if args.gradient_checkpointing:\n unet.enable_gradient_checkpointing()\n if args.train_text_encoder and args.adapter == \"full\":\n text_encoder.gradient_checkpointing_enable()\n\n # Enable TF32 for faster training on Ampere GPUs,\n # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices\n if args.allow_tf32:\n torch.backends.cuda.matmul.allow_tf32 = True\n\n if args.scale_lr:\n args.learning_rate = (\n args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes\n )\n\n # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs\n if args.use_8bit_adam:\n try:\n import bitsandbytes as bnb\n except ImportError:\n raise ImportError(\n \"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.\"\n )\n\n optimizer_class = bnb.optim.AdamW8bit\n else:\n optimizer_class = torch.optim.AdamW\n\n # Optimizer creation\n params_to_optimize = (\n itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()\n )\n optimizer = optimizer_class(\n params_to_optimize,\n lr=args.learning_rate,\n betas=(args.adam_beta1, args.adam_beta2),\n weight_decay=args.adam_weight_decay,\n eps=args.adam_epsilon,\n )\n\n # Dataset and DataLoaders creation:\n train_dataset = DreamBoothDataset(\n instance_data_root=args.instance_data_dir,\n instance_prompt=args.instance_prompt,\n class_data_root=args.class_data_dir if args.with_prior_preservation else None,\n class_prompt=args.class_prompt,\n tokenizer=tokenizer,\n size=args.resolution,\n center_crop=args.center_crop,\n )\n\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=args.train_batch_size,\n shuffle=True,\n collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),\n num_workers=1,\n )\n\n # Scheduler and math around the number of training steps.\n overrode_max_train_steps = False\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n overrode_max_train_steps = True\n\n lr_scheduler = get_scheduler(\n args.lr_scheduler,\n optimizer=optimizer,\n num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,\n num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,\n num_cycles=args.lr_num_cycles,\n power=args.lr_power,\n )\n\n # Prepare everything with our `accelerator`.\n if args.train_text_encoder:\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, text_encoder, optimizer, train_dataloader, lr_scheduler\n )\n else:\n unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(\n unet, optimizer, train_dataloader, lr_scheduler\n )\n\n # For mixed precision training we cast the text_encoder and vae weights to half-precision,\n # as these models are only used for inference; keeping weights in full precision is not required.\n weight_dtype = torch.float32\n if accelerator.mixed_precision == \"fp16\":\n weight_dtype = torch.float16\n elif accelerator.mixed_precision == \"bf16\":\n weight_dtype = torch.bfloat16\n\n # Move vae and text_encoder to device and cast to weight_dtype\n vae.to(accelerator.device, dtype=weight_dtype)\n if not args.train_text_encoder:\n text_encoder.to(accelerator.device, dtype=weight_dtype)\n\n # We need to recalculate our total training 
steps as the size of the training dataloader may have changed.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if overrode_max_train_steps:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n # Afterwards we recalculate our number of training epochs\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n # We need to initialize the trackers we use, and also store our configuration.\n # The trackers initialize automatically on the main process.\n if accelerator.is_main_process:\n accelerator.init_trackers(\"dreambooth\", config=vars(args))\n\n # Train!\n total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num batches each epoch = {len(train_dataloader)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n global_step = 0\n first_epoch = 0\n\n # Potentially load in the weights and states from a previous save\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint != \"latest\":\n path = os.path.basename(args.resume_from_checkpoint)\n else:\n # Get the most recent checkpoint\n dirs = os.listdir(args.output_dir)\n dirs = [d for d in dirs if d.startswith(\"checkpoint\")]\n dirs = sorted(dirs, key=lambda x: int(x.split(\"-\")[1]))\n path = dirs[-1]\n accelerator.print(f\"Resuming from checkpoint {path}\")\n accelerator.load_state(os.path.join(args.output_dir, path))\n global_step = int(path.split(\"-\")[1])\n\n resume_global_step = global_step * args.gradient_accumulation_steps\n first_epoch = resume_global_step // num_update_steps_per_epoch\n resume_step = resume_global_step % num_update_steps_per_epoch\n\n # Only show the progress bar once on each machine.\n progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)\n progress_bar.set_description(\"Steps\")\n\n for epoch in range(first_epoch, args.num_train_epochs):\n unet.train()\n if args.train_text_encoder:\n text_encoder.train()\n with TorchTracemalloc() as tracemalloc:\n for step, batch in enumerate(train_dataloader):\n # Skip steps until we reach the resumed step\n if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:\n if step % args.gradient_accumulation_steps == 0:\n progress_bar.update(1)\n if args.report_to == \"wandb\":\n accelerator.print(progress_bar)\n continue\n\n with accelerator.accumulate(unet):\n # Convert images to latent space\n latents = vae.encode(batch[\"pixel_values\"].to(dtype=weight_dtype)).latent_dist.sample()\n latents = latents * 0.18215\n\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(latents)\n bsz = latents.shape[0]\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device\n )\n timesteps = timesteps.long()\n\n # Add noise to the latents according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_latents = 
noise_scheduler.add_noise(latents, noise, timesteps)\n\n # Get the text embedding for conditioning\n encoder_hidden_states = text_encoder(batch[\"input_ids\"])[0]\n\n # Predict the noise residual\n model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample\n\n # Get the target for loss depending on the prediction type\n if noise_scheduler.config.prediction_type == \"epsilon\":\n target = noise\n elif noise_scheduler.config.prediction_type == \"v_prediction\":\n target = noise_scheduler.get_velocity(latents, noise, timesteps)\n else:\n raise ValueError(f\"Unknown prediction type {noise_scheduler.config.prediction_type}\")\n\n if args.with_prior_preservation:\n # Chunk the noise and model_pred into two parts and compute the loss on each part separately.\n model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)\n target, target_prior = torch.chunk(target, 2, dim=0)\n\n # Compute instance loss\n loss = F.mse_loss(model_pred.float(), target.float(), reduction=\"mean\")\n\n # Compute prior loss\n prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction=\"mean\")\n\n # Add the prior loss to the instance loss.\n loss = loss + args.prior_loss_weight * prior_loss\n else:\n loss = F.mse_loss(model_pred.float(), target.float(), reduction=\"mean\")\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n params_to_clip = (\n itertools.chain(unet.parameters(), text_encoder.parameters())\n if args.train_text_encoder\n else unet.parameters()\n )\n accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n progress_bar.update(1)\n if args.report_to == \"wandb\":\n accelerator.print(progress_bar)\n global_step += 1\n\n # if global_step % args.checkpointing_steps == 0:\n # if accelerator.is_main_process:\n # save_path = os.path.join(args.output_dir, f\"checkpoint-{global_step}\")\n # accelerator.save_state(save_path)\n # logger.info(f\"Saved state to {save_path}\")\n\n logs = {\"loss\": loss.detach().item(), \"lr\": lr_scheduler.get_last_lr()[0]}\n progress_bar.set_postfix(**logs)\n accelerator.log(logs, step=global_step)\n\n if (\n args.validation_prompt is not None\n and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0\n ):\n logger.info(\n f\"Running validation... 
\\n Generating {args.num_validation_images} images with prompt:\"\n f\" {args.validation_prompt}.\"\n )\n # create pipeline\n pipeline = DiffusionPipeline.from_pretrained(\n args.pretrained_model_name_or_path,\n safety_checker=None,\n revision=args.revision,\n )\n # set `keep_fp32_wrapper` to True because we do not want to remove\n # mixed precision hooks while we are still training\n pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True)\n pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True)\n pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)\n pipeline = pipeline.to(accelerator.device)\n pipeline.set_progress_bar_config(disable=True)\n\n # Switch to evaluation mode\n pipeline.unet.eval()\n pipeline.text_encoder.eval()\n\n # run inference\n if args.seed is not None:\n generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)\n else:\n generator = None\n images = []\n for _ in range(args.num_validation_images):\n image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]\n images.append(image)\n\n for tracker in accelerator.trackers:\n if tracker.name == \"tensorboard\":\n np_images = np.stack([np.asarray(img) for img in images])\n tracker.writer.add_images(\"validation\", np_images, epoch, dataformats=\"NHWC\")\n if tracker.name == \"wandb\":\n import wandb\n\n tracker.log(\n {\n \"validation\": [\n wandb.Image(image, caption=f\"{i}: {args.validation_prompt}\")\n for i, image in enumerate(images)\n ]\n }\n )\n\n # Switch back to training mode\n pipeline.unet.train()\n pipeline.text_encoder.train()\n\n del pipeline\n torch.cuda.empty_cache()\n\n if global_step >= args.max_train_steps:\n break\n # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage\n accelerator.print(f\"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}\")\n accelerator.print(f\"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}\")\n accelerator.print(f\"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}\")\n accelerator.print(\n f\"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}\"\n )\n\n accelerator.print(f\"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}\")\n accelerator.print(f\"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}\")\n accelerator.print(f\"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}\")\n accelerator.print(\n \"CPU Total Peak Memory consumed during the train (max): {}\".format(\n tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)\n )\n )\n\n # Create the pipeline using the trained modules and save it.\n accelerator.wait_for_everyone()\n if accelerator.is_main_process:\n if args.adapter != \"full\":\n unwrapped_unet = accelerator.unwrap_model(unet)\n unwrapped_unet.save_pretrained(\n os.path.join(args.output_dir, \"unet\"), state_dict=accelerator.get_state_dict(unet)\n )\n if args.train_text_encoder:\n unwrapped_text_encoder = accelerator.unwrap_model(text_encoder)\n unwrapped_text_encoder.save_pretrained(\n os.path.join(args.output_dir, \"text_encoder\"),\n state_dict=accelerator.get_state_dict(text_encoder),\n )\n else:\n pipeline = DiffusionPipeline.from_pretrained(\n args.pretrained_model_name_or_path,\n unet=accelerator.unwrap_model(unet),\n text_encoder=accelerator.unwrap_model(text_encoder),\n 
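# Full fine-tuning path: rebuild the complete pipeline with the trained UNet and text encoder and\n # save it below; the adapter branch above instead saves the PEFT-wrapped models separately.\n 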
revision=args.revision,\n )\n pipeline.save_pretrained(args.output_dir)\n\n if args.push_to_hub:\n repo.push_to_hub(commit_message=\"End of training\", blocking=False, auto_lfs_prune=True)\n\n accelerator.end_training()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n", "output": ["main", "create_text_encoder_adapter_config", "import_model_class_from_model_name_or_path", "create_unet_adapter_config", "collate_fn", "parse_args", "b2mb", "get_full_repo_name", "DreamBoothDataset", "PromptDataset", "TorchTracemalloc"], "metadata": {"file_path": "peft-main/examples/stable_diffusion/train_dreambooth.py", "file_length": 15457, "symbol_dict": [{"symbol": "import_model_class_from_model_name_or_path", "type": "mannual_defined_function", "byte_location": 1540, "location": 514}, {"symbol": "create_unet_adapter_config", "type": "mannual_defined_function", "byte_location": 2282, "location": 735}, {"symbol": "b2mb", "type": "mannual_defined_function", "byte_location": 21188, "location": 6603}, {"symbol": "collate_fn", "type": "mannual_defined_function", "byte_location": 25940, "location": 8058}, {"symbol": "get_full_repo_name", "type": "mannual_defined_function", "byte_location": 27167, "location": 8420}, {"symbol": "parse_args", "type": "mannual_defined_function", "byte_location": 5124, "location": 1654}, {"symbol": "main", "type": "mannual_defined_function", "byte_location": 27496, "location": 8525}, {"symbol": "create_text_encoder_adapter_config", "type": "mannual_defined_function", "byte_location": 3762, "location": 1220}, {"symbol": "DreamBoothDataset", "type": "mannual_defined_class", "byte_location": 22890, "location": 7185}, {"symbol": "PromptDataset", "type": "mannual_defined_class", "byte_location": 26724, "location": 8296}, {"symbol": "TorchTracemalloc", "type": "mannual_defined_class", "byte_location": 21306, "location": 6642}]}}