Bases: Hermes2ProToolParser
  Source code in vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py
@ToolParserManager.register_module("longcat")
class LongcatFlashToolParser(Hermes2ProToolParser):
    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)
        self.tool_call_start_token: str = "<longcat_tool_call>"
        self.tool_call_end_token: str = "</longcat_tool_call>"
        self.tool_call_regex = re.compile(
            r"<longcat_tool_call>(.*?)</longcat_tool_call>|<longcat_tool_call>(.*)",
            re.DOTALL,
        )
        self.tool_call_start_token_ids = self.model_tokenizer.encode(
            self.tool_call_start_token, add_special_tokens=False
        )
        self.tool_call_end_token_ids = self.model_tokenizer.encode(
            self.tool_call_end_token, add_special_tokens=False
        )
        self.tool_call_start_token_array = [
            self.model_tokenizer.decode([token_id])
            for token_id in self.tool_call_start_token_ids
        ]
        self.tool_call_end_token_array = [
            self.model_tokenizer.decode([token_id])
            for token_id in self.tool_call_end_token_ids
        ]
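The regex compiled in `__init__` accepts both a fully closed `<longcat_tool_call>...</longcat_tool_call>` block (first alternative) and a still-open block (second alternative), so truncated or in-progress generations can still be matched. A minimal sketch of the match behaviour, using hypothetical model output:

```python
import re

# Same pattern as the parser builds in __init__ above.
tool_call_regex = re.compile(
    r"<longcat_tool_call>(.*?)</longcat_tool_call>|<longcat_tool_call>(.*)",
    re.DOTALL,
)

# Hypothetical, fully closed tool call: the first group captures the payload.
complete = (
    '<longcat_tool_call>{"name": "get_weather", "arguments": {"city": "Paris"}}'
    "</longcat_tool_call>"
)
# Hypothetical unterminated tool call (e.g. generation cut off): the second
# group captures whatever followed the start tag so far.
partial = '<longcat_tool_call>{"name": "get_weather", "argu'

for text in (complete, partial):
    for closed, open_ended in tool_call_regex.findall(text):
        print(repr(closed or open_ended))
```

The Hermes-style extraction inherited from `Hermes2ProToolParser` then decodes each captured payload as JSON to build the tool call objects.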
tool_call_end_token  instance-attribute
    tool_call_end_token: str = "</longcat_tool_call>"

tool_call_end_token_array  instance-attribute
    tool_call_end_token_array = [
        model_tokenizer.decode([token_id])
        for token_id in tool_call_end_token_ids
    ]

tool_call_end_token_ids  instance-attribute
    tool_call_end_token_ids = model_tokenizer.encode(
        tool_call_end_token, add_special_tokens=False
    )

tool_call_regex  instance-attribute
    tool_call_regex = re.compile(
        r"<longcat_tool_call>(.*?)</longcat_tool_call>|<longcat_tool_call>(.*)",
        re.DOTALL,
    )

tool_call_start_token  instance-attribute
    tool_call_start_token: str = "<longcat_tool_call>"

tool_call_start_token_array  instance-attribute
    tool_call_start_token_array = [
        model_tokenizer.decode([token_id])
        for token_id in tool_call_start_token_ids
    ]

tool_call_start_token_ids  instance-attribute
    tool_call_start_token_ids = model_tokenizer.encode(
        tool_call_start_token, add_special_tokens=False
    )
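The `tool_call_start_token_array` and `tool_call_end_token_array` attributes hold the decoded text fragment contributed by each individual token of the tags, which is useful for spotting a tag that arrives split across several streamed tokens. A rough illustration of how they are built, assuming a Hugging Face tokenizer (the model name below is a placeholder, and the exact token split depends on the vocabulary):

```python
from transformers import AutoTokenizer

# Placeholder model name; any tokenizer used with this parser behaves the
# same way for this illustration.
tokenizer = AutoTokenizer.from_pretrained("meituan-longcat/LongCat-Flash-Chat")

start_ids = tokenizer.encode("<longcat_tool_call>", add_special_tokens=False)
# One decoded string fragment per token id, mirroring __init__ above.
start_array = [tokenizer.decode([token_id]) for token_id in start_ids]

# If the tag is not a single special token, start_array contains the pieces
# each token contributes, e.g. something like ["<long", "cat", "_tool", "_call", ">"].
print(start_ids)
print(start_array)
```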
   
__init__
  Source code in vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py
def __init__(self, tokenizer: AnyTokenizer):
    super().__init__(tokenizer)
    self.tool_call_start_token: str = "<longcat_tool_call>"
    self.tool_call_end_token: str = "</longcat_tool_call>"
    self.tool_call_regex = re.compile(
        r"<longcat_tool_call>(.*?)</longcat_tool_call>|<longcat_tool_call>(.*)",
        re.DOTALL,
    )
    self.tool_call_start_token_ids = self.model_tokenizer.encode(
        self.tool_call_start_token, add_special_tokens=False
    )
    self.tool_call_end_token_ids = self.model_tokenizer.encode(
        self.tool_call_end_token, add_special_tokens=False
    )
    self.tool_call_start_token_array = [
        self.model_tokenizer.decode([token_id])
        for token_id in self.tool_call_start_token_ids
    ]
    self.tool_call_end_token_array = [
        self.model_tokenizer.decode([token_id])
        for token_id in self.tool_call_end_token_ids
    ]
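For completeness, a rough offline sketch of instantiating the parser and extracting a tool call (the model name and output string are hypothetical; in the OpenAI-compatible server the parser is normally selected with `--enable-auto-tool-choice --tool-call-parser longcat` rather than constructed by hand):

```python
from transformers import AutoTokenizer

from vllm.entrypoints.openai.protocol import ChatCompletionRequest
from vllm.entrypoints.openai.tool_parsers import ToolParserManager

# Placeholder tokenizer; any tokenizer compatible with AnyTokenizer is accepted.
tokenizer = AutoTokenizer.from_pretrained("meituan-longcat/LongCat-Flash-Chat")

# Look up the class registered under "longcat" and instantiate it.
parser = ToolParserManager.get_tool_parser("longcat")(tokenizer)

# Hypothetical model output containing a single tool call.
model_output = (
    '<longcat_tool_call>'
    '{"name": "get_weather", "arguments": {"city": "Paris"}}'
    "</longcat_tool_call>"
)

request = ChatCompletionRequest(
    model="longcat-flash",
    messages=[{"role": "user", "content": "Weather in Paris?"}],
)
info = parser.extract_tool_calls(model_output, request)
print(info.tools_called)
print([(tc.function.name, tc.function.arguments) for tc in info.tool_calls])
```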