Traceback (most recent call last) in /tmp/ipykernel_2598928/3204809166.py — AuthenticationError (HTTP 401, "Token Status Unavailable"); full traceback below.

2025年11月12日 15:44 状态: pending

🚨 错误信息

--------------------------------------------------------------------------- AuthenticationError Traceback (most recent call last) /tmp/ipykernel_2598928/3204809166.py in <cell line: 22>() 21 # 执行问答 22 if __name__ == "__main__": ---> 23 result = qa_chain.invoke({"question": "中国的首都是哪?天安门在哪?"}) 24 print("AI答复:", result) ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_core/runnables/base.py in invoke(self, input, config, **kwargs) 3244 input_ = context.run(step.invoke, input_, config, **kwargs) 3245 else: -> 3246 input_ = context.run(step.invoke, input_, config) 3247 # finish the root run 3248 except BaseException as e: ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py in invoke(self, input, config, stop, **kwargs) 393 return cast( 394 "ChatGeneration", --> 395 self.generate_prompt( 396 [self._convert_input(input)], 397 stop=stop, ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py in generate_prompt(self, prompts, stop, callbacks, **kwargs) 1023 ) -> LLMResult: 1024 prompt_messages = [p.to_messages() for p in prompts] -> 1025 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) 1026 1027 @override ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py in generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs) 840 try: 841 results.append( --> 842 self._generate_with_cache( 843 m, 844 stop=stop, ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py in _generate_with_cache(self, messages, stop, run_manager, **kwargs) 1089 result = generate_from_stream(iter(chunks)) 1090 elif inspect.signature(self._generate).parameters.get("run_manager"): -> 1091 result = self._generate( 1092 messages, stop=stop, run_manager=run_manager, **kwargs 1093 ) 
~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_openai/chat_models/base.py in _generate(self, messages, stop, run_manager, **kwargs) 1211 if raw_response is not None and hasattr(raw_response, "http_response"): 1212 e.response = raw_response.http_response # type: ignore[attr-defined] -> 1213 raise e 1214 if ( 1215 self.include_response_headers ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/langchain_openai/chat_models/base.py in _generate(self, messages, stop, run_manager, **kwargs) 1206 ) 1207 else: -> 1208 raw_response = self.client.with_raw_response.create(**payload) 1209 response = raw_response.parse() 1210 except Exception as e: ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/openai/_legacy_response.py in wrapped(*args, **kwargs) 362 kwargs["extra_headers"] = extra_headers 363 --> 364 return cast(LegacyAPIResponse[R], func(*args, **kwargs)) 365 366 return wrapped ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/openai/_utils/_utils.py in wrapper(*args, **kwargs) 284 msg = f"Missing required argument: {quote(missing[0])}" 285 raise TypeError(msg) --> 286 return func(*args, **kwargs) 287 288 return wrapper # type: ignore ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/openai/resources/chat/completions/completions.py in create(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, prompt_cache_key, reasoning_effort, response_format, safety_identifier, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, verbosity, web_search_options, extra_headers, extra_query, extra_body, timeout) 1154 ) -> ChatCompletion | Stream[ChatCompletionChunk]: 1155 validate_response_format(response_format) -> 1156 return self._post( 1157 "/chat/completions", 1158 body=maybe_transform( 
~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/openai/_base_client.py in post(self, path, cast_to, body, options, files, stream, stream_cls) 1257 method="post", url=path, json_data=body, files=to_httpx_files(files), **options 1258 ) -> 1259 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) 1260 1261 def patch( ~/anaconda3/envs/my_langchain/lib/python3.9/site-packages/openai/_base_client.py in request(self, cast_to, options, stream, stream_cls) 1045 1046 log.debug("Re-raising status error") -> 1047 raise self._make_status_error_from_response(err.response) from None 1048 1049 break AuthenticationError: Error code: 401 - {'error': {'code': '', 'message': 'Token Status Unavailable (request id: 20251112154214536631682W0IRjsQh)', 'type': 'rix_api_error'}}

🤖 AI解决方案