diff --git a/requirements.txt b/requirements.txt
index 45153ff..0afd796 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,10 @@
 openai
-dashscope
\ No newline at end of file
+dashscope
+langchain
+langchain-deepseek
+langchain-community
+
+llama-index
+llama-index-core
+llama-index-llms-dashscope
+llama-index-indices-managed-dashscope
\ No newline at end of file
diff --git a/test/langchain/compatible/deepseek.py b/test/langchain/compatible/deepseek.py
new file mode 100644
index 0000000..374910a
--- /dev/null
+++ b/test/langchain/compatible/deepseek.py
@@ -0,0 +1,14 @@
+from langchain_deepseek import ChatDeepSeek
+import os
+
+chatLLM = ChatDeepSeek(
+    api_key=os.getenv("DASHSCOPE_API_KEY"),
+    api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",  # ChatDeepSeek takes api_base, not base_url
+    model="deepseek-chat",  # model name; see the model list: https://help.aliyun.com/zh/model-studio/getting-started/models
+    # other params...
+)
+messages = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "Who are you?"}]
+response = chatLLM.invoke(messages)
+print(response.model_dump_json())
\ No newline at end of file
diff --git a/test/langchain/qwen.py b/test/langchain/qwen.py
new file mode 100644
index 0000000..6d59f74
--- /dev/null
+++ b/test/langchain/qwen.py
@@ -0,0 +1,17 @@
+import os
+
+if not os.environ.get("DEEPSEEK_API_KEY"):
+    os.environ["DEEPSEEK_API_KEY"] = os.environ["DASHSCOPE_API_KEY"]
+# qwen-max is served by DashScope, not api.deepseek.com, so point the
+# DeepSeek provider at DashScope's OpenAI-compatible endpoint.
+os.environ["DEEPSEEK_API_BASE"] = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+
+from langchain.chat_models import init_chat_model
+
+model = init_chat_model("qwen-max", model_provider="deepseek")
+
+messages = [
+    {"role": "system", "content": "You are a helpful assistant."},
+    {"role": "user", "content": "Who are you?"}]
+response = model.invoke(messages)
+print(response.model_dump_json())
\ No newline at end of file
diff --git a/test/llamaindex/test_20250826_1.ipynb b/test/llamaindex/test_20250826_1.ipynb
new file mode 100644
index 0000000..6bc8a9e
--- /dev/null
+++ b/test/llamaindex/test_20250826_1.ipynb
@@ -0,0 +1,53 @@
+{
+  "cells": [
+    {
+      "metadata": {},
+      "cell_type": "code",
+      "source": [
+        "from llama_index.llms.dashscope import DashScope\n",
+        "\n",
+        "llm = DashScope(model_name=\"qwen-max\")  # LLM used to generate responses"
+      ],
+      "outputs": [],
+      "execution_count": null,
+      "id": "a5d3b9e1d4e6588f"
+    },
+    {
+      "cell_type": "code",
+      "id": "initial_id",
+      "metadata": {
+        "collapsed": true,
+        "jupyter": {
+          "is_executing": true
+        }
+      },
+      "source": [
+        "response = llm.complete(\"William Shakespeare is \")\n",
+        "print(response)"
+      ],
+      "outputs": [],
+      "execution_count": null
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 2
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython2",
+      "version": "2.7.6"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+}
diff --git a/test/llamaindex/test_20250826_1.py b/test/llamaindex/test_20250826_1.py
new file mode 100644
index 0000000..5b81202
--- /dev/null
+++ b/test/llamaindex/test_20250826_1.py
@@ -0,0 +1,41 @@
+from llama_index.core.base.llms.types import ChatMessage
+from llama_index.llms.dashscope import DashScope
+import asyncio
+
+llm = DashScope(model_name="qwen-max")  # LLM used to generate responses
+
+def test1():  # synchronous completion
+    response = llm.complete("William Shakespeare is ")
+    print(response)
+
+async def test2():  # asynchronous completion
+    response = await llm.acomplete("William Shakespeare is ")
+    print(response)
+
+
+def test3():  # streaming completion, printing each cumulative chunk
+    response = llm.stream_complete("William Shakespeare is ")
+    for chunk in response:
+        print(chunk)
+
+def test4():  # streaming completion, printing only the incremental text
+    handle = llm.stream_complete("William Shakespeare is ")
+
+    for token in handle:
+        print(token.delta, end="", flush=True)
+
+
+def test5():  # chat with a message history
+    messages = [
+        ChatMessage(role="system", content="You are a helpful assistant."),
+        ChatMessage(role="user", content="Tell me a joke."),
+    ]
+    chat_response = llm.chat(messages)
+    print(chat_response)
+
+if __name__ == '__main__':
+    # test1()
+    # asyncio.run(test2())
+    # test3()
+    # test4()
+    test5()
\ No newline at end of file
diff --git a/test/llamaindex/test_20250826_2.ipynb b/test/llamaindex/test_20250826_2.ipynb
new file mode 100644
index 0000000..c6f1d25
--- /dev/null
+++ b/test/llamaindex/test_20250826_2.ipynb
@@ -0,0 +1,44 @@
+{
+  "cells": [
+    {
+      "metadata": {},
+      "cell_type": "raw",
+      "source": "print(\"hello\")",
+      "id": "a5d3b9e1d4e6588f"
+    },
+    {
+      "cell_type": "code",
+      "id": "initial_id",
+      "metadata": {
+        "collapsed": true,
+        "jupyter": {
+          "is_executing": true
+        }
+      },
+      "source": "",
+      "outputs": [],
+      "execution_count": null
+    }
+  ],
+  "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 2
+      },
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython2",
+      "version": "2.7.6"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+}