feat(sdk): enhance Python SDK with 27 new Service API endpoints (#26401)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
This commit is contained in:
lyzno1
2025-09-29 19:22:58 +08:00
committed by GitHub
parent b2bcb6d21a
commit f60aa36fa0
3 changed files with 640 additions and 1 deletion

View File

@@ -1,5 +1,6 @@
import json
from typing import IO, Literal
from typing import Literal, Union, Dict, List, Any, Optional, IO
import requests
@@ -49,6 +50,18 @@ class DifyClient:
params = {"user": user}
return self._send_request("GET", "/meta", params=params)
def get_app_info(self):
    """Fetch basic application information (name, description, tags, mode)."""
    response = self._send_request("GET", "/info")
    return response
def get_app_site_info(self):
    """Fetch the application's site information."""
    site_info = self._send_request("GET", "/site")
    return site_info
def get_file_preview(self, file_id: str):
    """Fetch a preview of the uploaded file identified by *file_id*."""
    endpoint = f"/files/{file_id}/preview"
    return self._send_request("GET", endpoint)
class CompletionClient(DifyClient):
def create_completion_message(
@@ -144,6 +157,51 @@ class ChatClient(DifyClient):
files = {"file": audio_file}
return self._send_request_with_files("POST", "/audio-to-text", data, files)
# Annotation APIs
def annotation_reply_action(
    self,
    action: Literal["enable", "disable"],
    score_threshold: float,
    embedding_provider_name: str,
    embedding_model_name: str,
):
    """Enable or disable the annotation reply feature.

    Raises:
        ValueError: If any of the three required fields is None, since the
            backend API rejects null values for them.
    """
    required = (score_threshold, embedding_provider_name, embedding_model_name)
    if any(value is None for value in required):
        raise ValueError("score_threshold, embedding_provider_name, and embedding_model_name cannot be None")
    payload = {
        "score_threshold": score_threshold,
        "embedding_provider_name": embedding_provider_name,
        "embedding_model_name": embedding_model_name,
    }
    return self._send_request("POST", f"/apps/annotation-reply/{action}", json=payload)
def get_annotation_reply_status(self, action: Literal["enable", "disable"], job_id: str):
    """Poll the status of a previously submitted annotation reply job."""
    endpoint = f"/apps/annotation-reply/{action}/status/{job_id}"
    return self._send_request("GET", endpoint)
def list_annotations(self, page: int = 1, limit: int = 20, keyword: str = ""):
    """Return a page of this application's annotations, optionally filtered by keyword."""
    query = {"page": page, "limit": limit}
    # An empty keyword is treated as "no filter" and omitted from the query.
    if keyword:
        query["keyword"] = keyword
    return self._send_request("GET", "/apps/annotations", params=query)
def create_annotation(self, question: str, answer: str):
    """Create a new question/answer annotation."""
    payload = {"question": question, "answer": answer}
    return self._send_request("POST", "/apps/annotations", json=payload)
def update_annotation(self, annotation_id: str, question: str, answer: str):
    """Replace the question and answer of an existing annotation."""
    payload = {"question": question, "answer": answer}
    endpoint = f"/apps/annotations/{annotation_id}"
    return self._send_request("PUT", endpoint, json=payload)
def delete_annotation(self, annotation_id: str):
    """Remove the annotation identified by *annotation_id*."""
    endpoint = f"/apps/annotations/{annotation_id}"
    return self._send_request("DELETE", endpoint)
class WorkflowClient(DifyClient):
def run(self, inputs: dict, response_mode: Literal["blocking", "streaming"] = "streaming", user: str = "abc-123"):
@@ -157,6 +215,55 @@ class WorkflowClient(DifyClient):
def get_result(self, workflow_run_id):
    """Fetch the result of a workflow run by its run ID."""
    endpoint = f"/workflows/run/{workflow_run_id}"
    return self._send_request("GET", endpoint)
def get_workflow_logs(
    self,
    keyword: str | None = None,
    status: Literal["succeeded", "failed", "stopped"] | None = None,
    page: int = 1,
    limit: int = 20,
    created_at__before: str | None = None,
    created_at__after: str | None = None,
    created_by_end_user_session_id: str | None = None,
    created_by_account: str | None = None,
):
    """Get workflow execution logs with optional filtering.

    Args:
        keyword: Substring filter on log entries.
        status: Filter by final run status.
        page: 1-based page number.
        limit: Page size.
        created_at__before: Upper bound on creation time (string, presumably
            ISO-8601 — confirm against the API docs).
        created_at__after: Lower bound on creation time.
        created_by_end_user_session_id: Filter by end-user session ID.
        created_by_account: Filter by creating account.

    Returns:
        The API response for GET /workflows/logs.
    """
    params: dict = {"page": page, "limit": limit}
    # Forward only the filters the caller actually provided. Truthiness check
    # preserves the original behavior: empty strings are treated as unset.
    optional_filters = {
        "keyword": keyword,
        "status": status,
        "created_at__before": created_at__before,
        "created_at__after": created_at__after,
        "created_by_end_user_session_id": created_by_end_user_session_id,
        "created_by_account": created_by_account,
    }
    params.update({key: value for key, value in optional_filters.items() if value})
    return self._send_request("GET", "/workflows/logs", params=params)
def run_specific_workflow(
    self,
    workflow_id: str,
    inputs: dict,
    response_mode: Literal["blocking", "streaming"] = "streaming",
    user: str = "abc-123",
):
    """Run a specific workflow by its workflow ID."""
    should_stream = response_mode == "streaming"
    payload = {"inputs": inputs, "response_mode": response_mode, "user": user}
    return self._send_request("POST", f"/workflows/{workflow_id}/run", payload, stream=should_stream)
class WorkspaceClient(DifyClient):
    """Client for workspace-related operations."""

    def get_available_models(self, model_type: str):
        """List models of the given type available in the current workspace."""
        endpoint = f"/workspaces/current/models/model-types/{model_type}"
        return self._send_request("GET", endpoint)
class KnowledgeBaseClient(DifyClient):
def __init__(
@@ -443,3 +550,117 @@ class KnowledgeBaseClient(DifyClient):
data = {"segment": segment_data}
url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments/{segment_id}"
return self._send_request("POST", url, json=data, **kwargs)
# Advanced Knowledge Base APIs
def hit_testing(
    self,
    query: str,
    retrieval_model: Optional[Dict[str, Any]] = None,
    external_retrieval_model: Optional[Dict[str, Any]] = None,
):
    """Perform hit testing (retrieval test) on the dataset.

    Args:
        query: Query text to test retrieval with.
        retrieval_model: Optional retrieval model settings; omitted from the
            request body when falsy.
        external_retrieval_model: Optional external retrieval model settings;
            omitted from the request body when falsy.

    Returns:
        The API response for POST /datasets/{dataset_id}/hit-testing.
    """
    data: Dict[str, Any] = {"query": query}
    if retrieval_model:
        data["retrieval_model"] = retrieval_model
    if external_retrieval_model:
        data["external_retrieval_model"] = external_retrieval_model
    url = f"/datasets/{self._get_dataset_id()}/hit-testing"
    return self._send_request("POST", url, json=data)
def get_dataset_metadata(self):
    """Retrieve the metadata configured on this dataset."""
    endpoint = f"/datasets/{self._get_dataset_id()}/metadata"
    return self._send_request("GET", endpoint)
def create_dataset_metadata(self, metadata_data: Dict[str, Any]):
    """Create metadata on this dataset from the given payload."""
    endpoint = f"/datasets/{self._get_dataset_id()}/metadata"
    return self._send_request("POST", endpoint, json=metadata_data)
def update_dataset_metadata(self, metadata_id: str, metadata_data: Dict[str, Any]):
    """Update the dataset metadata entry identified by *metadata_id*."""
    endpoint = f"/datasets/{self._get_dataset_id()}/metadata/{metadata_id}"
    return self._send_request("PATCH", endpoint, json=metadata_data)
def get_built_in_metadata(self):
    """Retrieve the built-in metadata for this dataset."""
    endpoint = f"/datasets/{self._get_dataset_id()}/metadata/built-in"
    return self._send_request("GET", endpoint)
def manage_built_in_metadata(self, action: str, metadata_data: Optional[Dict[str, Any]] = None):
    """Manage built-in metadata with the specified action.

    Args:
        action: Action path segment appended to the built-in metadata endpoint
            (valid values are defined by the backend; not validated here).
        metadata_data: Optional request body; an empty JSON object is sent
            when omitted or falsy (preserving the original behavior).

    Returns:
        The API response for POST /datasets/{dataset_id}/metadata/built-in/{action}.
    """
    payload = metadata_data or {}
    url = f"/datasets/{self._get_dataset_id()}/metadata/built-in/{action}"
    return self._send_request("POST", url, json=payload)
def update_documents_metadata(self, operation_data: List[Dict[str, Any]]):
    """Apply metadata updates to multiple documents in one request."""
    payload = {"operation_data": operation_data}
    endpoint = f"/datasets/{self._get_dataset_id()}/documents/metadata"
    return self._send_request("POST", endpoint, json=payload)
# Dataset Tags APIs
def list_dataset_tags(self):
    """Fetch all dataset tags."""
    response = self._send_request("GET", "/datasets/tags")
    return response
def bind_dataset_tags(self, tag_ids: List[str]):
    """Attach the given tags to the current dataset."""
    payload = {"tag_ids": tag_ids, "target_id": self._get_dataset_id()}
    return self._send_request("POST", "/datasets/tags/binding", json=payload)
def unbind_dataset_tag(self, tag_id: str):
    """Detach a single tag from the current dataset."""
    payload = {"tag_id": tag_id, "target_id": self._get_dataset_id()}
    return self._send_request("POST", "/datasets/tags/unbinding", json=payload)
def get_dataset_tags(self):
    """Fetch the tags of the current dataset."""
    endpoint = f"/datasets/{self._get_dataset_id()}/tags"
    return self._send_request("GET", endpoint)
# RAG Pipeline APIs
def get_datasource_plugins(self, is_published: bool = True):
    """List datasource plugins for this dataset's RAG pipeline."""
    endpoint = f"/datasets/{self._get_dataset_id()}/pipeline/datasource-plugins"
    return self._send_request("GET", endpoint, params={"is_published": is_published})
def run_datasource_node(
    self,
    node_id: str,
    inputs: Dict[str, Any],
    datasource_type: str,
    is_published: bool = True,
    credential_id: Optional[str] = None,
):
    """Run a datasource node in the RAG pipeline.

    Args:
        node_id: ID of the datasource node to execute.
        inputs: Input values for the node.
        datasource_type: Type of the datasource backing the node.
        is_published: Whether to run against the published pipeline.
        credential_id: Optional credential ID; omitted from the request body
            when falsy (preserving the original behavior).

    Returns:
        The streaming API response (the request is sent with stream=True).
    """
    data: Dict[str, Any] = {
        "inputs": inputs,
        "datasource_type": datasource_type,
        "is_published": is_published,
    }
    if credential_id:
        data["credential_id"] = credential_id
    url = f"/datasets/{self._get_dataset_id()}/pipeline/datasource/nodes/{node_id}/run"
    return self._send_request("POST", url, json=data, stream=True)
def run_rag_pipeline(
    self,
    inputs: Dict[str, Any],
    datasource_type: str,
    datasource_info_list: List[Dict[str, Any]],
    start_node_id: str,
    is_published: bool = True,
    response_mode: Literal["streaming", "blocking"] = "blocking",
):
    """Trigger a RAG pipeline run on this dataset."""
    streaming = response_mode == "streaming"
    payload = {
        "inputs": inputs,
        "datasource_type": datasource_type,
        "datasource_info_list": datasource_info_list,
        "start_node_id": start_node_id,
        "is_published": is_published,
        "response_mode": response_mode,
    }
    endpoint = f"/datasets/{self._get_dataset_id()}/pipeline/run"
    return self._send_request("POST", endpoint, json=payload, stream=streaming)
def upload_pipeline_file(self, file_path: str):
    """Upload a local file for use by the RAG pipeline."""
    # Open in binary mode; the handle stays open for the duration of the upload.
    with open(file_path, "rb") as upload:
        file_payload = {"file": upload}
        return self._send_request_with_files("POST", "/datasets/pipeline/file-upload", {}, file_payload)