Merge branch 'main' into feat/plugin
commit
02d26818ad
@ -0,0 +1,25 @@
|
||||
model: meta.llama3-1-405b-instruct-v1:0
|
||||
label:
|
||||
en_US: Llama 3.1 405B Instruct
|
||||
model_type: llm
|
||||
model_properties:
|
||||
mode: completion
|
||||
context_size: 128000
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
default: 0.5
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
default: 0.9
|
||||
- name: max_gen_len
|
||||
use_template: max_tokens
|
||||
required: true
|
||||
default: 512
|
||||
min: 1
|
||||
max: 2048
|
||||
pricing:
|
||||
input: '0.00532'
|
||||
output: '0.016'
|
||||
unit: '0.001'
|
||||
currency: USD
|
||||
@ -0,0 +1,5 @@
|
||||
model: hunyuan-embedding
|
||||
model_type: text-embedding
|
||||
model_properties:
|
||||
context_size: 1024
|
||||
max_chunks: 1
|
||||
@ -0,0 +1,173 @@
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
from tencentcloud.common import credential
|
||||
from tencentcloud.common.exception import TencentCloudSDKException
|
||||
from tencentcloud.common.profile.client_profile import ClientProfile
|
||||
from tencentcloud.common.profile.http_profile import HttpProfile
|
||||
from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
|
||||
|
||||
from core.model_runtime.entities.model_entities import PriceType
|
||||
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
|
||||
from core.model_runtime.errors.invoke import (
|
||||
InvokeError,
|
||||
)
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class HunyuanTextEmbeddingModel(TextEmbeddingModel):
    """
    Model class for the Hunyuan text embedding model.

    Credentials are a Tencent Cloud (secret_id, secret_key) pair; all calls go
    through the Hunyuan SDK client built by `_setup_hunyuan_client`.
    """

    def _invoke(self, model: str, credentials: dict,
                texts: list[str], user: Optional[str] = None) \
            -> TextEmbeddingResult:
        """
        Invoke text embedding model.

        :param model: model name; only 'hunyuan-embedding' is supported
        :param credentials: model credentials (secret_id / secret_key)
        :param texts: texts to embed
        :param user: unique user id (not forwarded to the Hunyuan API)
        :return: embeddings result
        :raises ValueError: if an unsupported model name is given
        """
        if model != 'hunyuan-embedding':
            raise ValueError('Invalid model name')

        client = self._setup_hunyuan_client(credentials)

        embeddings = []
        token_usage = 0

        # The GetEmbedding API accepts a single input per request, so issue
        # one request per text and accumulate the reported token usage.
        # (Loop variable renamed from `input`, which shadowed the builtin.)
        for text in texts:
            request = models.GetEmbeddingRequest()
            request.from_json_string(json.dumps({"Input": text}))
            response = client.GetEmbedding(request)

            embeddings.extend(data.Embedding for data in response.Data)
            token_usage += response.Usage.TotalTokens

        return TextEmbeddingResult(
            model=model,
            embeddings=embeddings,
            usage=self._calc_response_usage(
                model=model,
                credentials=credentials,
                tokens=token_usage,
            ),
        )

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate credentials with a minimal embedding request.

        :raises CredentialsValidateFailedError: if the API rejects the call
        """
        try:
            client = self._setup_hunyuan_client(credentials)

            # Validate against the embedding endpoint itself. The previous
            # implementation sent a ChatCompletions request carrying the
            # embedding model name, which the chat API rejects even when the
            # credentials are valid.
            request = models.GetEmbeddingRequest()
            request.from_json_string(json.dumps({"Input": "hello"}))
            client.GetEmbedding(request)
        except Exception as e:
            raise CredentialsValidateFailedError(f'Credentials validation failed: {e}')

    def _setup_hunyuan_client(self, credentials):
        """Build a Hunyuan SDK client from the given credentials dict."""
        secret_id = credentials['secret_id']
        secret_key = credentials['secret_key']
        cred = credential.Credential(secret_id, secret_key)
        http_profile = HttpProfile()
        http_profile.endpoint = "hunyuan.tencentcloudapi.com"
        client_profile = ClientProfile()
        client_profile.httpProfile = http_profile
        return hunyuan_client.HunyuanClient(cred, "", client_profile)

    def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
        """
        Calculate response usage.

        :param model: model name
        :param credentials: model credentials
        :param tokens: input tokens
        :return: usage
        """
        # get input price info
        input_price_info = self.get_price(
            model=model,
            credentials=credentials,
            price_type=PriceType.INPUT,
            tokens=tokens,
        )

        # transform usage
        return EmbeddingUsage(
            tokens=tokens,
            total_tokens=tokens,
            unit_price=input_price_info.unit_price,
            price_unit=input_price_info.unit,
            total_price=input_price_info.total_amount,
            currency=input_price_info.currency,
            # self.started_at is presumably set by the base class before
            # _invoke runs — TODO confirm in TextEmbeddingModel.
            latency=time.perf_counter() - self.started_at,
        )

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke errors to unified error types.

        Every Tencent Cloud SDK exception is surfaced to the caller as a
        generic InvokeError.

        :return: Invoke error mapping
        """
        return {
            InvokeError: [TencentCloudSDKException],
        }

    def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
        """
        Get number of tokens for the given texts.

        Uses the local GPT-2 tokenizer as an approximation; the remote
        GetTokenCount API would be exact but costs one network round trip
        per text.

        :param model: model name
        :param credentials: model credentials
        :param texts: texts to embed
        :return: approximate token count
        """
        return sum(self._get_num_tokens_by_gpt2(text) for text in texts)
|
||||
@ -0,0 +1,30 @@
|
||||
model: deepseek-ai/DeepSeek-Coder-V2-Instruct
|
||||
label:
|
||||
en_US: deepseek-ai/DeepSeek-Coder-V2-Instruct
|
||||
model_type: llm
|
||||
features:
|
||||
- agent-thought
|
||||
model_properties:
|
||||
mode: chat
|
||||
context_size: 32768
|
||||
parameter_rules:
|
||||
- name: temperature
|
||||
use_template: temperature
|
||||
- name: max_tokens
|
||||
use_template: max_tokens
|
||||
type: int
|
||||
default: 512
|
||||
min: 1
|
||||
max: 4096
|
||||
help:
|
||||
zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
|
||||
en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
|
||||
- name: top_p
|
||||
use_template: top_p
|
||||
- name: frequency_penalty
|
||||
use_template: frequency_penalty
|
||||
pricing:
|
||||
input: '1.33'
|
||||
output: '1.33'
|
||||
unit: '0.000001'
|
||||
currency: RMB
|
||||
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
|
||||
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none">
|
||||
<path fill="#252F3E" d="M4.51 7.687c0 .197.02.357.058.475.042.117.096.245.17.384a.233.233 0 01.037.123c0 .053-.032.107-.1.16l-.336.224a.255.255 0 01-.138.048c-.054 0-.107-.026-.16-.074a1.652 1.652 0 01-.192-.251 4.137 4.137 0 01-.165-.315c-.415.491-.936.737-1.564.737-.447 0-.804-.129-1.064-.385-.261-.256-.394-.598-.394-1.025 0-.454.16-.822.484-1.1.325-.278.756-.416 1.304-.416.18 0 .367.016.564.042.197.027.4.07.612.118v-.39c0-.406-.085-.689-.25-.854-.17-.166-.458-.246-.868-.246-.186 0-.377.022-.574.07a4.23 4.23 0 00-.575.181 1.525 1.525 0 01-.186.07.326.326 0 01-.085.016c-.075 0-.112-.054-.112-.166v-.262c0-.085.01-.15.037-.186a.399.399 0 01.15-.113c.185-.096.409-.176.67-.24.26-.07.537-.101.83-.101.633 0 1.096.144 1.394.432.293.288.442.726.442 1.314v1.73h.01zm-2.161.811c.175 0 .356-.032.548-.096.191-.064.362-.182.505-.342a.848.848 0 00.181-.341c.032-.129.054-.283.054-.465V7.03a4.43 4.43 0 00-.49-.09 3.996 3.996 0 00-.5-.033c-.357 0-.618.07-.793.214-.176.144-.26.347-.26.614 0 .25.063.437.196.566.128.133.314.197.559.197zm4.273.577c-.096 0-.16-.016-.202-.054-.043-.032-.08-.106-.112-.208l-1.25-4.127a.938.938 0 01-.049-.214c0-.085.043-.133.128-.133h.522c.1 0 .17.016.207.053.043.032.075.107.107.208l.894 3.535.83-3.535c.026-.106.058-.176.1-.208a.365.365 0 01.214-.053h.425c.102 0 .17.016.213.053.043.032.08.107.101.208l.841 3.578.92-3.578a.458.458 0 01.107-.208.346.346 0 01.208-.053h.495c.085 0 .133.043.133.133 0 .027-.006.054-.01.086a.76.76 0 01-.038.133l-1.283 4.127c-.032.107-.069.177-.111.209a.34.34 0 01-.203.053h-.457c-.101 0-.17-.016-.213-.053-.043-.038-.08-.107-.101-.214L8.213 5.37l-.82 3.439c-.026.107-.058.176-.1.213-.043.038-.118.054-.213.054h-.458zm6.838.144a3.51 3.51 0 01-.82-.096c-.266-.064-.473-.134-.612-.214-.085-.048-.143-.101-.165-.15a.378.378 0 01-.031-.149v-.272c0-.112.042-.166.122-.166a.3.3 0 01.096.016c.032.011.08.032.133.054.18.08.378.144.585.187.213.042.42.064.633.064.336 0 .596-.059.777-.176a.575.575 0 00.277-.508.52.52 0 
00-.144-.373c-.095-.102-.276-.193-.537-.278l-.772-.24c-.388-.123-.676-.305-.851-.545a1.275 1.275 0 01-.266-.774c0-.224.048-.422.143-.593.096-.17.224-.32.384-.438.16-.122.34-.213.553-.277.213-.064.436-.091.67-.091.118 0 .24.005.357.021.122.016.234.038.346.06.106.026.208.052.303.085.096.032.17.064.224.096a.46.46 0 01.16.133.289.289 0 01.047.176v.251c0 .112-.042.171-.122.171a.552.552 0 01-.202-.064 2.427 2.427 0 00-1.022-.208c-.303 0-.543.048-.708.15-.165.1-.25.256-.25.475 0 .149.053.277.16.379.106.101.303.202.585.293l.756.24c.383.123.66.294.825.513.165.219.244.47.244.748 0 .23-.047.437-.138.619a1.436 1.436 0 01-.388.47c-.165.133-.362.23-.591.299-.24.075-.49.112-.761.112z"/>
|
||||
<g fill="#F90" fill-rule="evenodd" clip-rule="evenodd">
|
||||
<path d="M14.465 11.813c-1.75 1.297-4.294 1.986-6.481 1.986-3.065 0-5.827-1.137-7.913-3.027-.165-.15-.016-.353.18-.235 2.257 1.313 5.04 2.109 7.92 2.109 1.941 0 4.075-.406 6.039-1.239.293-.133.543.192.255.406z"/>
|
||||
<path d="M15.194 10.98c-.223-.287-1.479-.138-2.048-.069-.17.022-.197-.128-.043-.24 1-.705 2.645-.502 2.836-.267.192.24-.053 1.89-.99 2.68-.143.123-.281.06-.218-.1.213-.53.687-1.72.463-2.003z"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 3.3 KiB |
@ -0,0 +1,25 @@
|
||||
from core.tools.errors import ToolProviderCredentialValidationError
|
||||
from core.tools.provider.builtin.aws.tools.sagemaker_text_rerank import SageMakerReRankTool
|
||||
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
|
||||
|
||||
|
||||
class SageMakerProvider(BuiltinToolProviderController):
    """Builtin tool provider for AWS SageMaker tools."""

    def _validate_credentials(self, credentials: dict) -> None:
        """Validate credentials by running a throwaway rerank invocation."""
        probe_parameters = {
            "sagemaker_endpoint": "",
            "query": "misaka mikoto",
            "candidate_texts": "hello$$$hello world",
            "topk": 5,
            "aws_region": "",
        }
        try:
            tool = SageMakerReRankTool().fork_tool_runtime(
                runtime={"credentials": credentials},
            )
            tool.invoke(user_id='', tool_parameters=probe_parameters)
        except Exception as e:
            raise ToolProviderCredentialValidationError(str(e))
|
||||
|
||||
@ -0,0 +1,15 @@
|
||||
identity:
|
||||
author: AWS
|
||||
name: aws
|
||||
label:
|
||||
en_US: AWS
|
||||
zh_Hans: 亚马逊云科技
|
||||
pt_BR: AWS
|
||||
description:
|
||||
en_US: Services on AWS.
|
||||
zh_Hans: 亚马逊云科技的各类服务
|
||||
pt_BR: Services on AWS.
|
||||
icon: icon.svg
|
||||
tags:
|
||||
- search
|
||||
credentials_for_provider:
|
||||
@ -0,0 +1,83 @@
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Union
|
||||
|
||||
import boto3
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class GuardrailParameters(BaseModel):
    """Validated input parameters for the ApplyGuardrail tool."""

    # Which guardrail to apply, and which published version of it.
    guardrail_id: str = Field(..., description="The identifier of the guardrail")
    guardrail_version: str = Field(..., description="The version of the guardrail")
    # Valid values per the tool spec: "INPUT" or "OUTPUT".
    source: str = Field(..., description="The source of the content")
    text: str = Field(..., description="The text to apply the guardrail to")
    aws_region: str = Field(default="us-east-1", description="AWS region for the Bedrock client")
|
||||
|
||||
class ApplyGuardrailTool(BuiltinTool):
    """Tool that applies an Amazon Bedrock guardrail to a piece of text."""

    def _invoke(self,
                user_id: str,
                tool_parameters: dict[str, Any]
                ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        Invoke the ApplyGuardrail tool.

        :param user_id: id of the invoking user (unused)
        :param tool_parameters: raw parameters, validated via GuardrailParameters
        :return: a text message describing the guardrail action, output and
                 assessments, or a text error message on failure
        """
        # BotoCoreError lives in botocore, not boto3: the previous
        # `except boto3.exceptions.BotoCoreError` raised AttributeError while
        # an exception was in flight, masking the real error.
        from botocore.exceptions import BotoCoreError

        try:
            # Validate and parse input parameters
            params = GuardrailParameters(**tool_parameters)

            # Initialize AWS client
            bedrock_client = boto3.client('bedrock-runtime', region_name=params.aws_region)

            # Apply guardrail
            response = bedrock_client.apply_guardrail(
                guardrailIdentifier=params.guardrail_id,
                guardrailVersion=params.guardrail_version,
                source=params.source,
                content=[{"text": {"text": params.text}}]
            )

            # Check for empty response
            if not response:
                return self.create_text_message(text="Received empty response from AWS Bedrock.")

            # Process the result
            action = response.get("action", "No action specified")
            outputs = response.get("outputs", [])
            output = outputs[0].get("text", "No output received") if outputs else "No output received"
            assessments = response.get("assessments", [])

            # Format assessments: topic policies get one line per topic,
            # anything else is rendered as raw policy data.
            formatted_assessments = []
            for assessment in assessments:
                for policy_type, policy_data in assessment.items():
                    if isinstance(policy_data, dict) and 'topics' in policy_data:
                        for topic in policy_data['topics']:
                            formatted_assessments.append(f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}, Action: {topic['action']}")
                    else:
                        formatted_assessments.append(f"Policy: {policy_type}, Data: {policy_data}")

            result = f"Action: {action}\n "
            result += f"Output: {output}\n "
            if formatted_assessments:
                result += "Assessments:\n " + "\n ".join(formatted_assessments) + "\n "

            return self.create_text_message(text=result)

        except BotoCoreError as e:
            error_message = f'AWS service error: {str(e)}'
            logger.error(error_message, exc_info=True)
            return self.create_text_message(text=error_message)
        except json.JSONDecodeError as e:
            error_message = f'JSON parsing error: {str(e)}'
            logger.error(error_message, exc_info=True)
            return self.create_text_message(text=error_message)
        except Exception as e:
            error_message = f'An unexpected error occurred: {str(e)}'
            logger.error(error_message, exc_info=True)
            return self.create_text_message(text=error_message)
|
||||
@ -0,0 +1,56 @@
|
||||
identity:
|
||||
name: apply_guardrail
|
||||
author: AWS
|
||||
label:
|
||||
en_US: Content Moderation Guardrails
|
||||
zh_Hans: 内容审查护栏
|
||||
description:
|
||||
human:
|
||||
en_US: Content Moderation Guardrails utilizes the ApplyGuardrail API, a feature of Guardrails for Amazon Bedrock. This API is capable of evaluating input prompts and model responses for all Foundation Models (FMs), including those on Amazon Bedrock, custom FMs, and third-party FMs. By implementing this functionality, organizations can achieve centralized governance across all their generative AI applications, thereby enhancing control and consistency in content moderation.
|
||||
zh_Hans: 内容审查护栏采用 Guardrails for Amazon Bedrock 功能中的 ApplyGuardrail API 。ApplyGuardrail 可以评估所有基础模型(FMs)的输入提示和模型响应,包括 Amazon Bedrock 上的 FMs、自定义 FMs 和第三方 FMs。通过实施这一功能, 组织可以在所有生成式 AI 应用程序中实现集中化的治理,从而增强内容审核的控制力和一致性。
|
||||
llm: Content Moderation Guardrails utilizes the ApplyGuardrail API, a feature of Guardrails for Amazon Bedrock. This API is capable of evaluating input prompts and model responses for all Foundation Models (FMs), including those on Amazon Bedrock, custom FMs, and third-party FMs. By implementing this functionality, organizations can achieve centralized governance across all their generative AI applications, thereby enhancing control and consistency in content moderation.
|
||||
parameters:
|
||||
- name: guardrail_id
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Guardrail ID
|
||||
zh_Hans: Guardrail ID
|
||||
human_description:
|
||||
en_US: Please enter the ID of the Guardrail that has already been created on Amazon Bedrock, for example 'qk5nk0e4b77b'.
|
||||
zh_Hans: 请输入已经在 Amazon Bedrock 上创建好的 Guardrail ID, 例如 'qk5nk0e4b77b'.
|
||||
llm_description: Please enter the ID of the Guardrail that has already been created on Amazon Bedrock, for example 'qk5nk0e4b77b'.
|
||||
form: form
|
||||
- name: guardrail_version
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Guardrail Version Number
|
||||
zh_Hans: Guardrail 版本号码
|
||||
human_description:
|
||||
en_US: Please enter the published version of the Guardrail ID that has already been created on Amazon Bedrock. This is typically a version number, such as 2.
|
||||
zh_Hans: 请输入已经在Amazon Bedrock 上创建好的Guardrail ID发布的版本, 通常使用版本号, 例如2.
|
||||
llm_description: Please enter the published version of the Guardrail ID that has already been created on Amazon Bedrock. This is typically a version number, such as 2.
|
||||
form: form
|
||||
- name: source
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Content Source (INPUT or OUTPUT)
|
||||
zh_Hans: 内容来源 (INPUT or OUTPUT)
|
||||
human_description:
|
||||
en_US: The source of data used in the request to apply the guardrail. Valid Values "INPUT | OUTPUT"
|
||||
zh_Hans: 用于应用护栏的请求中所使用的数据来源。有效值为 "INPUT | OUTPUT"
|
||||
llm_description: The source of data used in the request to apply the guardrail. Valid Values "INPUT | OUTPUT"
|
||||
form: form
|
||||
- name: text
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Content to be reviewed
|
||||
zh_Hans: 待审查内容
|
||||
human_description:
|
||||
en_US: The content used for requesting guardrail review, which can be either user input or LLM output.
|
||||
zh_Hans: 用于请求护栏审查的内容,可以是用户输入或 LLM 输出。
|
||||
llm_description: The content used for requesting guardrail review, which can be either user input or LLM output.
|
||||
form: llm
|
||||
@ -0,0 +1,88 @@
|
||||
import json
|
||||
from typing import Any, Union
|
||||
|
||||
import boto3
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class LambdaTranslateUtilsTool(BuiltinTool):
    """Tool that translates text by invoking a user-provided AWS Lambda."""

    # Lazily created boto3 Lambda client, cached on the instance.
    lambda_client: Any = None

    def _invoke_lambda(self, text_content, src_lang, dest_lang, model_id, dictionary_name, request_type, lambda_name):
        """Synchronously invoke the translate Lambda and return its raw payload string."""
        payload = {
            "src_content": text_content,
            "src_lang": src_lang,
            "dest_lang": dest_lang,
            "dictionary_id": dictionary_name,
            "request_type": request_type,
            "model_id": model_id,
        }

        invoke_response = self.lambda_client.invoke(
            FunctionName=lambda_name,
            InvocationType='RequestResponse',
            Payload=json.dumps(payload),
        )

        # NOTE(review): decoding with "unicode_escape" assumes the Lambda
        # returns escaped text — confirm against the Lambda implementation.
        return invoke_response['Payload'].read().decode("unicode_escape")

    def _invoke(self,
                user_id: str,
                tool_parameters: dict[str, Any],
                ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        Invoke the tool: validate parameters, then call the translate Lambda.

        `line` marks the last checkpoint reached and is embedded in the
        error message to ease debugging.
        """
        line = 0
        try:
            if not self.lambda_client:
                aws_region = tool_parameters.get('aws_region')
                if aws_region:
                    self.lambda_client = boto3.client("lambda", region_name=aws_region)
                else:
                    self.lambda_client = boto3.client("lambda")

            # Required parameters, checked in a fixed order so the `line`
            # checkpoint values match the original numbering (1..7).
            required = ['text_content', 'src_lang', 'dest_lang', 'lambda_name',
                        'request_type', 'model_id', 'dictionary_name']
            values = {}
            for line, name in enumerate(required, start=1):
                values[name] = tool_parameters.get(name, '')
                if not values[name]:
                    return self.create_text_message(f'Please input {name}')

            result = self._invoke_lambda(values['text_content'], values['src_lang'],
                                         values['dest_lang'], values['model_id'],
                                         values['dictionary_name'], values['request_type'],
                                         values['lambda_name'])

            return self.create_text_message(text=result)

        except Exception as e:
            return self.create_text_message(f'Exception {str(e)}, line : {line}')
|
||||
@ -0,0 +1,86 @@
|
||||
import json
|
||||
from typing import Any, Union
|
||||
|
||||
import boto3
|
||||
|
||||
from core.tools.entities.tool_entities import ToolInvokeMessage
|
||||
from core.tools.tool.builtin_tool import BuiltinTool
|
||||
|
||||
|
||||
class SageMakerReRankTool(BuiltinTool):
    """Tool that reranks candidate documents against a query via a SageMaker endpoint."""

    # Cached across invocations on the same tool instance.
    sagemaker_client: Any = None
    sagemaker_endpoint: str = None
    topk: int = None

    def _sagemaker_rerank(self, query_input: str, docs: list[str], rerank_endpoint: str):
        """Return one relevance score per doc from the rerank endpoint."""
        payload = json.dumps({
            "inputs": [query_input] * len(docs),
            "docs": docs,
        })
        response_model = self.sagemaker_client.invoke_endpoint(
            EndpointName=rerank_endpoint,
            Body=payload,
            ContentType="application/json",
        )
        body = json.loads(response_model['Body'].read().decode('utf8'))
        scores = body['scores']
        # A single-doc call may come back as a bare score; normalize to a list.
        return scores if isinstance(scores, list) else [scores]

    def _invoke(self,
                user_id: str,
                tool_parameters: dict[str, Any],
                ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        Invoke the tool: score candidates against the query and return the
        top-k as JSON. `line` marks the last checkpoint reached for debugging.
        """
        line = 0
        try:
            if not self.sagemaker_client:
                aws_region = tool_parameters.get('aws_region')
                if aws_region:
                    self.sagemaker_client = boto3.client("sagemaker-runtime", region_name=aws_region)
                else:
                    self.sagemaker_client = boto3.client("sagemaker-runtime")

            line = 1
            if not self.sagemaker_endpoint:
                self.sagemaker_endpoint = tool_parameters.get('sagemaker_endpoint')

            line = 2
            if not self.topk:
                self.topk = tool_parameters.get('topk', 5)

            line = 3
            query = tool_parameters.get('query', '')
            if not query:
                return self.create_text_message('Please input query')

            line = 4
            candidate_texts = tool_parameters.get('candidate_texts')
            if not candidate_texts:
                return self.create_text_message('Please input candidate_texts')

            line = 5
            # NOTE(review): expects a JSON array of {"content": ...} objects,
            # but the provider's credential check passes a '$$$'-separated
            # string — confirm the intended wire format.
            candidate_docs = json.loads(candidate_texts)
            docs = [item.get('content') for item in candidate_docs]

            line = 6
            scores = self._sagemaker_rerank(query_input=query, docs=docs,
                                            rerank_endpoint=self.sagemaker_endpoint)

            line = 7
            for idx, doc in enumerate(candidate_docs):
                doc["score"] = scores[idx]

            line = 8
            ranked = sorted(candidate_docs, key=lambda d: d['score'], reverse=True)

            line = 9
            return self.create_text_message(
                text=json.dumps(ranked[:self.topk], ensure_ascii=False))

        except Exception as e:
            return self.create_text_message(f'Exception {str(e)}, line : {line}')
|
||||
|
||||
@ -0,0 +1,82 @@
|
||||
identity:
|
||||
name: sagemaker_text_rerank
|
||||
author: AWS
|
||||
label:
|
||||
en_US: SagemakerRerank
|
||||
zh_Hans: Sagemaker重排序
|
||||
pt_BR: SagemakerRerank
|
||||
icon: icon.svg
|
||||
description:
|
||||
human:
|
||||
en_US: A tool for performing text similarity ranking. You can find deploy notebook on Github Repo - https://github.com/aws-samples/dify-aws-tool
|
||||
zh_Hans: Sagemaker重排序工具, 请参考 Github Repo - https://github.com/aws-samples/dify-aws-tool上的部署脚本
|
||||
pt_BR: A tool for performing text similarity ranking.
|
||||
llm: A tool for performing text similarity ranking. You can find deploy notebook on Github Repo - https://github.com/aws-samples/dify-aws-tool
|
||||
parameters:
|
||||
- name: sagemaker_endpoint
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: sagemaker endpoint for reranking
|
||||
zh_Hans: 重排序的SageMaker 端点
|
||||
pt_BR: sagemaker endpoint for reranking
|
||||
human_description:
|
||||
en_US: sagemaker endpoint for reranking
|
||||
zh_Hans: 重排序的SageMaker 端点
|
||||
pt_BR: sagemaker endpoint for reranking
|
||||
llm_description: sagemaker endpoint for reranking
|
||||
form: form
|
||||
- name: query
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: Query string
|
||||
zh_Hans: 查询语句
|
||||
pt_BR: Query string
|
||||
human_description:
|
||||
en_US: key words for searching
|
||||
zh_Hans: 查询关键词
|
||||
pt_BR: key words for searching
|
||||
llm_description: key words for searching
|
||||
form: llm
|
||||
- name: candidate_texts
|
||||
type: string
|
||||
required: true
|
||||
label:
|
||||
en_US: text candidates
|
||||
zh_Hans: 候选文本
|
||||
pt_BR: text candidates
|
||||
human_description:
|
||||
en_US: searched candidates by query
|
||||
zh_Hans: 查询文本搜到候选文本
|
||||
pt_BR: searched candidates by query
|
||||
llm_description: searched candidates by query
|
||||
form: llm
|
||||
- name: topk
|
||||
type: number
|
||||
required: false
|
||||
form: form
|
||||
label:
|
||||
en_US: Limit for results count
|
||||
zh_Hans: 返回个数限制
|
||||
pt_BR: Limit for results count
|
||||
human_description:
|
||||
en_US: Limit for results count
|
||||
zh_Hans: 返回个数限制
|
||||
pt_BR: Limit for results count
|
||||
min: 1
|
||||
max: 10
|
||||
default: 5
|
||||
- name: aws_region
|
||||
type: string
|
||||
required: false
|
||||
label:
|
||||
en_US: region of sagemaker endpoint
|
||||
zh_Hans: SageMaker 端点所在的region
|
||||
pt_BR: region of sagemaker endpoint
|
||||
human_description:
|
||||
en_US: region of sagemaker endpoint
|
||||
zh_Hans: SageMaker 端点所在的region
|
||||
pt_BR: region of sagemaker endpoint
|
||||
llm_description: region of sagemaker endpoint
|
||||
form: form
|
||||
@ -0,0 +1,104 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.hunyuan.text_embedding.text_embedding import HunyuanTextEmbeddingModel
|
||||
|
||||
|
||||
def test_validate_credentials():
    """Bad credentials must raise; real ones from the environment must pass."""
    model = HunyuanTextEmbeddingModel()

    bad_credentials = {
        'secret_id': 'invalid_key',
        'secret_key': 'invalid_key',
    }
    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(model='hunyuan-embedding', credentials=bad_credentials)

    good_credentials = {
        'secret_id': os.environ.get('HUNYUAN_SECRET_ID'),
        'secret_key': os.environ.get('HUNYUAN_SECRET_KEY'),
    }
    model.validate_credentials(model='hunyuan-embedding', credentials=good_credentials)
|
||||
|
||||
|
||||
def test_invoke_model():
    """Embedding two short texts returns two vectors and the expected usage."""
    model = HunyuanTextEmbeddingModel()

    credentials = {
        'secret_id': os.environ.get('HUNYUAN_SECRET_ID'),
        'secret_key': os.environ.get('HUNYUAN_SECRET_KEY'),
    }
    result = model.invoke(
        model='hunyuan-embedding',
        credentials=credentials,
        texts=["hello", "world"],
        user="abc-123",
    )

    assert isinstance(result, TextEmbeddingResult)
    assert len(result.embeddings) == 2
    assert result.usage.total_tokens == 6
|
||||
|
||||
def test_get_num_tokens():
    """Two one-word texts count as one token each."""
    model = HunyuanTextEmbeddingModel()

    credentials = {
        'secret_id': os.environ.get('HUNYUAN_SECRET_ID'),
        'secret_key': os.environ.get('HUNYUAN_SECRET_KEY'),
    }
    num_tokens = model.get_num_tokens(
        model='hunyuan-embedding',
        credentials=credentials,
        texts=["hello", "world"],
    )

    assert num_tokens == 2
|
||||
|
||||
def test_max_chunks():
    """Embedding more texts than max_chunks (22 > 1) still yields one vector each."""
    model = HunyuanTextEmbeddingModel()

    result = model.invoke(
        model='hunyuan-embedding',
        credentials={
            'secret_id': os.environ.get('HUNYUAN_SECRET_ID'),
            'secret_key': os.environ.get('HUNYUAN_SECRET_KEY'),
        },
        # Same 22 alternating texts as before, written compactly.
        texts=["hello", "world"] * 11,
    )

    assert isinstance(result, TextEmbeddingResult)
    assert len(result.embeddings) == 22
|
||||
@ -0,0 +1,106 @@
|
||||
import os
|
||||
from collections.abc import Generator
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
|
||||
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.siliconflow.llm.llm import SiliconflowLargeLanguageModel
|
||||
|
||||
|
||||
def test_validate_credentials():
    """An invalid API key must raise; the env-provided key must pass."""
    model = SiliconflowLargeLanguageModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='deepseek-ai/DeepSeek-V2-Chat',
            credentials={'api_key': 'invalid_key'},
        )

    model.validate_credentials(
        model='deepseek-ai/DeepSeek-V2-Chat',
        credentials={'api_key': os.environ.get('API_KEY')},
    )
|
||||
|
||||
|
||||
def test_invoke_model():
    """A non-streaming call returns an LLMResult with non-empty content."""
    model = SiliconflowLargeLanguageModel()

    messages = [UserPromptMessage(content='Who are you?')]
    response = model.invoke(
        model='deepseek-ai/DeepSeek-V2-Chat',
        credentials={'api_key': os.environ.get('API_KEY')},
        prompt_messages=messages,
        model_parameters={'temperature': 0.5, 'max_tokens': 10},
        stop=['How'],
        stream=False,
        user="abc-123",
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
|
||||
|
||||
|
||||
def test_invoke_stream_model():
|
||||
model = SiliconflowLargeLanguageModel()
|
||||
|
||||
response = model.invoke(
|
||||
model='deepseek-ai/DeepSeek-V2-Chat',
|
||||
credentials={
|
||||
'api_key': os.environ.get('API_KEY')
|
||||
},
|
||||
prompt_messages=[
|
||||
UserPromptMessage(
|
||||
content='Hello World!'
|
||||
)
|
||||
],
|
||||
model_parameters={
|
||||
'temperature': 0.5,
|
||||
'max_tokens': 100,
|
||||
'seed': 1234
|
||||
},
|
||||
stream=True,
|
||||
user="abc-123"
|
||||
)
|
||||
|
||||
assert isinstance(response, Generator)
|
||||
|
||||
for chunk in response:
|
||||
assert isinstance(chunk, LLMResultChunk)
|
||||
assert isinstance(chunk.delta, LLMResultChunkDelta)
|
||||
assert isinstance(chunk.delta.message, AssistantPromptMessage)
|
||||
assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
|
||||
|
||||
|
||||
def test_get_num_tokens():
|
||||
model = SiliconflowLargeLanguageModel()
|
||||
|
||||
num_tokens = model.get_num_tokens(
|
||||
model='deepseek-ai/DeepSeek-V2-Chat',
|
||||
credentials={
|
||||
'api_key': os.environ.get('API_KEY')
|
||||
},
|
||||
prompt_messages=[
|
||||
SystemPromptMessage(
|
||||
content='You are a helpful AI assistant.',
|
||||
),
|
||||
UserPromptMessage(
|
||||
content='Hello World!'
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
assert num_tokens == 12
|
||||
@ -0,0 +1,21 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from core.model_runtime.errors.validate import CredentialsValidateFailedError
|
||||
from core.model_runtime.model_providers.siliconflow.siliconflow import SiliconflowProvider
|
||||
|
||||
|
||||
def test_validate_provider_credentials():
|
||||
provider = SiliconflowProvider()
|
||||
|
||||
with pytest.raises(CredentialsValidateFailedError):
|
||||
provider.validate_provider_credentials(
|
||||
credentials={}
|
||||
)
|
||||
|
||||
provider.validate_provider_credentials(
|
||||
credentials={
|
||||
'api_key': os.environ.get('API_KEY')
|
||||
}
|
||||
)
|
||||
@ -0,0 +1,76 @@
|
||||
# Launching new servers with SSL certificates
|
||||
|
||||
## Short description
|
||||
|
||||
Docker-compose certbot configurations with Backward compatibility (without certbot container).
|
||||
Use `docker-compose --profile certbot up` to use this features.
|
||||
|
||||
## The simplest way for launching new servers with SSL certificates
|
||||
|
||||
1. Get letsencrypt certs
|
||||
set `.env` values
|
||||
```properties
|
||||
NGINX_SSL_CERT_FILENAME=fullchain.pem
|
||||
NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE=true
|
||||
CERTBOT_DOMAIN=your_domain.com
|
||||
CERTBOT_EMAIL=example@your_domain.com
|
||||
```
|
||||
excecute command:
|
||||
```shell
|
||||
sudo docker network prune
|
||||
sudo docker-compose --profile certbot up --force-recreate -d
|
||||
```
|
||||
then after the containers launched:
|
||||
```shell
|
||||
sudo docker-compose exec -it certbot /bin/sh /update-cert.sh
|
||||
```
|
||||
2. Edit `.env` file and `sudo docker-compose --profile certbot up` again.
|
||||
set `.env` value additionally
|
||||
```properties
|
||||
NGINX_HTTPS_ENABLED=true
|
||||
```
|
||||
excecute command:
|
||||
```shell
|
||||
sudo docker-compose --profile certbot up -d --no-deps --force-recreate nginx
|
||||
```
|
||||
Then you can access your serve with HTTPS.
|
||||
[https://your_domain.com](https://your_domain.com)
|
||||
|
||||
## SSL certificates renewal
|
||||
|
||||
For SSL certificates renewal, execute commands below:
|
||||
|
||||
```shell
|
||||
sudo docker-compose exec -it certbot /bin/sh /update-cert.sh
|
||||
sudo docker-compose exec nginx nginx -s reload
|
||||
```
|
||||
|
||||
## Options for certbot
|
||||
|
||||
`CERTBOT_OPTIONS` key might be helpful for testing. i.e.,
|
||||
|
||||
```properties
|
||||
CERTBOT_OPTIONS=--dry-run
|
||||
```
|
||||
|
||||
To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
|
||||
|
||||
```shell
|
||||
sudo docker-compose --profile certbot up -d --no-deps --force-recreate certbot
|
||||
sudo docker-compose exec -it certbot /bin/sh /update-cert.sh
|
||||
```
|
||||
|
||||
Then, reload the nginx container if necessary.
|
||||
|
||||
```shell
|
||||
sudo docker-compose exec nginx nginx -s reload
|
||||
```
|
||||
|
||||
## For legacy servers
|
||||
|
||||
To use cert files dir `nginx/ssl` as before, simply launch containers WITHOUT `--profile certbot` option.
|
||||
|
||||
```shell
|
||||
sudo docker-compose up -d
|
||||
```
|
||||
@ -0,0 +1,30 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
printf '%s\n' "Docker entrypoint script is running"
|
||||
|
||||
printf '%s\n' "\nChecking specific environment variables:"
|
||||
printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
|
||||
printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
|
||||
printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"
|
||||
|
||||
printf '%s\n' "\nChecking mounted directories:"
|
||||
for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
|
||||
if [ -d "$dir" ]; then
|
||||
printf '%s\n' "$dir exists. Contents:"
|
||||
ls -la "$dir"
|
||||
else
|
||||
printf '%s\n' "$dir does not exist."
|
||||
fi
|
||||
done
|
||||
|
||||
printf '%s\n' "\nGenerating update-cert.sh from template"
|
||||
sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
|
||||
-e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
|
||||
-e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
|
||||
/update-cert.template.txt > /update-cert.sh
|
||||
|
||||
chmod +x /update-cert.sh
|
||||
|
||||
printf '%s\n' "\nExecuting command:" "$@"
|
||||
exec "$@"
|
||||
@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DOMAIN="${CERTBOT_DOMAIN}"
|
||||
EMAIL="${CERTBOT_EMAIL}"
|
||||
OPTIONS="${CERTBOT_OPTIONS}"
|
||||
CERT_NAME="${DOMAIN}" # 証明書名をドメイン名と同じにする
|
||||
|
||||
# Check if the certificate already exists
|
||||
if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
|
||||
echo "Certificate exists. Attempting to renew..."
|
||||
certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
|
||||
else
|
||||
echo "Certificate does not exist. Obtaining a new certificate..."
|
||||
certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
|
||||
fi
|
||||
echo "Certificate operation successful"
|
||||
# Note: Nginx reload should be handled outside this container
|
||||
echo "Please ensure to reload Nginx to apply any certificate changes."
|
||||
@ -1,8 +1,8 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
listen ${NGINX_SSL_PORT} ssl;
|
||||
ssl_certificate ./../ssl/${NGINX_SSL_CERT_FILENAME};
|
||||
ssl_certificate_key ./../ssl/${NGINX_SSL_CERT_KEY_FILENAME};
|
||||
ssl_certificate ${SSL_CERTIFICATE_PATH};
|
||||
ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
|
||||
ssl_protocols ${NGINX_SSL_PROTOCOLS};
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
|
||||
@ -0,0 +1,62 @@
|
||||
'use client'
|
||||
import type { FC } from 'react'
|
||||
import React, { useCallback } from 'react'
|
||||
import type { VariantProps } from 'class-variance-authority'
|
||||
import { cva } from 'class-variance-authority'
|
||||
import cn from '@/utils/classnames'
|
||||
|
||||
const variants = cva([], {
|
||||
variants: {
|
||||
align: {
|
||||
left: 'justify-start',
|
||||
center: 'justify-center',
|
||||
right: 'justify-end',
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
align: 'center',
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
type Props = {
|
||||
className?: string
|
||||
title: string
|
||||
onSelect: () => void
|
||||
selected: boolean
|
||||
disabled?: boolean
|
||||
align?: 'left' | 'center' | 'right'
|
||||
} & VariantProps<typeof variants>
|
||||
|
||||
const OptionCard: FC<Props> = ({
|
||||
className,
|
||||
title,
|
||||
onSelect,
|
||||
selected,
|
||||
disabled,
|
||||
align = 'center',
|
||||
}) => {
|
||||
const handleSelect = useCallback(() => {
|
||||
if (selected || disabled)
|
||||
return
|
||||
onSelect()
|
||||
}, [onSelect, selected, disabled])
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
'flex items-center px-2 h-8 rounded-md system-sm-regular bg-components-option-card-option-bg border border-components-option-card-option-bg text-text-secondary cursor-default',
|
||||
(!selected && !disabled) && 'hover:bg-components-option-card-option-bg-hover hover:border-components-option-card-option-border-hover hover:shadow-xs cursor-pointer',
|
||||
selected && 'bg-components-option-card-option-selected-bg border-[1.5px] border-components-option-card-option-selected-border system-sm-medium shadow-xs',
|
||||
disabled && 'text-text-disabled',
|
||||
variants({ align }),
|
||||
className,
|
||||
)}
|
||||
onClick={handleSelect}
|
||||
>
|
||||
{title}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
export default React.memo(OptionCard)
|
||||
Loading…
Reference in New Issue